python_code | repo_name | file_path
---|---|---|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to validate that targets are properly constructed.
The input is a CFG defining valid target constructions for a given task.
This can be viewed as a loose check that the target would be executable
for a given formalism and database.
This is useful for NQG, which can otherwise over-generate syntactically
invalid targets because its grammars are restricted to a single non-terminal symbol.
"""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_parser
from common.cky import cfg_rule
from tensorflow.io import gfile
# Used for string formatting.
NON_TERMINAL_PREFIX = "##"
ARROW = "=>"
# Root non-terminal symbol.
ROOT_SYMBOL = "ROOT"
# Special non-terminal that can match any terminal sequence.
ANYTHING = "ANYTHING"
class TargetCfgRule(object):
"""Represents a rule."""
def __init__(self, lhs, rhs):
self.lhs = lhs # String.
self.rhs = rhs # String.
def __str__(self):
return "%s %s %s" % (self.lhs, ARROW, self.rhs)
def __repr__(self):
return str(self)
@classmethod
def from_string(cls, rule_string):
symbols = rule_string.split(" ")
if symbols[1] != ARROW:
raise ValueError("Invalid rule_string: %s." % rule_string)
lhs = symbols[0]
rhs = " ".join(symbols[2:])
return cls(lhs, rhs)
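# Usage sketch (illustrative, not part of the original module): rule strings
# use the format "LHS => rhs tokens", with the "##" prefix marking RHS
# non-terminals.
#   rule = TargetCfgRule.from_string("ROOT => select ##EXPR")
#   # rule.lhs == "ROOT", rule.rhs == "select ##EXPR"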
def rules_to_txt_file(rules, filename):
"""Write rules to txt file."""
with gfile.GFile(filename, "w") as rule_file:
for rule in rules:
rule_file.write("%s\n" % str(rule))
print("Wrote %s rules to %s." % (len(rules), filename))
def load_rules_from_file(filename):
"""Load list of TargetCfgRules from txt file."""
rules = []
with gfile.GFile(filename, "r") as rule_file:
for line in rule_file:
# Allow blank lines and comment lines (starting with '#') in grammar files.
line = line.rstrip()
if line and not line.startswith("#"):
rule = TargetCfgRule.from_string(line)
rules.append(rule)
print("Loaded %s rules from %s." % (len(rules), filename))
return rules
def _convert_to_parser_rule(rule, terminals_to_ids, nonterminals_to_ids,
rule_idx):
"""Convert Rule to CFGRule."""
rhs = []
for token in rule.rhs.split(" "):
if token.startswith(NON_TERMINAL_PREFIX):
symbol_idx = nonterminals_to_ids[token[len(NON_TERMINAL_PREFIX):]]
rhs.append(cfg_rule.CFGSymbol(idx=symbol_idx, type=cfg_rule.NON_TERMINAL))
else:
if token not in terminals_to_ids:
return None
symbol_idx = terminals_to_ids[token]
rhs.append(cfg_rule.CFGSymbol(idx=symbol_idx, type=cfg_rule.TERMINAL))
lhs = nonterminals_to_ids[rule.lhs]
parser_rule = cfg_rule.CFGRule(idx=rule_idx, lhs=lhs, rhs=rhs)
return parser_rule
def _populate_fn(unused_span_begin, unused_span_end, unused_parser_rule,
unused_children):
# We are only interested in the presence of a parse, not the parse itself.
# So, we use `True` to simply indicate the presence of some parse.
return True
def _postprocess_fn(nodes):
"""Merge any nodes."""
if nodes:
return [True]
else:
return []
def can_parse(target_string, rules, verbose=False):
"""Returns True if there exists >=1 parse of target_string given rules."""
tokens = target_string.split(" ")
# Add a rule for every span in target_string with lhs `ANYTHING`.
anything_rules = []
for start_idx in range(len(tokens)):
for end_idx in range(start_idx + 1, len(tokens) + 1):
rhs = " ".join(tokens[start_idx:end_idx])
anything_rules.append(TargetCfgRule(ANYTHING, rhs))
# Convert tokens to integer IDs.
terminals_to_ids = {}
for idx, token in enumerate(set(tokens)):
terminals_to_ids[token] = idx
input_ids = [terminals_to_ids[token] for token in tokens]
# Generate non-terminal IDs.
nonterminals_to_ids = {}
nt_idx = 0
for rule in rules + anything_rules:
if rule.lhs not in nonterminals_to_ids:
nonterminals_to_ids[rule.lhs] = nt_idx
nt_idx += 1
nonterminals = nonterminals_to_ids.values()
start_idx = nonterminals_to_ids[ROOT_SYMBOL]
# Convert rules.
parser_rules = []
for rule_idx, rule in enumerate(rules + anything_rules):
parser_rule = _convert_to_parser_rule(rule, terminals_to_ids,
nonterminals_to_ids, rule_idx)
if parser_rule:
parser_rules.append(parser_rule)
# Run parser.
parses = cfg_parser.parse(
input_ids,
parser_rules,
nonterminals,
start_idx,
_populate_fn,
_postprocess_fn,
verbose=verbose)
if parses:
return True
else:
return False
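# Illustrative usage (not from the original module): a tiny target grammar.
# `ANYTHING` rules covering every input span are added automatically by
# can_parse.
#   rules = [TargetCfgRule.from_string("ROOT => select ##T from ##T"),
#            TargetCfgRule.from_string("T => ##ANYTHING")]
#   can_parse("select name from people", rules)   # expected: True
#   can_parse("name select from people", rules)   # expected: False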
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/inference/targets/target_grammar.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate target CFG for SQL given Spider databases.
Note that for simplicity and because it has minimal impact on accuracy, the
grammar generated by this file is slightly different from the one used for the
experiments in the paper, which was specialized for each database.
"""
import json
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser.inference.targets import target_grammar
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("spider_tables", "", "Tables JSON file for Spider.")
flags.DEFINE_string("output", "", "Output rules txt file.")
RULES = [
"ROOT => ##ROOT union ##ROOT",
"ROOT => ##ROOT intersect ##ROOT",
"ROOT => ##ROOT except ##ROOT",
"ROOT => select ##EXPR",
"EXPR => ##T from ##T",
"EXPR => ##T from ##FROM",
"EXPR => ##EXPR where ##ANYTHING",
"EXPR => ##EXPR group by ##ANYTHING",
"EXPR => ##EXPR order by ##ANYTHING",
"EXPR => ##EXPR limit ##ANYTHING",
"T => ( ##ROOT )",
"T => ##T - ##T",
"T => ##T + ##T",
"T => ##T / ##T",
"T => ##T * ##T",
"T => ( ##T )",
"T => distinct ##T",
"T => distinct ( ##T )",
"T => ##T , ##T",
"T => *",
"T => ##T as ##T",
"T => t1",
"T => t2",
"T => t3",
"T => t4",
"T => t5",
"T => t6",
"T => t7",
"T => t8",
"T => t9",
"T => ##T . ##T",
"T => count ( ##T )",
"T => sum ( ##T )",
"T => avg ( ##T )",
"T => max ( ##T )",
"T => min ( ##T )",
"T => count ( ##T )",
"T => sum ( ##T )",
"T => avg ( ##T )",
"T => max ( ##T )",
"T => min ( ##T )",
"FROM => ##FROM join ##T",
"FROM => ##T join ##T",
"FROM => ##FROM on ##JOIN_COL",
"FROM => ##FROM and ##JOIN_COL",
"JOIN_COL => ##T = ##T"
]
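# Example (hypothetical Spider schema): a table named "Singer" with a column
# "Singer_ID" would contribute the lowercased terminal rules below via main():
#   "T => singer"
#   "T => singer_id"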
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def main(unused_argv):
tables_json = load_json(FLAGS.spider_tables)
rules = []
for rule_string in RULES:
rules.append(target_grammar.TargetCfgRule.from_string(rule_string))
schema_elements = set()
for table in tables_json:
columns = set(name for _, name in table["column_names_original"])
tables = set(table["table_names_original"])
schema_elements |= columns
schema_elements |= tables
for name in schema_elements:
rules.append(
target_grammar.TargetCfgRule.from_string("T => %s" % name.lower()))
target_grammar.rules_to_txt_file(rules, FLAGS.output)
if __name__ == "__main__":
app.run(main)
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/inference/targets/generate_spider_grammars.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for tokenization.
We use tokens to refer to coarsely tokenized (e.g. split on spaces) units,
which are what the QCFG rules and parser implicitly operate over.
We use wordpieces to refer to the wordpiece-tokenized inputs for BERT.
"""
from official.nlp.bert import tokenization
# Map for special tokens.
SPECIAL_MAP = {
"m0": "[unused0]",
"m1": "[unused1]"
}
def get_tokenizer(bert_vocab_file):
tokenizer = tokenization.FullTokenizer(bert_vocab_file, do_lower_case=True)
return tokenizer
def get_wordpiece_inputs(tokens, tokenizer, verbose=False, max_num_wordpiece=80):
"""Returns inputs related to tokenization.
The resulting set of tensors includes alignment information between the
space-separated token sequence (which the QCFG parser uses) and the resulting
wordpiece sequence (which the neural encoder uses). There is always a
one-to-many correspondence between tokens and wordpieces.
Args:
tokens: List of string tokens.
tokenizer: `tokenization.FullTokenizer` instance or equivalent.
verbose: Print debug logging if True.
max_num_wordpiece: Maximum number of wordpieces (currently unused in this
function).
Returns:
A tuple of (wordpiece_ids, num_wordpieces, token_start_wp_idx,
token_end_wp_idx):
wordpiece_ids: List of wordpiece ids for input sequence.
num_wordpieces: Number of wordpieces.
token_start_wp_idx: Specifies the index in wordpiece_ids for the first
wordpiece for each input token (inclusive).
token_end_wp_idx: Specifies the index in wordpiece_ids for the last
wordpiece for each input token (inclusive).
"""
wordpiece_idx = 1
token_start_wp_idx = []
token_end_wp_idx = []
wordpieces = []
for token in tokens:
token_start_wp_idx.append(wordpiece_idx)
if token in SPECIAL_MAP:
wordpieces.append(SPECIAL_MAP[token])
wordpiece_idx += 1
else:
token_wordpieces = tokenizer.tokenize(token)
wordpieces.extend(token_wordpieces)
wordpiece_idx += len(token_wordpieces)
# Inclusive end idx.
token_end_wp_idx.append(wordpiece_idx - 1)
if verbose:
print("token_start_wp_idx: %s" % token_start_wp_idx)
print("token_end_wp_idx: %s" % token_end_wp_idx)
# Sanity check: every token should have an alignment entry (no truncation
# happens above).
if len(token_start_wp_idx) != len(tokens):
raise ValueError("Bad token alignment!")
if len(token_end_wp_idx) != len(tokens):
raise ValueError("Bad token alignment!")
wordpieces = ["[CLS]"] + wordpieces + ["[SEP]"]
wordpiece_ids = tokenizer.convert_tokens_to_ids(wordpieces)
num_wordpieces = len(wordpiece_ids)
if verbose:
print("wordpieces: %s" % wordpieces)
print("wordpiece_ids: %s" % wordpiece_ids)
return (wordpiece_ids, num_wordpieces, token_start_wp_idx, token_end_wp_idx)
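# Alignment sketch (actual splits depend on the BERT vocab): for
# tokens = ["m0", "rivers"], if the tokenizer splits "rivers" into
# ["river", "##s"], then:
#   wordpieces          = ["[CLS]", "[unused0]", "river", "##s", "[SEP]"]
#   token_start_wp_idx  = [1, 2]   # first wordpiece of each token
#   token_end_wp_idx    = [1, 3]   # last wordpiece of each token (inclusive)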
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/data/tokenization_utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in generating tf.Examples that are used across modules."""
# Forest node types.
RULE_APPLICATION = 1
AGGREGATION = 2
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/data/data_constants.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write tf.Example protos for model training.
This requires a dataset tsv file and a set of QCFG rules as input.
"""
import os
import pdb
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser.data import example_converter
from model.parser.data import tokenization_utils
from model.qcfg import qcfg_file
from tasks import tsv_utils
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output TF example file.")
flags.DEFINE_string("bert_dir", "", "Directory for BERT, including vocab file.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string("rules", "", "Input rules file.")
flags.DEFINE_integer("offset", 0, "Start index for examples to process.")
flags.DEFINE_integer("limit", 0, "End index for examples to process if >0.")
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
examples = tsv_utils.read_tsv(FLAGS.input)
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
converter = example_converter.ExampleConverter(rules, tokenizer, config)
total_written = 0
writer = tf.io.TFRecordWriter(FLAGS.output)
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
print("Processing example %s." % idx)
try:
tf_example = converter.convert(example)
writer.write(tf_example.SerializeToString())
total_written += 1
except Exception as e:
print("Skipping example %s: %s (likely exceeds the configured maximum lengths)." % (idx, e))
converter.print_max_sizes()
print("Wrote %d examples." % total_written)
if __name__ == "__main__":
app.run(main)
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/data/write_examples.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for writing tf.Example files."""
import collections
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "/baseline_replication/TMCD")
from model.parser.data import forest_serialization
from model.parser.data import parsing_utils
from model.parser.data import tokenization_utils
import tensorflow as tf
import pdb
def _pad_values(values, padded_length):
# EDIT: Added truncation for sequences longer than the maximum sequence length.
if len(values) > padded_length:
# raise ValueError("length %s is > %s" % (len(values), padded_length))
values = values[:padded_length]
for _ in range(len(values), padded_length):
values.append(0)
return values
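# Behavior sketch: shorter value lists are right-padded with zeros; longer
# ones are truncated (per the EDIT above).
#   _pad_values([3, 1, 4], 5)  # -> [3, 1, 4, 0, 0]
#   _pad_values([3, 1, 4], 2)  # -> [3, 1]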
def _create_int_feature(values, padded_length):
values = _pad_values(values, padded_length)
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def get_rule_to_idx_map(rules):
rule_to_idx_map = {}
for idx, rule in enumerate(rules):
rule_to_idx_map[rule] = idx + 1 # Reserve 0 for padding.
return rule_to_idx_map
def _get_applications(root_node, rule_to_idx_map, token_start_wp_idx,
token_end_wp_idx):
"""Returns structures for anchored applications."""
# Traverse all nodes.
node_stack = [root_node]
seen_fingerprints = set()
# Set of (span_begin, span_end, rule).
applications = set()
while node_stack:
node = node_stack.pop()
fingerprint = id(node)
if fingerprint in seen_fingerprints:
continue
seen_fingerprints.add(fingerprint)
if isinstance(node, parsing_utils.AggregationNode):
for child in node.children:
node_stack.append(child)
elif isinstance(node, parsing_utils.RuleApplicationNode):
for child in node.children:
node_stack.append(child)
applications.add((node.span_begin, node.span_end, node.rule))
else:
raise ValueError("Unexpected node type.")
# Map of (span_begin, span_end, rule) to integer idx.
application_key_to_idx_map = {}
# Lists of integers.
application_span_begin = []
application_span_end = []
application_rule_idx = []
# Sort applications to avoid non-determinism.
applications = sorted(applications)
for idx, (span_begin, span_end, rule) in enumerate(applications):
application_key_to_idx_map[(span_begin, span_end, rule)] = idx
application_span_begin.append(token_start_wp_idx[span_begin])
# token_end_wp_idx is an *inclusive* idx.
# span_end is an *exclusive* idx.
# application_span_end is an *inclusive* idx.
application_span_end.append(token_end_wp_idx[span_end - 1])
rule_idx = rule_to_idx_map[rule]
application_rule_idx.append(rule_idx)
return (application_key_to_idx_map, application_span_begin,
application_span_end, application_rule_idx)
def _convert_to_tf_example(example, tokenizer, rules, config, max_sizes=None):
"""Return tf.Example generated for input (source, target)."""
source = example[0]
target = example[1]
tokens = source.split(" ")
num_tokens = len(tokens)
# Tokenize.
(wordpiece_ids, num_wordpieces, token_start_wp_idx,
token_end_wp_idx) = tokenization_utils.get_wordpiece_inputs(
tokens, tokenizer)
# Run chart parser.
target_node = parsing_utils.get_target_node(source, target, rules)
if not target_node:
raise ValueError("No parse returned for target for example: (%s, %s)" %
(source, target))
merged_node = parsing_utils.get_merged_node(source, rules)
# Get anchored applications.
rule_to_idx_map = get_rule_to_idx_map(rules)
(application_key_to_idx_map, application_span_begin, application_span_end,
application_rule_idx) = _get_applications(merged_node, rule_to_idx_map,
token_start_wp_idx,
token_end_wp_idx)
num_applications = len(application_span_begin)
# Raise an error if any rule application ends beyond the maximum sequence length.
if application_span_end and max(application_span_end) >= config["max_num_wordpieces"]:
raise ValueError("Rule application ends at %s >= %s" % (max(application_span_end), config["max_num_wordpieces"]))
def application_idx_fn(span_begin, span_end, rule):
return application_key_to_idx_map[(span_begin, span_end, rule)]
# Get numerator forest.
(nu_node_type, nu_node_1_idx, nu_node_2_idx, nu_application_idx,
nu_num_nodes) = forest_serialization.get_forest_lists(
target_node, num_tokens, application_idx_fn)
# Get denominator forest.
(de_node_type, de_node_1_idx, de_node_2_idx, de_application_idx,
de_num_nodes) = forest_serialization.get_forest_lists(
merged_node, num_tokens, application_idx_fn)
# Create features dict.
features = collections.OrderedDict()
features["wordpiece_ids"] = _create_int_feature(wordpiece_ids,
config["max_num_wordpieces"])
features["num_wordpieces"] = _create_int_feature([num_wordpieces], 1)
features["application_span_begin"] = _create_int_feature(
application_span_begin, config["max_num_applications"])
features["application_span_end"] = _create_int_feature(
application_span_end, config["max_num_applications"])
features["application_rule_idx"] = _create_int_feature(
application_rule_idx, config["max_num_applications"])
features["nu_node_type"] = _create_int_feature(
nu_node_type, config["max_num_numerator_nodes"])
features["nu_node_1_idx"] = _create_int_feature(
nu_node_1_idx, config["max_num_numerator_nodes"])
features["nu_node_2_idx"] = _create_int_feature(
nu_node_2_idx, config["max_num_numerator_nodes"])
features["nu_application_idx"] = _create_int_feature(
nu_application_idx, config["max_num_numerator_nodes"])
features["nu_num_nodes"] = _create_int_feature([nu_num_nodes], 1)
features["de_node_type"] = _create_int_feature(
de_node_type, config["max_num_denominator_nodes"])
features["de_node_1_idx"] = _create_int_feature(
de_node_1_idx, config["max_num_denominator_nodes"])
features["de_node_2_idx"] = _create_int_feature(
de_node_2_idx, config["max_num_denominator_nodes"])
features["de_application_idx"] = _create_int_feature(
de_application_idx, config["max_num_denominator_nodes"])
features["de_num_nodes"] = _create_int_feature([de_num_nodes], 1)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
# Update max sizes.
if max_sizes is not None:
max_sizes["num_wordpieces"] = max(max_sizes["num_wordpieces"],
num_wordpieces)
max_sizes["num_applications"] = max(max_sizes["num_applications"],
num_applications)
max_sizes["nu_num_nodes"] = max(max_sizes["nu_num_nodes"], nu_num_nodes)
max_sizes["de_num_nodes"] = max(max_sizes["de_num_nodes"], de_num_nodes)
return tf_example
class ExampleConverter(object):
"""Converts inputs to tf.Example protos."""
def __init__(self, rules, tokenizer, config):
self.rules = rules
self.tokenizer = tokenizer
self.config = config
self.max_sizes = collections.defaultdict(int)
def convert(self, example):
"""Return tf.Example or Raise."""
tf_example = _convert_to_tf_example(example, self.tokenizer, self.rules,
self.config, self.max_sizes)
return tf_example
def print_max_sizes(self):
"""Print max sizes which is useful for determining necessary padding."""
print("max_sizes: %s" % self.max_sizes)
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/data/example_converter.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating input tensors for parse forests.
The output of the QCFG parser used for pre-processing is a forest
representation of a set of parses. This representation factors common sub-trees
to represent exponentially many trees in an efficient manner.
In our TensorFlow graph, we want to sum over scores for the given set of parse
trees, using dynamic programming over the forest representation for efficiency.
Therefore, this module serializes the forest into a set of integer lists that
collectively represent a sequence of nodes, with child nodes always preceding
their parents. We create new nodes as necessary so that no node has more than
2 children.
"""
import collections
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "/baseline_replication/TMCD")
from model.parser.data import data_constants
from model.parser.data import parsing_utils
def _get_node_fingerprint(node):
return id(node)
def _get_span_to_nodes_maps(root_node):
"""Return maps of span indexes to nodes."""
node_stack = [root_node]
seen_fingerprints = set()
span_to_production_nodes = collections.defaultdict(list)
span_to_aggregation_nodes = collections.defaultdict(list)
while node_stack:
node = node_stack.pop()
fingerprint = _get_node_fingerprint(node)
if fingerprint in seen_fingerprints:
continue
seen_fingerprints.add(fingerprint)
if isinstance(node, parsing_utils.AggregationNode):
for child in node.children:
node_stack.append(child)
span_to_aggregation_nodes[(node.span_begin, node.span_end)].append(node)
elif isinstance(node, parsing_utils.RuleApplicationNode):
for child in node.children:
node_stack.append(child)
span_to_production_nodes[(node.span_begin, node.span_end)].append(node)
else:
raise ValueError("Unexpected node type.")
return span_to_production_nodes, span_to_aggregation_nodes
def get_forest_lists(root_node, num_tokens, application_idx_fn):
"""Get integer lists for serialized forest.
Args:
root_node: Root parsing_utils.ForestNode for parse forest.
num_tokens: Number of tokens in input.
application_idx_fn: Takes (span_begin, span_end, rule) and returns an idx.
Returns:
A tuple (node_type_list, node_1_idx_list, node_2_idx_list,
application_idx_list, num_nodes). All of these are lists of integers
with length equal to the number of nodes in the forest, except for num_nodes
which is the integer number of nodes in the forest. The lists include
the following information:
node_type_list: Whether each node is of type AGGREGATION or RULE_APPLICATION.
node_1_idx_list: If node has >= 1 children, this is the index of its
first child. A node index refers to its index in these lists.
If node has no children, will be -1.
node_2_idx_list: If node has 2 children, this is the index of its
second child, otherwise will be -1.
application_idx_list: If node is of type RULE_APPLICATION, this is
the index of the anchored rule application, where indexing is
defined by application_idx_fn.
"""
(span_to_production_nodes,
span_to_aggregation_nodes) = _get_span_to_nodes_maps(root_node)
# Setup empty lists.
node_type_list = []
node_1_idx_list = []
node_2_idx_list = []
application_idx_list = []
# Map of fingerprints to index.
fingerprint_to_idx = {}
current_index = 0
# Iterate through chart.
for span_end in range(1, num_tokens + 1):
for span_begin in range(span_end - 1, -1, -1):
if (span_begin, span_end) in span_to_production_nodes:
for node in span_to_production_nodes[(span_begin, span_end)]:
fingerprint = _get_node_fingerprint(node)
fingerprint_to_idx[fingerprint] = current_index
current_index += 1
if not isinstance(node, parsing_utils.RuleApplicationNode):
raise ValueError
node_type_list.append(data_constants.RULE_APPLICATION)
if not node.children:
node_1_idx_list.append(-1)
node_2_idx_list.append(-1)
elif len(node.children) == 1:
node_1_idx_list.append(fingerprint_to_idx[_get_node_fingerprint(
node.children[0])])
node_2_idx_list.append(-1)
elif len(node.children) == 2:
node_1_idx_list.append(fingerprint_to_idx[_get_node_fingerprint(
node.children[0])])
node_2_idx_list.append(fingerprint_to_idx[_get_node_fingerprint(
node.children[1])])
else:
raise ValueError
application_idx_list.append(
application_idx_fn(node.span_begin, node.span_end, node.rule))
for node in span_to_aggregation_nodes[(span_begin, span_end)]:
if not isinstance(node, parsing_utils.AggregationNode):
raise ValueError
node_type_list.append(data_constants.AGGREGATION)
application_idx_list.append(-1)
# Compute sum of first 2 nodes.
node_1_fingerprint = _get_node_fingerprint(node.children[0])
node_1_idx = fingerprint_to_idx[node_1_fingerprint]
node_1_idx_list.append(node_1_idx)
node_2_fingerprint = _get_node_fingerprint(node.children[1])
node_2_idx = fingerprint_to_idx[node_2_fingerprint]
node_2_idx_list.append(node_2_idx)
current_index += 1
# Sum the remaining.
for idx in range(2, len(node.children)):
node_type_list.append(data_constants.AGGREGATION)
application_idx_list.append(-1)
node_1_idx_list.append(current_index - 1)
node_2_idx = fingerprint_to_idx[_get_node_fingerprint(
node.children[idx])]
node_2_idx_list.append(node_2_idx)
current_index += 1
# Point to last node for index.
fingerprint = _get_node_fingerprint(node)
fingerprint_to_idx[fingerprint] = current_index - 1
num_nodes = current_index
return (node_type_list, node_1_idx_list, node_2_idx_list,
application_idx_list, num_nodes)
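# Binarization sketch: an AggregationNode with three children already
# serialized at indices 0, 1, 2 is emitted as two AGGREGATION entries:
#   index 3: node_1_idx=0, node_2_idx=1   # sum of the first two children
#   index 4: node_1_idx=3, node_2_idx=2   # running sum plus the third child
# and the aggregation node's fingerprint is mapped to the final index (4).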
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/data/forest_serialization.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating parse forests for model training."""
import collections
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "/baseline_replication/TMCD")
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
class ForestNode(object):
"""Parent class representing a node in parse forest."""
def __init__(self, span_begin, span_end, target_string, rule, children):
self.span_begin = span_begin
self.span_end = span_end
self.target_string = target_string
self.rule = rule # Can be None for AggregationNode.
self.children = children # List of ForestNode.
def __str__(self):
node_type = self.__class__.__name__
return "%s (%s, %s): %s, %s" % (node_type, self.span_begin,
self.span_end,
self.target_string,
self.rule)
def __repr__(self):
return self.__str__()
class AggregationNode(ForestNode):
"""Represents an aggregation over multiple nodes."""
def __init__(self, children):
target_string = children[0].target_string
span_begin = children[0].span_begin
span_end = children[0].span_end
# All nodes should have the same span and target_string.
for node in children:
if ((node.target_string, node.span_begin, node.span_end) !=
(target_string, span_begin, span_end)):
raise ValueError("Cannot aggreagate different spans or targets: %s" %
children)
super(AggregationNode, self).__init__(span_begin, span_end, target_string,
None, children)
class RuleApplicationNode(ForestNode):
"""Represents an anchored rule application."""
def __init__(self, rule, children, span_begin, span_end, target_string):
super(RuleApplicationNode, self).__init__(span_begin, span_end,
target_string, rule, children)
def _fingerprint(node):
return node.target_string
def _aggregate(nodes):
"""Returns list of nodes aggregated by target string."""
fingerprints_to_nodes = collections.OrderedDict()
aggregated_nodes = []
for node in nodes:
fingerprint = _fingerprint(node)
if fingerprint not in fingerprints_to_nodes:
fingerprints_to_nodes[fingerprint] = []
fingerprints_to_nodes[fingerprint].append(node)
for _, nodes in fingerprints_to_nodes.items():
if len(nodes) > 1:
aggregated_node = AggregationNode(nodes)
aggregated_nodes.append(aggregated_node)
else:
aggregated_nodes.append(nodes[0])
return aggregated_nodes
def filter_nodes(nodes, target_string):
new_nodes = []
for node in nodes:
if node.target_string not in target_string:
continue
new_nodes.append(node)
return new_nodes
def get_target_node(source, target, rules):
"""Return node corresponding to parses for target, or None."""
tokens = source.split(" ")
def node_fn(span_begin, span_end, rule, children):
target_string = qcfg_rule.apply_target(
rule, [node.target_string for node in children])
return RuleApplicationNode(rule, children, span_begin, span_end,
target_string)
def postprocess_fn(nodes):
nodes = filter_nodes(nodes, target)
return _aggregate(nodes)
nodes = qcfg_parser.parse(
tokens, rules, node_fn=node_fn, postprocess_cell_fn=postprocess_fn)
# Filter for nodes where target_string matches target exactly.
ret_nodes = []
for node in nodes:
if node.target_string == target:
ret_nodes.append(node)
if not ret_nodes:
return None
if len(ret_nodes) > 1:
raise ValueError
return ret_nodes[0]
def get_merged_node(source, rules):
"""Return node corresponding to all parses."""
tokens = source.split(" ")
def node_fn(span_begin, span_end, rule, children):
# Target string is ignored for this case.
target_string = None
return RuleApplicationNode(rule, children, span_begin, span_end,
target_string)
def postprocess_fn(nodes):
if len(nodes) > 1:
return [AggregationNode(nodes)]
else:
return nodes
nodes = qcfg_parser.parse(
tokens, rules, node_fn=node_fn, postprocess_cell_fn=postprocess_fn)
if len(nodes) != 1:
raise ValueError("example `%s` len(nodes) != 1: %s" % (source, nodes))
return nodes[0]
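# Usage sketch (rules mirror those in qcfg_parser_test.py):
#   rules = [qcfg_rule.rule_from_string("dax ### DAX"),
#            qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1")]
#   target_node = get_target_node("dax twice", "DAX DAX", rules)
#   merged_node = get_merged_node("dax twice", rules)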
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/parser/data/parsing_utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data structures for representing Quasi-Synchronous CFG (QCFG) rules.
Currently, both terminal and non-terminal symbols are simply represented
as strings, with special strings reserved for non-terminals.
QCFG rules used by NQG are subject to the following restrictions:
- There is only one non-terminal symbol, `NT`
- The only allowed non-terminal indexes are 1 and 2.
Therefore, we only need to reserve two strings to represent indexed
non-terminals.
We also expect all rules to be normalized as follows: a non-terminal with index
2 should never appear before a non-terminal with index 1 in the source
sequence.
Note that this data structure could potentially be improved:
1. A more flexible representation for terminal and non-terminal symbols
would avoid possible collisions between terminal and non-terminal symbols,
and allow for representing QCFGs that do not conform to the restrictions above.
2. Representing symbols as integers rather than strings may have computational
benefits for various operations over QCFG rules.
"""
import collections
# Represents the non-terminal symbol `NT` with linked index 1.
NT_1 = "NT_1"
# Represents the non-terminal symbol `NT` with linked index 2.
NT_2 = "NT_2"
# All other strings are assumed to represent terminal symbols.
# The LHS non-terminal is always assumed to be `NT` so is not represented.
QCFGRuleParent = collections.namedtuple(
"QCFGRuleParent",
[
"source", # Tuple of source symbols (strings).
"target", # Tuple of target symbols (strings).
"arity", # The number of unique non-terminal indexes (0, 1, or 2).
])
# Used for separating source and target sequences for string formatting.
SEPARATOR = "###"
# Define sub-class to override __str__ and __repr__ for easier debugging.
class QCFGRule(QCFGRuleParent):
def __str__(self):
return "%s %s %s" % (" ".join(self.source), SEPARATOR, " ".join(
self.target))
def __repr__(self):
return str(self)
def _get_arity(source):
if NT_1 in source and NT_2 in source:
return 2
if NT_1 in source:
return 1
if NT_2 in source:
raise ValueError("Source is unnormalized: %s" % source)
return 0
def rule_from_string(rule_str):
"""Parse rule in format 'source SEPARATOR target'."""
splits = rule_str.split(SEPARATOR)
if len(splits) != 2:
raise ValueError("Invalid rule string: %s" % rule_str)
source_str, target_str = splits
source = source_str.strip().split()
target = target_str.strip().split()
arity = _get_arity(source)
return QCFGRule(tuple(source), tuple(target), arity)
def apply_target(rule, substitutions):
"""Return target string with non-terminals replaced with substitutions."""
if rule.arity != len(substitutions):
raise ValueError
output = []
for token in rule.target:
if token == NT_1:
output.append(substitutions[0])
elif token == NT_2:
output.append(substitutions[1])
else:
output.append(token)
return " ".join(output)
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/qcfg/qcfg_rule.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read and write QCFG grammars to/from human readable txt files."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_rule
from tensorflow.io import gfile
def read_rules(filename):
"""Read rule txt file to list of rules."""
rules = []
with gfile.GFile(filename, "r") as txt_file:
for line in txt_file:
line = line.rstrip()
rule = qcfg_rule.rule_from_string(line)
rules.append(rule)
print("Loaded %s rules from %s." % (len(rules), filename))
return rules
def write_rules(rules, filename):
"""Write rules to txt file."""
with gfile.GFile(filename, "w") as txt_file:
for rule in rules:
line = "%s\n" % str(rule)
txt_file.write(line)
print("Wrote %s rules to %s." % (len(rules), filename))
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/qcfg/qcfg_file.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for QCFG parsing by extending a general CFG parser."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_parser
from common.cky import cfg_rule
from model.qcfg import qcfg_rule
def _convert_rhs(rule, nt_idx, tokens_to_input_ids):
"""Convert rule to `rhs` argument for CFGRule."""
rhs = []
for token in rule.source:
if token == qcfg_rule.NT_1:
rhs.append(cfg_rule.CFGSymbol(idx=nt_idx, type=cfg_rule.NON_TERMINAL))
elif token == qcfg_rule.NT_2:
rhs.append(cfg_rule.CFGSymbol(idx=nt_idx, type=cfg_rule.NON_TERMINAL))
else:
if token not in tokens_to_input_ids:
# Rule contains tokens not in the input so can be ignored for parsing.
return None
else:
token_id = tokens_to_input_ids[token]
rhs.append(cfg_rule.CFGSymbol(idx=token_id, type=cfg_rule.TERMINAL))
return tuple(rhs)
def parse(tokens, rules, node_fn, postprocess_cell_fn, verbose=False):
"""Run bottom up parser.
Args:
tokens: List of strings for input.
rules: List of QCFGRule instances.
node_fn: Function with input arguments (span_begin, span_end, rule,
children) and returns a "node".
postprocess_cell_fn: Function from a list of "nodes" to "nodes".
verbose: Print debug output if True.
Returns:
A List of "node" objects for completed parses.
"""
if verbose:
print("tokens: %s" % (tokens,))
print("rules:")
for rule in rules:
print(str(rule))
# Convert tokens to integer IDs.
tokens_to_input_ids = {}
input_ids_to_tokens = {}
for idx, token in enumerate(set(tokens)):
input_ids_to_tokens[idx] = token
tokens_to_input_ids[token] = idx
input_ids = [tokens_to_input_ids[token] for token in tokens]
# Our QCFG grammars always use a single NT symbol.
nt_idx = 0
# Convert to ParserRule format.
idx_to_rule = {}
parser_rules = []
rule_idx = 0
for rule in rules:
rhs = _convert_rhs(rule, nt_idx, tokens_to_input_ids)
if rhs is None:
continue
parser_rule = cfg_rule.CFGRule(idx=rule_idx, lhs=nt_idx, rhs=rhs)
parser_rules.append(parser_rule)
idx_to_rule[rule_idx] = rule
rule_idx += 1
# Wrap node_fn to pass original Rule instead of CFGRule.
def populate_fn(span_begin, span_end, parser_rule, children):
rule = idx_to_rule[parser_rule.idx]
return node_fn(span_begin, span_end, rule, children)
nonterminals = {nt_idx}
start_idx = nt_idx
if verbose:
print("parser_rules: %s" % parser_rules)
parses = cfg_parser.parse(
input_ids,
parser_rules,
nonterminals,
start_idx,
populate_fn,
postprocess_cell_fn,
verbose=verbose)
return parses
def can_parse(source, target, rules, verbose=False):
"""Return True if source and target can be derived given rules using parser.
Args:
source: Source string (cannot contain non-terminals).
target: Target string (cannot contain non-terminals).
rules: List of QCFGRule instances.
verbose: Print debug output if True.
Returns:
True if source and target can be derived.
"""
def node_fn(unused_span_begin, unused_span_end, rule, children):
"""Represent nodes as target strings."""
return qcfg_rule.apply_target(rule, children)
def postprocess_cell_fn(nodes):
"""Filter and merge generated nodes."""
new_nodes = []
for node in nodes:
# Discard targets that are not substrings of the gold target.
if node in target:
new_nodes.append(node)
return list(set(new_nodes))
tokens = source.split(" ")
outputs = parse(
tokens,
rules,
verbose=verbose,
node_fn=node_fn,
postprocess_cell_fn=postprocess_cell_fn)
if outputs and target in outputs:
return True
else:
return False
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/qcfg/qcfg_parser.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for qcfg_parser."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
import tensorflow as tf
def _node_fn(unused_span_begin, unused_span_end, rule, children):
"""Nodes will represent target strings."""
return qcfg_rule.apply_target(rule, children)
def _postprocess_cell_fn(nodes):
return nodes
class QcfgParserTest(tf.test.TestCase):
def test_parse(self):
tokens = ["dax", "twice"]
rules = [
qcfg_rule.rule_from_string("dax ### DAX"),
qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1"),
]
parses = qcfg_parser.parse(tokens, rules, _node_fn, _postprocess_cell_fn)
self.assertEqual(parses, ["DAX DAX"])
def test_parse_flat(self):
tokens = ["dax", "twice"]
rules = [
qcfg_rule.rule_from_string("dax twice ### DAX TWICE"),
]
parses = qcfg_parser.parse(tokens, rules, _node_fn, _postprocess_cell_fn)
self.assertEqual(parses, ["DAX TWICE"])
def test_can_parse(self):
rules = [
qcfg_rule.rule_from_string("dax ### DAX"),
qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1"),
]
can_parse = qcfg_parser.can_parse(
source="dax twice", target="DAX DAX", rules=rules)
self.assertTrue(can_parse)
if __name__ == "__main__":
tf.test.main()
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/qcfg/qcfg_parser_test.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute % of examples in a dataset that can be derived by a given QCFG."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_file
from model.qcfg import qcfg_parser
from tasks import tsv_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("limit", 100, "End processing at this example index.")
flags.DEFINE_integer("offset", 0, "Start processing at this example index.")
flags.DEFINE_string("rules", "", "Grammar rules txt file.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
rules = qcfg_file.read_rules(FLAGS.rules)
print("Rules: %s" % rules)
num_examples = 0
num_covered = 0
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
print("Processing example %s." % idx)
print("Source: %s" % example[0])
print("Target: %s" % example[1])
source = example[0]
gold_target = example[1]
can_parse = qcfg_parser.can_parse(source, gold_target, rules, verbose=False)
num_examples += 1
if can_parse:
num_covered += 1
else:
print("Output set does not contain gold target.")
print("%s covered out of %s" % (num_covered, num_examples))
if __name__ == "__main__":
app.run(main)
| CompGenRep_MLRC2022-main | baseline_replication/TMCD/model/qcfg/compute_recall.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for grammar induction."""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import codelength_utils
from model.induction import derivation_utils
from model.induction import exact_match_utils
from model.induction import rule_utils
from model.induction import split_utils
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
InductionConfig = collections.namedtuple("InductionConfig", [
"sample_size",
"max_iterations",
"min_delta",
"terminal_codelength",
"non_terminal_codelength",
"parse_sample",
"allow_repeated_target_nts",
"seed_exact_match",
"balance_parens",
])
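# Illustrative configuration (field values are made up to show the shape of
# the tuple, not taken from the paper):
#   config = InductionConfig(
#       sample_size=500, max_iterations=1000, min_delta=0.0,
#       terminal_codelength=32.0, non_terminal_codelength=1.0,
#       parse_sample=10, allow_repeated_target_nts=True,
#       seed_exact_match=True, balance_parens=True)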
# We track the state of search during rule induction in the following tuple.
# Note that our implementation relies on two important aspects:
# 1. We can quickly determine if any substitution can potentially exist such
# that a given rule can be used to derive a given string pair, based only
# on terminal symbol overlap.
# 2. The set of derivable string pairs in our induced grammar is monotonically
# increasing, based on our criteria for adding and removing rules.
SearchState = collections.namedtuple(
"SearchState",
[
"current_rules", # Set of rules in induced grammar.
"rules_to_candidates", # Dictionary of rules to candidates.
"derivable_rules", # Set of derivable rules.
])
def _find_affected_rules(rules, new_rule):
# TODO(petershaw): This can potentially be made more efficient by
# pre-indexing rules in a data structure such as a Trie.
found_rules = []
for rule in rules:
if (rule_utils.rhs_can_maybe_derive(new_rule.source, rule.source) and
rule_utils.rhs_can_maybe_derive(new_rule.target, rule.target)):
found_rules.append(rule)
return found_rules
def _has_balanced_parens(rhs):
"""Returns True if all '(' precede and are followed by a correspoding ')'."""
open_parens = 0
for token in rhs:
for char in token:
if char == "(":
open_parens += 1
elif char == ")":
open_parens -= 1
if open_parens < 0:
return False
return open_parens == 0
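# Examples:
#   _has_balanced_parens(["count", "(", "x", ")"])  # -> True
#   _has_balanced_parens([")", "("])                # -> False (')' before '(')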
def _is_balanced_paren_candidate(rule):
if not _has_balanced_parens(rule.source):
return False
if not _has_balanced_parens(rule.target):
return False
return True
def _filter_unbalanced_paren_candidates(rules):
new_rules = set()
for rule in rules:
if _is_balanced_paren_candidate(rule):
new_rules.add(rule)
return new_rules
def _get_max_rule(search_state, config, examples):
"""Identify a rule to add that maximizes the decrease in codelength."""
# Dict of rule candidates to the codelength savings
# (i.e. negative codelength delta).
candidates_to_delta = {}
# Map of rule candidates to the set of rules that they enable removing.
# (inverse of rules_to_candidates).
candidates_to_rules = collections.defaultdict(set)
for rule in search_state.current_rules:
candidates = search_state.rules_to_candidates[rule]
for candidate in candidates:
if candidate not in candidates_to_delta:
# Subtract cost of new rule if not already accounted for.
candidates_to_delta[candidate] = -codelength_utils.rule_codelength(
candidate, config)
# Add cost of every possible removed rule.
candidates_to_delta[candidate] += codelength_utils.rule_codelength(
rule, config)
candidates_to_rules[candidate].add(rule)
# Sort candidates by codelength reduction (prior to computing the codelength
# delta of the dataset encoding, which is relatively more expensive).
# Use lexical ordering to break ties.
candidates_to_delta_sorted = sorted(
candidates_to_delta.items(), key=lambda x: (-x[1], x[0]))
# For debugging, print up to the top 15 candidates.
print("Candidate rules:")
for rule, delta in candidates_to_delta_sorted[:15]:
print("%s (%s)" % (rule, delta))
min_delta = config.min_delta
max_rule_to_add = None
max_rules_to_remove = None
for rule, delta in candidates_to_delta_sorted:
if delta <= min_delta:
break
rules_to_remove = candidates_to_rules[rule]
targets_delta = codelength_utils.get_dataset_encoding_delta(
sample_size=config.parse_sample,
examples=examples,
current_rules=search_state.current_rules,
candidate_rule_to_add=rule,
candidate_rules_to_remove=rules_to_remove)
print("Targets encoding delta for %s: -%s" % (rule, targets_delta))
# Compute the full delta including both the codelength reduction of encoding
# the grammar (previously computed) and the codelength delta of encoding
# the targets with the new grammar.
delta -= targets_delta
if delta > min_delta:
min_delta = delta
max_rule_to_add = rule
max_rules_to_remove = rules_to_remove
return max_rule_to_add, max_rules_to_remove
def _update_state(affected_rules, search_state, config):
"""Sparsely update the state for rules that may be affected."""
for idx, affected_rule in enumerate(affected_rules):
# Debug logging every Nth rule.
if idx % 10 == 0:
print("Updating rule %s of %s." % (idx + 1, len(affected_rules)))
# Check if rule can now be generated. Ideally, this should have been
# determined upstream when determining which rules could be removed,
# but some cases are not caught until here, such as when source
# sequences contain repeated substrings and are therefore not considered
# by `get_candidates`.
# Regardless, it is still important to run this for the side-effect of
# updating the set of derivable rules.
if derivation_utils.can_derive(affected_rule, search_state.current_rules,
search_state.derivable_rules):
print("Can now generate: %s." % str(affected_rule))
search_state.current_rules.remove(affected_rule)
else:
candidates = split_utils.find_possible_splits(
affected_rule,
search_state.derivable_rules,
allow_repeated_target_nts=config.allow_repeated_target_nts,
)
if config.balance_parens:
candidates = _filter_unbalanced_paren_candidates(candidates)
for candidate in candidates:
search_state.rules_to_candidates[affected_rule].add(candidate)
print("Updates complete.")
def _induce_rules_for_examples(examples, seed_rules, config):
"""Iteratively searches for rules to optimize codelength objective."""
# Initialize the search state.
search_state = SearchState(
current_rules=seed_rules,
rules_to_candidates=collections.defaultdict(set),
derivable_rules=seed_rules.copy())
# Update state for all seed rules.
_update_state(seed_rules, search_state, config)
# Iteratively update grammar.
for iteration_num in range(config.max_iterations):
print("Iteration %s." % iteration_num)
rule, rules_to_remove = _get_max_rule(search_state, config, examples)
# Break if there is no candidate that improves codelength objective.
if rule is None:
print("Breaking as no candidate exceeds minimum threshold.")
break
# Otherwise, update the set of rules.
print("Adding rule: %s" % str(rule))
search_state.current_rules.add(rule)
search_state.derivable_rules.add(rule)
for rule_to_remove in rules_to_remove:
print("Removing rule: %s" % str(rule_to_remove))
search_state.current_rules.remove(rule_to_remove)
del search_state.rules_to_candidates[rule_to_remove]
print("Number of current_rules: %s" % len(search_state.current_rules))
# Update the search state based on any potentially affected rules.
# The set of affected rules includes any rule that the added rule
# may potentially be used in a derivation for.
affected_rules = _find_affected_rules(search_state.current_rules, rule)
_update_state(affected_rules, search_state, config)
# Return the set of induced rules.
return search_state.current_rules
def _example_to_rule(source_str, target_str):
"""Convert (source, target) example to a QCFGRule."""
return qcfg_rule.QCFGRule(
tuple(source_str.split()), tuple(target_str.split()), arity=0)
def _get_rules_for_other_examples(induced_rules, other_examples):
"""Add rules for examples outside of sample that cannot be derived."""
new_rules = set()
for source_str, target_str in other_examples:
goal_rule = qcfg_rule.QCFGRule(
tuple(source_str.split()), tuple(target_str.split()), arity=0)
if not derivation_utils.can_derive(goal_rule, induced_rules, None):
new_rules.add(goal_rule)
print("Added %s rules for examples outside of initial sample." %
len(new_rules))
return new_rules
def _split_examples(examples, config):
"""Split examples into a sampled and a remaining subset based on config."""
# Only consider unique examples.
# TODO(petershaw): Consider preserving the number of occurrences for each
# unique example to better weight sampling for computing the dataset encoding
# codelength.
examples = list(set([tuple(example) for example in examples]))
if config.sample_size:
# Sort by number of input tokens.
examples_sorted = sorted(examples, key=lambda x: len(x[0].split()))
examples_sample = examples_sorted[:config.sample_size]
examples_other = examples_sorted[config.sample_size:]
else:
examples_sample = examples
examples_other = []
return examples_sample, examples_other
def induce_rules(examples, config):
"""Return set of induced rules for a given set of examples."""
# For efficiency, we only run grammar induction on a subset of examples based
# on the sample size specified in the config.
print("Started to induce rules")
examples_sample, examples_other = _split_examples(examples, config)
print("Started to initialize examples")
# Initialize with a rule for each example.
seed_rules = set()
for source_str, target_str in examples_sample:
seed_rules.add(_example_to_rule(source_str, target_str))
print("Added %s seed rules for examples." % len(seed_rules))
# Optionally add exact match rules.
if config.seed_exact_match:
seed_rules |= exact_match_utils.get_exact_match_rules(examples_sample)
print("Seed rules after adding exact match rules for sampled examples: %s." %
len(seed_rules))
# Iteratively induce rules over the sampled set of examples.
induced_rules = _induce_rules_for_examples(examples_sample, seed_rules,
config)
print("Induced %s rules from sample of %s examples." %
(len(induced_rules), len(examples_sample)))
# Verify that induced grammar can derive all examples in examples_sample.
# We use the QCFG parser rather than `derivation_utils` as it is typically
# faster when we do not need to consider non-terminals in the goal strings,
# and to verify consistency of the algorithms.
for source_str, target_str in examples_sample:
if not qcfg_parser.can_parse(source_str, target_str, induced_rules):
raise ValueError("Induced rules cannot parse example: (%s, %s)" %
(source_str, target_str))
print("Checking %s remaining examples." % len(examples_other))
# Add rules for any examples that were not in the original sample and cannot
# be derived by the induced set of rules.
if examples_other:
if config.seed_exact_match:
induced_rules |= exact_match_utils.get_exact_match_rules(examples_other)
print("Rules after exact match for remaining examples: %s" %
len(induced_rules))
for source_str, target_str in examples_other:
if not qcfg_parser.can_parse(source_str, target_str, induced_rules):
induced_rules.add(_example_to_rule(source_str, target_str))
print("Rules after adding rules for unparsable remaining examples: %s" %
len(induced_rules))
return induced_rules
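# Illustrative usage sketch (hypothetical example values; InductionConfig is
# constructed with the same keyword arguments as in induce_rules.py):
#   examples = [("foo bar", "bar foo"), ("foo baz", "baz foo")]
#   config = InductionConfig(sample_size=500, max_iterations=10000, min_delta=0,
#                            terminal_codelength=32, non_terminal_codelength=1,
#                            parse_sample=10, allow_repeated_target_nts=True,
#                            seed_exact_match=True, balance_parens=True)
#   rules = induce_rules(examples, config)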
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/induction_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for identifying candidate rules."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import rule_utils
from model.qcfg import qcfg_rule
# Non-terminal with temporary index that is guaranteed to be unused in the
# current rule. This should be replaced with NT_1 or NT_2 to form a valid
# QCFGRule.
NT_TMP = "NT_?"
def _get_non_terminals(rhs):
"""Return set of non-terminal symbols in `rhs`."""
non_terminals = set()
for symbol in rhs:
if symbol in (qcfg_rule.NT_1, qcfg_rule.NT_2, NT_TMP):
non_terminals.add(symbol)
return non_terminals
def _get_tmp_nt_replacement(nts):
if nts == {NT_TMP}:
return qcfg_rule.NT_1
elif nts == {NT_TMP, qcfg_rule.NT_1}:
return qcfg_rule.NT_2
elif nts == {NT_TMP, qcfg_rule.NT_2}:
return qcfg_rule.NT_1
else:
raise ValueError("Unexpected NTs: %s" % nts)
def _replace_tmp_nt(source, target, nts):
new_nt = _get_tmp_nt_replacement(nts)
source = rule_utils.rhs_replace(source, [NT_TMP], new_nt)
target = rule_utils.rhs_replace(target, [NT_TMP], new_nt)
return source, target
def _make_rule(nts, source, target):
"""Canoncalize NT indexes and return QCFGRule."""
arity = len(nts)
source, target = rule_utils.canonicalize_nts(source, target, arity)
return qcfg_rule.QCFGRule(tuple(source), tuple(target), arity)
def _maybe_get_candidate_pair(source_g, source_h, target_g, target_h):
"""Returns candidate rule pair if proposed sources and targets are valid."""
# Check that proposed sources and targets contain same non-terminal indexes.
nts_g = _get_non_terminals(source_g)
if nts_g != _get_non_terminals(target_g):
return None
nts_h = _get_non_terminals(source_h)
if nts_h != _get_non_terminals(target_h):
return None
# Canonicalize non-terminal index ordering and return candidate pair.
source_g, target_g = _replace_tmp_nt(source_g, target_g, nts_g)
rule_g = _make_rule(nts_g, source_g, target_g)
rule_h = _make_rule(nts_h, source_h, target_h)
return (rule_g, rule_h)
def _get_split_candidates(rule, allow_repeated_target_nts=True):
"""Implements `SPLIT` procedure described in paper appendix.
To explain this function, let us review some notation for SCFGs/QCFGs.
Let `g` and `h` refer to QCFG rules. Let `=>_g` denote the application of
rule g, such that <a,b> `=>_g` <c,d> means
that <c,d> can be generated from <a,b> by applying the rule `g` to replace
some indexed non-terminal in <a,b>. Let
  `=>_g =>_h` refer to a chain of rule applications of `g` and `h`, omitting
  the intermediate rule pair.
  We can now define the behavior of this function. Let `NT -> <a,b>` refer to
the input argument `rule`. The function returns the following set:
{(g,h) | <NT,NT> =>_g =>_h <a,b>}
In other words, we return pairs of rules that can generate the input `rule`.
We leave it to the caller to also consider the rule pair (h,g).
Certain restrictions also apply to the rule pairs that will be considered.
For example, if `rule` is:
NT -> <foo bar, bar foo>
Then the return set will include the following rule pair:
NT -> <NT_0 bar, bar NT_0>
NT -> <foo, foo>
Args:
    rule: A QCFGRule.
allow_repeated_target_nts: Whether to allow repeated substrings to be
replaced with multiple non-terminals sharing the same index in target
sequences.
Returns:
List of rule pairs.
"""
candidate_pairs = []
# Consider all pairs of subspans in source and target to replace with
# a new non-terminal symbol.
for source_nt_start in range(len(rule.source)):
for source_nt_end in range(source_nt_start + 1, len(rule.source) + 1):
source_h = rule.source[source_nt_start:source_nt_end]
# Don't allow source_h to occur multiple times in rule.source.
      # Otherwise this leads to an ambiguous selection between the occurrences,
# so take the more conservative approach and disallow this.
if rule_utils.rhs_count(rule.source, source_h) > 1:
continue
# Don't allow source_h to only contain a single non-terminal.
if source_h == tuple([qcfg_rule.NT_1]) or source_h == tuple(
[qcfg_rule.NT_2]):
continue
source_g = (
rule.source[:source_nt_start] + tuple([NT_TMP]) +
rule.source[source_nt_end:])
# Don't allow source_g to only contain a single non-terminal.
if source_g == tuple([NT_TMP]):
continue
# Don't allow source_g to contain >2 non-terminals.
if qcfg_rule.NT_1 in source_g and qcfg_rule.NT_2 in source_g:
continue
for target_nt_start in range(len(rule.target)):
for target_nt_end in range(target_nt_start + 1, len(rule.target) + 1):
target_h = rule.target[target_nt_start:target_nt_end]
# Optionally allow target_h to occur multiple times in rule.target.
if rule_utils.rhs_count(rule.target, target_h) > 1:
if allow_repeated_target_nts:
target_g = rule_utils.rhs_replace(rule.target, target_h, NT_TMP)
else:
continue
else:
target_g = (
rule.target[:target_nt_start] + tuple([NT_TMP]) +
rule.target[target_nt_end:])
# Don't allow target_g to contain >2 non-terminals.
if qcfg_rule.NT_1 in target_g and qcfg_rule.NT_2 in target_g:
continue
candidate_pair = _maybe_get_candidate_pair(source_g, source_h,
target_g, target_h)
if candidate_pair:
candidate_pairs.append(candidate_pair)
return candidate_pairs
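# Illustrative sketch (hypothetical usage, assuming the QCFGRule constructor
# as used elsewhere in this repo): for the rule
#   rule = qcfg_rule.QCFGRule(("foo", "bar"), ("bar", "foo"), arity=0)
# the pairs returned by _get_split_candidates(rule) include the pair from the
# docstring above: a rule that replaces "foo" with an indexed non-terminal,
# paired with the rule NT -> <foo, foo>.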
def find_possible_splits(rule, derivable_rules, allow_repeated_target_nts=True):
"""Implements `NEW` procedure described in paper appendix."""
candidates = _get_split_candidates(rule, allow_repeated_target_nts)
# Set of QCFGRules.
rule_candidates = set()
for rule_b, rule_c in candidates:
if rule_b in derivable_rules and rule_c not in derivable_rules:
# <NT, NT> =>_a == <NT, NT> =>_b =>_c where b is in derivable_rules.
rule_candidates.add(rule_c)
elif rule_c in derivable_rules and rule_b not in derivable_rules:
# <NT, NT> =>_a == <NT, NT> =>_b =>_c where c is in derivable_rules.
rule_candidates.add(rule_b)
return rule_candidates
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/split_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various common functions related to QCFGRules."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_rule
def _swap_nt_order(rhs):
new_rhs = []
for symbol in rhs:
if symbol == qcfg_rule.NT_1:
new_rhs.append(qcfg_rule.NT_2)
elif symbol == qcfg_rule.NT_2:
new_rhs.append(qcfg_rule.NT_1)
else:
new_rhs.append(symbol)
return tuple(new_rhs)
def canonicalize_nts(source, target, arity):
"""Follows convention of source indexes being in order."""
if arity == 1:
if qcfg_rule.NT_2 in source:
source = rhs_replace(source, [qcfg_rule.NT_2], qcfg_rule.NT_1)
target = rhs_replace(target, [qcfg_rule.NT_2], qcfg_rule.NT_1)
elif arity == 2:
if qcfg_rule.NT_1 not in source or qcfg_rule.NT_2 not in source:
raise ValueError("Bad arity 2 source: %s" % (source,))
if source.index(qcfg_rule.NT_1) > source.index(qcfg_rule.NT_2):
source = _swap_nt_order(source)
target = _swap_nt_order(target)
return source, target
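# Illustrative sketch (hypothetical usage, writing NT_1/NT_2 for
# qcfg_rule.NT_1/qcfg_rule.NT_2):
#   canonicalize_nts(("foo", NT_2), ("bar", NT_2), arity=1)
# returns (("foo", NT_1), ("bar", NT_1)), and
#   canonicalize_nts((NT_2, "foo", NT_1), (NT_1, NT_2), arity=2)
# returns ((NT_1, "foo", NT_2), (NT_2, NT_1)), so that non-terminal indexes
# appear in increasing order in the source.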
def rhs_count(list_to_search, sublist):
"""Returns count of occurances of sublist in list_to_search."""
if len(sublist) > len(list_to_search):
return 0
count = 0
for idx in range(len(list_to_search) - len(sublist) + 1):
if list_to_search[idx:idx + len(sublist)] == sublist:
count += 1
return count
def rhs_contains(list_to_search, sublist):
"""Returns True if sublist is contained in list_to_search."""
if len(sublist) > len(list_to_search):
return False
for idx in range(len(list_to_search) - len(sublist) + 1):
if list_to_search[idx:idx + len(sublist)] == sublist:
return True
return False
def rhs_can_maybe_derive(rhs_a, rhs_b):
"""Return False if rhs_a cannot be used in a derivation of rhs_b.
This function uses a fast approximation based on terminal sequence overlap
to identify cases where `rhs_a` could never be used in a derivation of
`rhs_b`.
For example, given `rhs_a`:
"foo NT foo NT"
There is a derivation that includes `rhs_a` that derives:
"foo bar bar foo NT"
But there is no derivation that includes `rhs_a` and derives:
"foo NT dax NT"
Args:
rhs_a: Tuple of strings for source or target of QCFGRule.
rhs_b: Same type as rhs_a.
Returns:
False if rhs_a cannot be used in a derivation of rhs_b.
"""
len_rhs_a = len(rhs_a)
len_rhs_b = len(rhs_b)
if len_rhs_a > len_rhs_b:
return False
if not rhs_a or not rhs_b:
return False
# Represent search state with backtracking.
rhs_a_idx_backtrack = 0
rhs_a_idx = 0
rhs_b_idx_backtrack = 0
rhs_b_idx = 0
while True:
if rhs_a_idx >= len_rhs_a:
# Completed matching all terminals.
return True
if rhs_b_idx >= len_rhs_b:
# Failed to match all terminal sequences.
return False
# Fail early if match cannot be made based on remaining length.
if (len_rhs_a - rhs_a_idx) > (len_rhs_b - rhs_b_idx):
return False
a_symbol = rhs_a[rhs_a_idx]
b_symbol = rhs_b[rhs_b_idx]
if a_symbol == b_symbol:
# Matched next terminal symbol, increment indexes.
rhs_a_idx += 1
rhs_b_idx += 1
elif a_symbol == qcfg_rule.NT_2 or a_symbol == qcfg_rule.NT_1:
# Completed matching terminal sequence.
# Increment backtrack indexes past this sequence.
rhs_a_idx += 1
rhs_a_idx_backtrack = rhs_a_idx
rhs_b_idx_backtrack = rhs_b_idx
else:
# Symbols do not match, backtrack.
rhs_a_idx = rhs_a_idx_backtrack
rhs_b_idx_backtrack += 1
rhs_b_idx = rhs_b_idx_backtrack
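# Illustrative sketch (hypothetical usage, writing NT for qcfg_rule.NT_1):
#   rhs_can_maybe_derive(("foo", NT, "foo", NT), ("foo", "bar", "bar", "foo", NT))
# returns True, since the terminal sequences ("foo",) and ("foo",) of the
# first argument can be matched in order within the second, whereas
#   rhs_can_maybe_derive(("foo", NT, "foo", NT), ("foo", NT, "dax", NT))
# returns False, matching the docstring example above.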
def rhs_replace(rhs, sublist, replacement):
"""Replace occurrences of sublist in rhs with replacement."""
sublist = tuple(sublist)
rhs = tuple(rhs)
if len(sublist) > len(rhs):
raise ValueError
if not sublist:
raise ValueError
new_list = []
idx = 0
while idx < len(rhs):
if rhs[idx:idx + len(sublist)] == sublist:
new_list.append(replacement)
idx += len(sublist)
else:
new_list.append(rhs[idx])
idx += 1
return tuple(new_list)
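# Illustrative sketch (hypothetical usage):
#   rhs_replace(("a", "b", "c", "b", "c"), ("b", "c"), "X")
# returns ("a", "X", "X"): every non-overlapping occurrence of the sublist is
# collapsed into the single replacement symbol.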
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/rule_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for computing codelengths over QCFG rules."""
import collections
import math
import random
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import rule_utils
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
def rule_codelength(rule, config):
"""Computes the codelength for a given rule."""
length = 0.0
for token in rule.source + rule.target:
if token in {qcfg_rule.NT_1, qcfg_rule.NT_2}:
length += config.non_terminal_codelength
else:
length += config.terminal_codelength
return length
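# Worked example (hypothetical rule, using the default coefficients from
# induce_rules.py: terminal_codelength=32, non_terminal_codelength=1):
# a rule with source ("foo", NT_1) and target ("bar", NT_1) has 2 terminal and
# 2 non-terminal tokens in total, so its codelength is 2 * 32 + 2 * 1 = 66.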
def _aggregate_counts(child_counts):
"""Return aggregated node count as int."""
if not child_counts:
return 1
elif len(child_counts) == 1:
return child_counts[0]
elif len(child_counts) == 2:
return child_counts[0] * child_counts[1]
else:
raise ValueError
def _get_num_all_derivations(source, rules, verbose):
"""Return total number of derivations for any target."""
def node_fn(unused_span_begin, unused_span_end, unused_rule, children):
"""Represent nodes as integer counts of possible derivations."""
return _aggregate_counts(children)
def postprocess_fn(nodes):
"""Merge and sum all nodes."""
return [sum(nodes)]
outputs = qcfg_parser.parse(
source,
rules,
node_fn=node_fn,
postprocess_cell_fn=postprocess_fn,
verbose=verbose)
if len(outputs) != 1:
raise ValueError
num_outputs = outputs[0]
return num_outputs
def _get_num_target_derivations(source, target, rules, verbose):
"""Return number of derivations of target."""
goal_target_string = " ".join(target)
def node_fn(unused_span_begin, unused_span_end, rule, children):
"""Represent nodes as (target string, int count of possible derivations)."""
target_strings = [target_string for target_string, _ in children]
new_target_string = qcfg_rule.apply_target(rule, target_strings)
child_counts = [child_count for _, child_count in children]
count = _aggregate_counts(child_counts)
return (new_target_string, count)
def postprocess_fn(nodes):
"""Discard nodes that cannot reach goal and aggregate counts."""
counts_dict = collections.defaultdict(int)
for target_string, count in nodes:
# Discard any targets that are not substrings of goal target.
if target_string not in goal_target_string:
continue
counts_dict[target_string] += count
return [
(target_string, count) for target_string, count in counts_dict.items()
]
outputs = qcfg_parser.parse(
source,
rules,
node_fn=node_fn,
postprocess_cell_fn=postprocess_fn,
verbose=verbose)
for target_string, count in outputs:
if target_string == goal_target_string:
return count
raise ValueError("No target derivation for example (%s, %s)" %
(source, target))
def _target_codelength(source, target, rules, verbose=False):
"""Return codelength for encoding `target` given `source` and `rules`.
The codelength of the target is computed as -log_2(P(y|x)).
For P(y|x) we use a naive uniform distribution over derivations, such that:
P(y|x) = # of derivations of <x,y> / # of derivations of <x,?>,
  where ? ranges over all possible target strings.
  We therefore run a QCFG parser twice to determine the numerator and
denominator counts.
Args:
source: Tuple of source tokens.
target: Tuple of target tokens.
rules: Set of QCFGRule instances.
verbose: Print debug logging if True.
Returns:
Float representing codelength for encoding `target` given `source` and
`rules`.
"""
num_derivations = _get_num_all_derivations(source, rules, verbose=verbose)
num_target_derivations = _get_num_target_derivations(
source, target, rules, verbose=verbose)
# Note log(B/A) = -log(A/B).
codelength = math.log2(float(num_derivations) / float(num_target_derivations))
if verbose:
print("(%s, %s): %s derivations, %s target derivations, %s codelength" %
(source, target, num_derivations, num_target_derivations, codelength))
return codelength
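# Worked example (hypothetical counts): the codelength above is
# log2(#derivations of <x, ?> / #derivations of <x, y>). If a source admits
# 8 derivations over all targets and 2 of them yield the goal target, the
# target codelength is log2(8 / 2) = 2 bits.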
def _find_relevant_examples(dataset, rule):
"""Find examples in `dataset` where `rule` could be used in a derivation."""
  # TODO(petershaw): This could potentially be more efficient by pre-indexing
# the dataset sources in a data structure such as a Trie.
examples = []
for source_str, target_str in dataset:
source = source_str.split()
target = target_str.split()
if rule_utils.rhs_can_maybe_derive(rule.source, source):
examples.append((source, target))
return examples
def get_dataset_encoding_delta(sample_size,
examples,
current_rules,
candidate_rule_to_add,
candidate_rules_to_remove,
verbose=False):
"""Approximate increase in codelength to encode dataset."""
# Make a copy of the ruleset and add/remove candidates.
new_rules = current_rules.copy()
for rule_to_remove in candidate_rules_to_remove:
new_rules.remove(rule_to_remove)
new_rules.add(candidate_rule_to_add)
relevant_examples = _find_relevant_examples(examples, candidate_rule_to_add)
num_relevant_examples = len(relevant_examples)
sample = False
if verbose:
print("%s relevant rules." % num_relevant_examples)
# If configured, sample rules for effeciency.
if sample_size and num_relevant_examples > sample_size:
random.shuffle(relevant_examples)
relevant_examples = relevant_examples[:sample_size]
sample = True
# Compute the increase in target codelength summed across the sample.
delta = 0
for source, target in relevant_examples:
new_codelength = _target_codelength(
source, target, new_rules, verbose=verbose)
original_codelength = _target_codelength(
source, target, current_rules, verbose=verbose)
delta += (new_codelength - original_codelength)
# Estimate delta across entire set based on our sample.
if sample:
scale_factor = float(num_relevant_examples) / float(sample_size)
delta *= scale_factor
if verbose:
print("Scaling delta by %s." % scale_factor)
if verbose:
print("Delta: %s." % delta)
return delta
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/codelength_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for identifying identical substrings in sources and targets."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_rule
def _in_matched_range(start_idx, end_idx, matched_ranges):
"""Return True if provided indices overlap any spans in matched_ranges."""
for range_start_idx, range_end_idx in matched_ranges:
if not (end_idx <= range_start_idx or start_idx >= range_end_idx):
return True
return False
def _find_exact_matches(source, target):
"""Returns longest non-overlapping sub-strings shared by source and target."""
source_len = len(source)
target_len = len(target)
matches = set()
matched_source_ranges = set()
matched_target_ranges = set()
for sequence_len in range(max(target_len, source_len), 0, -1):
for source_start_idx in range(0, source_len - sequence_len + 1):
source_end_idx = source_start_idx + sequence_len
if _in_matched_range(source_start_idx, source_end_idx,
matched_source_ranges):
continue
for target_start_idx in range(0, target_len - sequence_len + 1):
target_end_idx = target_start_idx + sequence_len
if _in_matched_range(target_start_idx, target_end_idx,
matched_target_ranges):
continue
source_span = source[source_start_idx:source_end_idx]
target_span = target[target_start_idx:target_end_idx]
if source_span == target_span:
matches.add(tuple(source_span))
matched_source_ranges.add((source_start_idx, source_end_idx))
matched_target_ranges.add((target_start_idx, target_end_idx))
return matches
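# Illustrative sketch (hypothetical token sequences):
#   _find_exact_matches(["river", "in", "m0"],
#                       ["answer", "(", "river", "(", "loc_2", "(", "m0", ")", ")", ")"])
# returns {("river",), ("m0",)}, the longest non-overlapping token spans that
# appear in both the source and the target.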
def get_exact_match_rules(dataset):
"""Return set of rules for terminal sequences in both source and target."""
matches = set()
for source_str, target_str in dataset:
source = source_str.split()
target = target_str.split()
matches.update(_find_exact_matches(source, target))
exact_match_rules = set()
for match in matches:
rule = qcfg_rule.QCFGRule(source=tuple(match), target=tuple(match), arity=0)
exact_match_rules.add(rule)
return exact_match_rules
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/exact_match_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Induce and write QCFG rules."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import induction_utils
from model.qcfg import qcfg_file
from tasks import tsv_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file of examples.")
flags.DEFINE_string("output", "", "Output rule txt file.")
flags.DEFINE_integer("sample_size", 500,
"Number of examples to sample for induction.")
flags.DEFINE_integer("max_iterations", 10000,
"Maximum number of grammar induction iterations.")
flags.DEFINE_integer("min_delta", 0,
"Minimum codelength delta to add a new rule.")
flags.DEFINE_integer("terminal_codelength", 32,
"Codelength coeffecient for terminals.")
flags.DEFINE_integer("non_terminal_codelength", 1,
"Codelength coeffecient for non-terminals.")
flags.DEFINE_integer(
"parse_sample", 10,
"Number of examples to sample for estimating target encoding codelength.")
flags.DEFINE_bool(
"allow_repeated_target_nts", True,
"Whether to allow multiple non-terminals with same index in targets.")
flags.DEFINE_bool("seed_exact_match", True,
"Whether to seed induction with exact match rules.")
flags.DEFINE_bool("balance_parens", True,
"Whether to require rules to have balanced parentheses.")
def induce_and_write_rules():
"""Induce and write set of rules."""
examples = tsv_utils.read_tsv(FLAGS.input)
config = induction_utils.InductionConfig(
sample_size=FLAGS.sample_size,
max_iterations=FLAGS.max_iterations,
min_delta=FLAGS.min_delta,
terminal_codelength=FLAGS.terminal_codelength,
non_terminal_codelength=FLAGS.non_terminal_codelength,
parse_sample=FLAGS.parse_sample,
allow_repeated_target_nts=FLAGS.allow_repeated_target_nts,
seed_exact_match=FLAGS.seed_exact_match,
balance_parens=FLAGS.balance_parens,
)
print("In induce rules main")
induced_rules = induction_utils.induce_rules(examples, config)
qcfg_file.write_rules(induced_rules, FLAGS.output)
def main(unused_argv):
induce_and_write_rules()
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/induce_rules.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for applying rules to produce derivations.
Note that in this module we will reuse the QCFGRule tuple to represent both
derived string pairs and QCFG rules. Since we only allow a single LHS
non-terminal, both concepts can be represented as a pair of source and target
sequences. Therefore, we abuse terminology and refer to each concept
interchangeably in certain contexts.
"""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import rule_utils
from model.qcfg import qcfg_rule
def _substitute(rhs_a, rhs_b, nt=qcfg_rule.NT_1):
"""Replace nt in rhs_a with rhs_b, re-indexing non-terminals if needed."""
output = []
for token in rhs_a:
if token == nt and nt == qcfg_rule.NT_2:
# Our goal is to replace NT_2 in rhs_a with rhs_b, but we need to
# do some re-indexing to avoid collisions.
# First, we re-index NT_1 in rhs_b to NT_2.
# Based on the logic in `apply`, if rhs_a has arity 2, then rhs_b
# will have arity < 2, i.e. will not contain NT_2.
rhs_b = rule_utils.rhs_replace(rhs_b, [qcfg_rule.NT_1], qcfg_rule.NT_2)
# We can now safely replace NT_2 in rhs_a with rhs_b, which should
# contain only NT_2.
output.extend(rhs_b)
elif token == nt:
# Replace NT_1 in rhs_a with rhs_b.
# Based on the logic in `apply`, no collisions on non-terminal indexes
# should occur, since we should either be in the case:
# 1. rhs_a only has NT_1, and rhs_b has NT_1 and NT_2
# 2. rhs_a has NT_1 and NT_2, but rhs_b only has NT_1
output.extend(rhs_b)
else:
output.append(token)
return output
def _apply(rule_a, rule_b):
"""Applies rule_b to rule_a, returning set of derived rules."""
outputs = []
if rule_a.arity == 2:
new_arity = 1 + rule_b.arity
if new_arity <= 2:
# Cannot apply an arity 2 rule to an arity 2 rule because this would lead
# to a rule with 3 different non-terminal indexes, which is disallowed
# by our QCFG conventions.
source_0 = _substitute(rule_a.source, rule_b.source)
target_0 = _substitute(rule_a.target, rule_b.target)
outputs.append((source_0, target_0, new_arity))
# Rule can potentially be applied to either non-terminal in rule_a.
source_1 = _substitute(rule_a.source, rule_b.source, nt=qcfg_rule.NT_2)
target_1 = _substitute(rule_a.target, rule_b.target, nt=qcfg_rule.NT_2)
outputs.append((source_1, target_1, new_arity))
elif rule_a.arity == 1:
new_arity = rule_b.arity
source = _substitute(rule_a.source, rule_b.source)
target = _substitute(rule_a.target, rule_b.target)
outputs.append((source, target, new_arity))
output_rules = set()
for source, target, arity in outputs:
source, target = rule_utils.canonicalize_nts(source, target, arity)
output_rules.add(qcfg_rule.QCFGRule(tuple(source), tuple(target), arity))
return output_rules
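# Illustrative sketch (hypothetical usage, writing NT_1 for qcfg_rule.NT_1):
# applying the arity-0 rule NT -> <foo, foo> to the arity-1 rule
# NT -> <NT_1 bar, bar NT_1>,
#   _apply(qcfg_rule.QCFGRule((NT_1, "bar"), ("bar", NT_1), arity=1),
#          qcfg_rule.QCFGRule(("foo",), ("foo",), arity=0))
# returns {qcfg_rule.QCFGRule(("foo", "bar"), ("bar", "foo"), arity=0)}.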
def _can_maybe_derive_from(rule, goal_rule):
"""Return True if rule can potentially be used to derive goal_rule."""
# Don't allow 'reflexive' derivations.
if rule == goal_rule:
return False
if not rule_utils.rhs_can_maybe_derive(rule.source, goal_rule.source):
return False
if not rule_utils.rhs_can_maybe_derive(rule.target, goal_rule.target):
return False
return True
def _filter_rules(rules, goal_rule):
return [rule for rule in rules if _can_maybe_derive_from(rule, goal_rule)]
def _verify_arity(rule):
"""Raise ValueError if rule does not follow valid arity convention."""
if rule.arity == 0:
if qcfg_rule.NT_1 in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
if qcfg_rule.NT_2 in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
elif rule.arity == 1:
if qcfg_rule.NT_1 not in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
if qcfg_rule.NT_2 in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
elif rule.arity == 2:
if qcfg_rule.NT_1 not in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
if qcfg_rule.NT_2 not in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
return True
def can_derive(goal_rule,
rules,
derived_rules=None,
max_iter=15,
verbose=False):
"""Return True if `goal_rule` can be derived given `rules`.
We perform a relatively naive breadth first search (BFS), with early pruning
in cases where it can be quickly determined that an intermediate result
cannot be used in a derivation of our goal.
Args:
goal_rule: A QCFGRule representing a string pair to derive.
rules: A set of QCFGRules.
derived_rules: If not None, will add any derived QCFGRules that can
potentially derive `goal_rule` given some substitution to this set.
max_iter: Maximum number of iterations (i.e. derivation depth) for
attempting to derive `goal_rule`.
verbose: Print debugging logging if True.
Returns:
True if `goal_rule` can be derived given `rules`.
"""
# Filter rules to the set that can potentially be used in a derivation
# of `goal_rule`.
filtered_rules = _filter_rules(rules, goal_rule)
if verbose:
print("filtered_rules: %s" % filtered_rules)
# Track seen rules.
seen_rules = set(filtered_rules)
# Set of derived rules with derivation depth equal to iteration.
search_state = set(filtered_rules)
for _ in range(max_iter):
if not search_state:
if verbose:
print("Cannot derive %s." % str(goal_rule))
return False
if verbose:
print("Starting next iteration with search_state:")
for rule in search_state:
print(rule)
new_search_state = set()
for rule_a in search_state:
# Attempt to apply every relevant rule to every rule in search_state.
for rule_b in filtered_rules:
new_rules = _apply(rule_a, rule_b)
if verbose:
print("Applying %s to %s yields %s" % (rule_b, rule_a, new_rules))
for new_rule in new_rules:
# Check that application has not led to a malformed rule.
_verify_arity(new_rule)
if new_rule in seen_rules:
continue
seen_rules.add(new_rule)
if goal_rule == new_rule:
if verbose:
print("Derived %s." % str(goal_rule))
return True
# If the generated rule can be potentially used in a derivation of
# our goal, then add to the search state for the next iteration.
if _can_maybe_derive_from(new_rule, goal_rule):
if derived_rules is not None:
derived_rules.add(new_rule)
new_search_state.add(new_rule)
else:
if verbose:
print("Cannot derive goal from: %s" % str(new_rule))
search_state = new_search_state
# For the datasets we have studied, this limit should not generally apply.
print("Reached max iterations for rule `%s` given rules `%s`" %
(goal_rule, filtered_rules))
return False
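# Illustrative sketch (hypothetical usage, writing NT_1 for qcfg_rule.NT_1):
#   goal = qcfg_rule.QCFGRule(("foo", "bar"), ("bar", "foo"), arity=0)
#   rules = {qcfg_rule.QCFGRule((NT_1, "bar"), ("bar", NT_1), arity=1),
#            qcfg_rule.QCFGRule(("foo",), ("foo",), arity=0)}
#   can_derive(goal, rules)  # Expected to return True: substituting the
#   # arity-0 rule into the arity-1 rule derives the goal pair.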
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/derivation_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for permute MNIST experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_permute_mnist
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, update_reservior
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 10 # Number of experiments to average over
TRAIN_ITERS = 5000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 1e-3
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_POWER = 0.9
OPT_MOMENTUM = 0.9
VALID_ARCHS = ['FC-S', 'FC-B']
ARCH = 'FC-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM', 'S-GEM', 'FTR_EXT', 'PNN', 'ER'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 10 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 25   # Number of samples per class per task
INPUT_FEATURE_SIZE = 784
IMG_HEIGHT = 28
IMG_WIDTH = 28
IMG_CHANNELS = 1
TOTAL_CLASSES = 10 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
USE_GPU = True
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATIONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './permute_mnist_results'
## Evaluation options
## Num Tasks
NUM_TASKS = 20
MULTI_TASK = False
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for permutted mnist experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH, help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--examples-per-task", type=int, default=1000,
help="Number of examples per task.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, args):
"""
    Train and evaluate LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
batch_size = args.batch_size
if model.imp_method == 'A-GEM' or model.imp_method == 'ER':
use_episodic_memory = True
else:
use_episodic_memory = False
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
# Load the permute mnist dataset
datasets = construct_permute_mnist(model.num_tasks)
episodic_mem_size = args.mem_size*model.num_tasks*TOTAL_CLASSES
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = np.arange(TOTAL_CLASSES)
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, INPUT_FEATURE_SIZE])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
        # Since all the classes are present in all the tasks, there is nothing to mask
logit_mask = np.ones(TOTAL_CLASSES)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_logit_mask = np.ones([model.num_tasks, TOTAL_CLASSES])
if COUNT_VIOLATIONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(datasets)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
# Extract training images and labels for the current task
task_train_images = datasets[task]['train']['images']
task_train_labels = datasets[task]['train']['labels']
            # If multi_task is set then train using the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(datasets)):
task_train_images = np.concatenate((task_train_images, datasets[t_]['train']['images']), axis=0)
task_train_labels = np.concatenate((task_train_labels, datasets[t_]['train']['labels']), axis=0)
else:
# Skip training for this task
continue
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
total_train_examples = task_train_images.shape[0]
            # Randomly shuffle the training examples
perm = np.arange(total_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm][:args.examples_per_task]
train_y = task_train_labels[perm][:args.examples_per_task]
task_sample_weights = task_sample_weights[perm][:args.examples_per_task]
print('Received {} images, {} labels at task {}'.format(train_x.shape[0], train_y.shape[0], task))
# Array to store accuracies when training for task T
ftask = []
num_train_examples = train_x.shape[0]
# Train a task observing sequence of data
if args.train_single_epoch:
num_iters = num_train_examples // batch_size
else:
num_iters = args.train_iters
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode:
if (iters < 10) or (iters < 100 and iters % 10 == 0) or (iters % 100 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets, args.online_cross_val)
ftask.append(fbatch)
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: True}
if model.imp_method == 'VAN':
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
feed_dict[model.task_id] = task
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
if COUNT_VIOLATIONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
# Compute the gradient for current task and project if need be
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
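                    # Worked example (hypothetical numbers): with the default
                    # --mem-size of 25 and TOTAL_CLASSES = 10, each task owns a
                    # block of 25 * 10 = 250 slots of episodic memory. For a
                    # class-3 example seen during the second task
                    # (episodic_filled_counter = 250), the write index below is
                    # count_cls[3] + 25 * 3 + 250; count_cls[3] wraps modulo 25,
                    # so each per-class block behaves as a small ring buffer.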
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
cls = np.unique(np.nonzero(er_y_))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = cls
with_in_task_offset = args.mem_size * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + episodic_filled_counter
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y_
count_cls[cls] = (count_cls[cls] + 1) % args.mem_size
elif model.imp_method == 'RWALK':
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
# Train on a batch of episodic memory first
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: True}
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs Nans!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
            # Update the episodic memory filled counter
if use_episodic_memory:
episodic_filled_counter += args.mem_size * TOTAL_CLASSES
if model.imp_method == 'A-GEM' and COUNT_VIOLATIONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(datasets) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, np.arange(TOTAL_CLASSES))
print('\t\t\t\tTask updates after Task%d done!'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets, False)
ftask.append(fbatch)
ftask = np.array(ftask)
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, args.online_cross_val)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets, args.online_cross_val)
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs
def test_task_sequence(model, sess, test_data, cross_validate_mode):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
list_acc = []
if model.imp_method == 'PNN':
pnn_logit_mask = np.ones([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.ones(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for task in range(model.num_tasks):
mem_offset = task*SAMPLES_PER_CLASS*TOTAL_CLASSES
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*TOTAL_CLASSES],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*TOTAL_CLASSES], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False}
acc = model.accuracy.eval(feed_dict = feed_dict)
list_acc.append(acc)
print(list_acc)
return list_acc
for task, _ in enumerate(test_data):
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
feed_dict = {model.x: test_data[task]['test']['images'],
model.y_[task]: test_data[task]['test']['labels'], model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[task].eval(feed_dict = feed_dict)
else:
feed_dict = {model.x: test_data[task]['test']['images'],
model.y_: test_data[task]['test']['labels'], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False}
acc = model.accuracy.eval(feed_dict = feed_dict)
list_acc.append(acc)
return list_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'DATASET': 'PERMUTE_MNIST',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "PERMUTE_MNIST_HERDING_%s_%s_%s_%s_%r_%s-"%(args.arch, args.train_single_epoch, args.imp_method, str(args.synap_stgth).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the subset of data depending on training or cross-validation mode
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, INPUT_FEATURE_SIZE])
#x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(args.learning_rate)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(args.learning_rate, OPT_MOMENTUM)
        # Create the Model / construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch)
# Set up tf session and initialize variables.
if USE_GPU:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(
device_count = {'GPU': 0}
)
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs = train_task_sequence(model, sess, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean = runs.mean(0)
acc_std = runs.std(0)
cross_validate_dump_file = args.log_dir + '/' + 'PERMUTE_MNIST_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
f.write('GPU:{} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(USE_GPU, args.arch, args.learning_rate,
args.synap_stgth, acc_mean[-1, :].mean()))
else:
f.write('GPU: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} \t Fgt: {} \t Time: {}\n'.format(USE_GPU, args.arch, args.learning_rate,
args.synap_stgth, acc_mean[-1, :].mean(), compute_fgt(acc_mean), str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
if __name__ == '__main__':
main()
|
agem-main
|
fc_permute_mnist.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split AWA experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import random
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_awa
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, load_task_specific_data_in_proportion
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = False
## Model options
#MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'GEM', 'A-GEM', 'S-GEM'] #List of valid models
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'VAN'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 20   # Number of samples per class per task
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 50 # Total number of classes in the dataset
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
CLASSES_PER_TASK = 5
## Logging, saving and testing options
LOG_DIR = './split_awa_results'
SNAPSHOT_DIR = './awa_snapshots/sgd'
SAVE_MODEL_PARAMS = False
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
DATA_DIR= './AWA_data/Animals_with_Attributes2/'
AWA_TRAIN_LIST = './dataset_lists/AWA_train_list.txt'
AWA_VAL_LIST = './dataset_lists/AWA_val_list.txt'
AWA_TEST_LIST = './dataset_lists/AWA_test_list.txt'
#AWA_TRAIN_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_VAL_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_TEST_LIST = './dataset_lists/tmp_list_awa.txt'
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A Namespace of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split AWA experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the AWA data will be read.\
NOTE: Provide path till <AWA_DIR>/Animals_with_Attributes2")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="Path to TF checkpoint file or npz file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
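# Example invocation (illustrative; all flags are defined above and the paths are placeholders):
#   python conv_split_awa.py --train-single-epoch --arch RESNET-B --imp-method A-GEM \
#       --batch-size 16 --mem-size 20 --data-dir ./AWA_data/Animals_with_Attributes2/ \
#       --log-dir ./split_awa_results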
def train_task_sequence(model, sess, saver, datasets, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
episodic_mem_size, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate the LLL system such that each example is seen only once
Args:
Returns:
A tuple (runs, task_labels_dataset) of per-run accuracies (or their mean in cross-validation mode) and the per-run task-label assignments
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = CLASSES_PER_TASK
classes_appearing_in_tasks = dict()
for cls in range(TOTAL_CLASSES):
classes_appearing_in_tasks[cls] = 0
if online_cross_val:
label_array = np.arange(TOTAL_CLASSES)
for tt in range(model.num_tasks):
offset = tt * classes_per_task
task_labels.append(list(label_array[offset:offset+classes_per_task]))
else:
for tt in range(model.num_tasks):
task_labels.append(random.sample(range(K_FOR_CROSS_VAL*classes_per_task, TOTAL_CLASSES), classes_per_task))
for lab in task_labels[tt]:
classes_appearing_in_tasks[lab] += 1
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
print('Class frequency in Tasks: {}'.format(classes_appearing_in_tasks))
# Store the task labels
task_labels_dataset.append(task_labels)
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, model.num_tasks*TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, model.total_classes])
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for softmax
logit_mask = np.zeros(model.total_classes)
max_batch_dimension = 500
# Dict to store the number of times a class has already been seen in the training
class_seen_already = dict()
for cls in range(TOTAL_CLASSES):
class_seen_already[cls] = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# Increment the class seen count
for cls in task_labels[task]:
class_seen_already[cls] += 1
# Load the task specific dataset
task_train_images, task_train_labels = load_task_specific_data_in_proportion(datasets[0]['train'], task_labels[task], classes_appearing_in_tasks, class_seen_already)
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
else:
num_iters = train_iters
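# (With --train-single-epoch, num_iters is instead the ceiling of num_train_examples / batch_size,
#  e.g. 100 examples with batch_size 16 give (100 + 15) // 16 = 7 iterations.)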
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
logit_mask[classes_adjusted_for_head] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
if cross_validate_mode:
# Because we will evaluate at the end
ftask = 0
elif train_single_epoch:
# Because we will evaluate after every mini-batch of every task
ftask = np.zeros([max_batch_dimension+1, model.num_tasks])
batch_dim_count = 0
else:
# Because we will evaluate after every task
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
final_train_labels = np.zeros([batch_size, model.total_classes])
head_offset = task * TOTAL_CLASSES
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode:
if (iters < 11):
# Snapshot the current performance across all tasks for the first few mini-batches
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, online_cross_val)
ftask[batch_dim_count] = fbatch
# Increment the batch_dim_count
batch_dim_count += 1
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
else:
logit_mask[:] = 0
logit_mask[classes_adjusted_for_head] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
final_train_labels[:residual, head_offset:head_offset+TOTAL_CLASSES] = train_y[offset:offset+residual]
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: final_train_labels[:residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
final_train_labels[:, head_offset:head_offset+TOTAL_CLASSES] = train_y[offset:offset+batch_size]
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: final_train_labels,
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the logit mask for all the previous tasks seen so far
a_gem_logit_mask[:] = 0
for tt in range(task):
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[tt]]
a_gem_logit_mask[tt][classes_adjusted_for_head] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples, head_offset:head_offset+TOTAL_CLASSES] = imp_labels
episodic_filled_counter += total_imp_samples
print('Unique labels in the episodic memory: {}'.format(np.unique(np.nonzero(episodic_labels)[1])))
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If the sampling flag is set, store a few samples from the current task for use as memory in later tasks
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([datasets[task]['train']['images'].shape[0]], dtype=np.float32)
# Get the important samples from the current task
imp_images, imp_labels = sample_from_dataset(datasets[task]['train'], importance_array,
task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
# Only evaluate after the last task
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, online_cross_val)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, False)
print('Task: {} Acc: {}'.format(task, fbatch))
ftask[batch_dim_count] = fbatch
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, online_cross_val)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask), task_labels_dataset
else:
runs = np.array(runs)
return runs, task_labels_dataset
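# The gradient projection applied by model.train_subseq_tasks lives inside the TF graph built in
# model.py (not shown here). The standalone NumPy sketch below only illustrates the A-GEM rule on
# flattened gradients: when the current-task gradient g conflicts with the reference gradient
# g_ref computed on the episodic memory (g . g_ref < 0), g is projected so that the loss on the
# memory is not increased. It is never called by this script; g and g_ref are illustrative arrays.
def _agem_projection_sketch(g, g_ref):
    dot = np.dot(g, g_ref)
    if dot < 0:
        # Remove the component of g that points against the reference gradient.
        return g - (dot / np.dot(g_ref, g_ref)) * g_ref
    return g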
def test_task_sequence(model, sess, test_data, all_task_labels, task, cross_validate_mode):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
test_set = 'test'
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, model.total_classes])
else:
logit_mask = np.zeros(model.total_classes)
for tt, labels in enumerate(all_task_labels):
if tt > task:
return final_acc
samples_at_a_time = 10
task_images, task_labels = load_task_specific_data(test_data, labels)
global_class_indices = np.column_stack(np.nonzero(task_labels))
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in labels]
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[classes_adjusted_for_head] = 1.0
acc = np.zeros(len(labels))
final_train_labels = np.zeros([samples_at_a_time, model.total_classes])
head_offset = tt * TOTAL_CLASSES
for cli, cls in enumerate(labels):
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
task_test_images = task_images[class_indices]
task_test_labels = task_labels[class_indices]
total_test_samples = task_test_images.shape[0]
total_corrects = 0
if total_test_samples < samples_at_a_time:
i = -1
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
final_train_labels[:, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+samples_at_a_time]
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: final_train_labels,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
final_train_labels[:num_residuals, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+num_residuals]
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: final_train_labels[:num_residuals],
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Accuracy
if total_test_samples != 0:
acc[cli] = total_corrects/ float(total_test_samples)
final_acc[tt] = np.mean(acc)
return final_acc
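# Sketch of the multi-head masking idea used during evaluation above: only logits belonging to the
# evaluated task's classes (shifted by the head offset) may win the prediction. The real mask is
# fed to the network via model.output_mask; this NumPy helper is purely illustrative and unused.
def _masked_prediction_sketch(logits, active_class_indices):
    # logits is assumed to be a 1-D float array over all heads' classes.
    masked_logits = np.full_like(logits, -np.inf)
    masked_logits[active_class_indices] = logits[active_class_indices]
    return int(np.argmax(masked_logits))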
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Initialize the random seed of numpy
np.random.seed(args.random_seed)
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split AWA dataset for all the classes
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_awa(data_labs, args.data_dir, AWA_TRAIN_LIST, AWA_VAL_LIST, AWA_TEST_LIST, IMG_HEIGHT, IMG_WIDTH)
if args.cross_validate_mode:
#models_list = MODELS
#learning_rate_list = [0.1, 0.03, 0.01, 0.003, 0.0003]
models_list = [args.imp_method]
learning_rate_list = [0.01]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.001]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [1]
learning_rate_list = [0.003]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [100]
learning_rate_list = [0.003]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.003]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [10] # Run again
learning_rate_list = [0.003]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.01]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_AWA',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_AWA_ONE_HOT_HERDING_%r_%s_%r_%s_%s_%s_%s_%r_%s-"%(args.is_herding, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'), str(lr).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, num_tasks*TOTAL_CLASSES])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
# NOTE: a polynomial learning-rate decay, base_lr * (1 - step/iters)**OPT_POWER, is not wired into the graph here (no step/iteration tensors exist in this scope); the optimizer below uses the constant learning rate.
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the Model/ construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, args.cross_validate_mode, args.train_single_epoch,
args.do_sampling, args.is_herding, args.mem_size*CLASSES_PER_TASK*num_tasks, args.train_iters, args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
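# Episodic memory sizing (illustrative arithmetic with this script's defaults): mem_size = 20
# samples per class, CLASSES_PER_TASK = 5 and num_tasks = NUM_TASKS - K_FOR_CROSS_VAL = 17,
# so the A-GEM buffer above is allocated with 20 * 5 * 17 = 1700 slots.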
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If the cross-validation flag is enabled, append the validation results to a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_AWA_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_awa.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CUB experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_cub
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, update_episodic_memory_with_less_data, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be overridden by the command-line options ######
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = True
## Model options
#MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'GEM', 'A-GEM', 'S-GEM'] #List of valid models
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'PI'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 5 # Number of samples stored per class in the episodic memory
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 200 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
## Logging, saving and testing options
LOG_DIR = './split_cub_results'
SNAPSHOT_DIR = './cub_snapshots/sgd'
SAVE_MODEL_PARAMS = False
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
DATA_DIR = 'CUB_data/CUB_200_2011/images'
CUB_TRAIN_LIST = './dataset_lists/CUB_train_list.txt'
CUB_TEST_LIST = './dataset_lists/CUB_test_list.txt'
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
# Define functions to save and load model weights. The ImageNet initialization is restored through these helpers later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A Namespace of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split CUB experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the CUB data will be read.\
NOTE: Provide path till <CUB_DIR>/images")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="Path to TF checkpoint file or npz file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, saver, datasets, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
mem_per_class, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate the LLL system such that each example is seen only once
Args:
Returns:
A tuple (runs, task_labels_dataset) of per-run accuracies (or their mean in cross-validation mode) and the per-run task-label assignments
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// NUM_TASKS
total_classes = classes_per_task * model.num_tasks
if online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = mem_per_class * total_classes
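# Illustrative arithmetic with this script's defaults: classes_per_task = 200 // 20 = 10 and,
# without --online-cross-val, num_tasks = 20 - 3 = 17, so total_classes = 170 and, with
# mem_per_class = 5, episodic_mem_size = 5 * 170 = 850. Each task later fills its own block of
# roughly 5 * 10 = 50 exemplars (assuming SAMPLES_PER_CLASS examples exist for every class).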
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# If the sampling flag is set, append the stored samples from the previous tasks
if do_sampling:
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
if task > 0:
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, last_task_x, last_task_y_)
else:
task_train_images = task_tr_images
task_train_labels = task_tr_labels
else:
# Extract training images and labels for the current task
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels.extend(task_labels[task])
# Declare variables to store sample importance if sampling flag is set
if do_sampling:
# Get the sample weighting
task_sample_weights = get_sample_weights(task_train_labels, test_labels)
else:
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if cross_validate_mode:
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = train_iters
# Set the mask only once before starting the training for the task
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode and not MULTI_TASK:
if (iters < 10) or (iters % 5 == 0):
# Snapshot the current performance across all tasks for the first few mini-batches and every fifth one thereafter
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
ftask.append(fbatch)
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
else:
logit_mask[:] = 0
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
a_gem_logit_mask[tt][task_labels[tt]] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 50 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, features, episodic_mem_size, task, episodic_images, episodic_labels, task_labels=task_labels[task], is_herding=True)
else:
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples] = imp_labels
episodic_filled_counter += total_imp_samples
print('Unique labels in the episodic memory: {}'.format(np.unique(np.nonzero(episodic_labels)[1])))
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If the sampling flag is set, store a few samples from the current task for use as memory in later tasks
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([task_train_images.shape[0]], dtype=np.float32)
# Get the important samples from the current task
task_data = {
'images': task_tr_images,
'labels': task_tr_labels,
}
imp_images, imp_labels = sample_from_dataset(task_data, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
# Only evaluate after the last task
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {} Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask), task_labels_dataset
else:
runs = np.array(runs)
return runs, task_labels_dataset
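# Herding ("MoF", mean-of-features) exemplar selection is delegated to sample_from_dataset_icarl /
# update_episodic_memory in utils.utils (not shown here). The greedy NumPy sketch below only
# illustrates the iCaRL-style idea: repeatedly pick the example whose inclusion keeps the running
# mean of selected features closest to the true class-mean feature. It is never called by this
# script; class_features is an illustrative (num_examples, feature_dim) array.
def _herding_selection_sketch(class_features, num_exemplars):
    class_mean = class_features.mean(axis=0)
    selected = []
    running_sum = np.zeros_like(class_mean)
    for k in range(min(num_exemplars, class_features.shape[0])):
        # Mean that would result from adding each candidate to the current selection.
        candidate_means = (running_sum + class_features) / (k + 1)
        distances = np.linalg.norm(candidate_means - class_mean, axis=1)
        distances[selected] = np.inf  # never pick the same exemplar twice
        best = int(np.argmin(distances))
        selected.append(best)
        running_sum += class_features[best]
    return selected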
def test_task_sequence(model, sess, test_data, test_tasks, task):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
total_test_samples = task_test_images.shape[0]
samples_at_a_time = 10
total_corrects = 0
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][labels] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[labels] = 1.0
i = -1 # Guards the residual pass below if there are fewer than samples_at_a_time test images
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: task_test_labels[offset:offset+samples_at_a_time],
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Compute the corrects on the residual samples that did not fill a complete chunk
offset = (total_test_samples // samples_at_a_time) * samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: task_test_labels[offset:offset+num_residuals],
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Mean accuracy on the task
acc = total_corrects / float(total_test_samples)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split CUB dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_cub(data_labs, args.data_dir, CUB_TRAIN_LIST, CUB_TEST_LIST, IMG_HEIGHT, IMG_WIDTH)
if args.cross_validate_mode:
#models_list = MODELS
#learning_rate_list = [0.3, 0.1, 0.01, 0.003, 0.001]
models_list = [args.imp_method]
learning_rate_list = [0.03]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.03]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.03]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CUB',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_CUB_ONE_HOT_HERDING_%r_%s_%r_%s_%s_%s_%s_%r_%s-"%(args.is_herding, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'), str(lr).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
#tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(RANDOM_SEED)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
opt = tf.train.MomentumOptimizer(base_lr, OPT_MOMENTUM)
# Create the model/ construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, args.cross_validate_mode, args.train_single_epoch,
args.do_sampling, args.is_herding, args.mem_size, args.train_iters, args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CUB_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_cub.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CIFAR 100 experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_split_cifar
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data
from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'RESNET-S', 'RESNET-B', 'VGG']
ARCH = 'RESNET-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'S-GEM', 'A-GEM', 'FTR_EXT', 'PNN', 'ER'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 13
IMG_HEIGHT = 32
IMG_WIDTH = 32
IMG_CHANNELS = 3
TOTAL_CLASSES = 100 # Total number of classes in the dataset
VISUALIZE_IMPORTANCE_MEASURE = False
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './split_cifar_results'
RESNET18_CIFAR10_CHECKPOINT = './resnet-18-pretrained-cifar10/model.ckpt-19999'
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
# Define functions to save/ load training weights. A pretrained checkpoint can be used for initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse Namespace.
"""
parser = argparse.ArgumentParser(description="Script for split cifar experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Total size of episodic memory.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
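# Example invocation (illustrative values; all flags used here are defined above):
#   python conv_split_cifar.py --train-single-epoch --imp-method A-GEM \
#       --batch-size 10 --learning-rate 0.03 --mem-size 13 --log-dir ./split_cifar_results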
def train_task_sequence(model, sess, datasets, args):
"""
Train and evaluate the LLL system such that we only see an example once
Args:
Returns:
A tuple (runs, task_labels_dataset) with per-run accuracy arrays and the task label assignments
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
if model.imp_method == 'A-GEM' or model.imp_method == 'ER':
use_episodic_memory = True
else:
use_episodic_memory = False
batch_size = args.batch_size
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// NUM_TASKS
total_classes = classes_per_task * model.num_tasks
if args.online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
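# The shuffled label space is now partitioned into disjoint groups of
# `classes_per_task` labels, one group per task; outside online cross-validation the
# first K_FOR_CROSS_VAL tasks' worth of labels is skipped via `class_label_offset`.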
# Set episodic memory size
episodic_mem_size = args.mem_size * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
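# The episodic memory reserves `mem_size` slots per class (mem_size * total_classes
# in total); labels are stored in the same one-hot format as the training data.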
episodic_filled_counter = 0
nd_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
examples_seen_so_far = 0
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
if COUNT_VIOLATONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
if model.imp_method == 'PNN':
pnn_train_phase = np.zeros(model.num_tasks, dtype=bool)
pnn_train_phase[task] = True
pnn_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
# If not in the cross validation mode then concatenate the train and validation sets
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
task_val_images, task_val_labels = load_task_specific_data(datasets[0]['validation'], task_labels[task])
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, task_val_images, task_val_labels)
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels += task_labels[task]
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if args.train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if args.cross_validate_mode:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = args.train_iters
# Set the mask only once before starting the training for the task
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
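# convergence_iters marks the fraction MEASURE_CONVERGENCE_AFTER of this task's
# iterations; past this point the M-EWC branch below additionally runs its
# weight-snapshot and small-omega update ops.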
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode and not MULTI_TASK:
if (iters <= 20) or (iters > 20 and iters % 50 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
ftask.append(fbatch)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
elif model.imp_method == 'A-GEM':
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
else:
# Set the output labels over which the model needs to be trained
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
if args.train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+residual], model.y_[task]: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
feed_dict[model.output_mask] = logit_mask
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
nd_logit_mask[:] = 0
for tt in range(task):
nd_logit_mask[tt][task_labels[tt]] = 1.0
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
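# With the reference gradient stored, the current-task gradient computed below is
# projected by model.train_subseq_tasks whenever it conflicts with the reference
# gradient (the A-GEM constraint keeps their dot product non-negative); the ops are
# assumed to be defined this way in model.py.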
# Compute the gradient for current task and project if need be
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
if COUNT_VIOLATONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
cls = np.unique(np.nonzero(er_y_))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = np.where(np.array(task_labels[task]) == cls)[0][0]
with_in_task_offset = args.mem_size * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + episodic_filled_counter
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y_
count_cls[cls] = (count_cls[cls] + 1) % args.mem_size
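# The loop above writes the current batch into a per-class ring buffer: each class
# owns `mem_size` consecutive slots inside the region reserved for its task, and
# count_cls wraps around so newer samples overwrite the oldest ones.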
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
nd_logit_mask[:] = 0
for tt in range(task+1):
nd_logit_mask[tt][task_labels[tt]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = float(er_train_x_batch.shape[0])
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Reservoir update
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
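# update_reservior is assumed to implement reservoir sampling: once the memory is
# full, an incoming example overwrites a random slot with probability
# episodic_mem_size / examples_seen_so_far, keeping the buffer close to a uniform
# sample of the stream seen so far.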
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
if use_episodic_memory:
episodic_filled_counter += args.mem_size * classes_per_task
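# episodic_filled_counter counts the memory slots that belong to tasks completed so
# far; A-GEM samples its reference batches only from this filled region.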
if model.imp_method == 'A-GEM':
if COUNT_VIOLATONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(task_labels) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
if VISUALIZE_IMPORTANCE_MEASURE:
if runid == 0:
for i in range(len(model.fisher_diagonal_at_minima)):
if i == 0:
flatten_fisher = np.array(model.fisher_diagonal_at_minima[i].eval()).flatten()
else:
flatten_fisher = np.concatenate((flatten_fisher,
np.array(model.fisher_diagonal_at_minima[i].eval()).flatten()))
#flatten_fisher [flatten_fisher > 0.1] = 0.1
if args.train_single_epoch:
plot_histogram(flatten_fisher, 100, '/private/home/arslanch/Dropbox/LLL_experiments/Single_Epoch/importance_vis/single_epoch/m_ewc/hist_fisher_task%s.png'%(task))
else:
plot_histogram(flatten_fisher, 100, '/private/home/arslanch/Dropbox/LLL_experiments/Single_Epoch/importance_vis/single_epoch/m_ewc/hist_fisher_task%s.png'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {}, Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
ftask = np.array(ftask)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, task_labels, task, classes_per_task=classes_per_task)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {}, Acc: {}'.format(task, ftask))
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, test_tasks, task, classes_per_task=0):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'PNN' or model.imp_method == 'A-GEM' or model.imp_method == 'ER':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for tt, labels in enumerate(test_tasks):
# Multi-head evaluation setting
logit_mask[:] = 0
logit_mask[labels] = 1.0
mem_offset = tt*SAMPLES_PER_CLASS*classes_per_task
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task], model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
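# Standard multi-head evaluation path: for each task seen so far, mask out all
# logits except that task's classes before computing accuracy.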
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
if model.imp_method == 'PNN':
pnn_train_phase = np.zeros(model.num_tasks, dtype=bool)
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_[tt]: task_test_labels, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
elif model.imp_method == 'A-GEM' or model.imp_method == 'ER':
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
else:
logit_mask[:] = 0
logit_mask[labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CIFAR',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "SPLIT_CIFAR_HERDING_%s_%r_%s_%s_%s_%s_%s-"%(args.arch, args.train_single_epoch, args.imp_method,
str(args.synap_stgth).replace('.', '_'), str(args.learning_rate).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the task labels from the total number of tasks and full label space
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split cifar dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_cifar(data_labs)
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(args.learning_rate)
opt = tf.train.MomentumOptimizer(base_lr, OPT_MOMENTUM)
# Create the model/ construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs, task_labels_dataset = train_task_sequence(model, sess, datasets, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CIFAR_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.arch, args.learning_rate, args.synap_stgth, acc_mean[-1,:].mean()))
else:
f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} \t Fgt: {} \t Time: {}\n'.format(args.arch, args.learning_rate,
args.synap_stgth, acc_mean, fgt_mean, str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_cifar.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split AWA experiment with hybrid learning.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import random
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_awa
from utils.utils import get_sample_weights, sample_from_dataset, concatenate_datasets, update_episodic_memory, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data, load_task_specific_data_in_proportion
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = False
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'VAN'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 20 # Number of samples per class stored in the episodic memory
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 50 # Total number of classes in the dataset
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
CLASSES_PER_TASK = 5
## Logging, saving and testing options
LOG_DIR = './split_awa_results'
SNAPSHOT_DIR = './awa_snapshots'
SAVE_MODEL_PARAMS = False
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
ATTR_DIMS = 85
DATA_DIR= './AWA_data/Animals_with_Attributes2/'
AWA_TRAIN_LIST = './dataset_lists/AWA_train_list.txt'
AWA_VAL_LIST = './dataset_lists/AWA_val_list.txt'
AWA_TEST_LIST = './dataset_lists/AWA_test_list.txt'
#AWA_TRAIN_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_VAL_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_TEST_LIST = './dataset_lists/tmp_list_awa.txt'
AWA_ATTR_LIST = 'dataset_lists/AWA_attr_in_order.pickle'
# Define functions to save/ load training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse Namespace.
"""
parser = argparse.ArgumentParser(description="Script for split AWA Hybrid experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--set-hybrid", action="store_true",
help="If option is chosen then train using hybrid model")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the AWA data will be read.\
NOTE: Provide path till <AWA_DIR>/Animals_with_Attributes2")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="TF checkpoint file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF Checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
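# Example invocation (illustrative values; the script name is assumed, all flags used
# here are defined above):
#   python conv_split_awa_hybrid.py --set-hybrid --imp-method A-GEM --arch RESNET-B \
#       --batch-size 10 --mem-size 20 --data-dir ./AWA_data/Animals_with_Attributes2/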
def train_task_sequence(model, sess, saver, datasets, class_attr, num_classes_per_task, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
episodic_mem_size, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate the LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = num_classes_per_task
classes_appearing_in_tasks = dict()
for cls in range(TOTAL_CLASSES):
classes_appearing_in_tasks[cls] = 0
if online_cross_val:
label_array = np.arange(TOTAL_CLASSES)
for tt in range(model.num_tasks):
offset = tt * classes_per_task
task_labels.append(list(label_array[offset:offset+classes_per_task]))
else:
for tt in range(model.num_tasks):
task_labels.append(random.sample(range(K_FOR_CROSS_VAL*classes_per_task, TOTAL_CLASSES), classes_per_task))
for lab in task_labels[tt]:
classes_appearing_in_tasks[lab] += 1
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
print('Class frequency in Tasks: {}'.format(classes_appearing_in_tasks))
# Store the task labels
task_labels_dataset.append(task_labels)
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name and 'attr_embed' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, model.num_tasks*TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, model.total_classes])
# Labels for all the tasks that we have seen in the past
prev_task_labels = []
prev_class_attrs = np.zeros([model.total_classes, class_attr.shape[1]])
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for softmax
logit_mask = np.zeros(model.total_classes)
max_batch_dimension = 500
# Dict to store the number of times a class has already been seen in the training
class_seen_already = dict()
for cls in range(TOTAL_CLASSES):
class_seen_already[cls] = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# Increment the class seen count
for cls in task_labels[task]:
class_seen_already[cls] += 1
task_train_images, task_train_labels = load_task_specific_data_in_proportion(datasets[0]['train'], task_labels[task], classes_appearing_in_tasks, class_seen_already)
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
# Train a task observing sequence of data
logit_mask[:] = 0
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
else:
num_iters = train_iters
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
logit_mask[classes_adjusted_for_head] = 1.0
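# In this hybrid setup every task owns its own block of TOTAL_CLASSES logits, so the
# class indices are shifted by task * TOTAL_CLASSES before the mask is built.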
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
if cross_validate_mode:
# Because we will evaluate at the end
ftask = 0
elif train_single_epoch:
# Because we will evaluate after every mini-batch of every task
ftask = np.zeros([max_batch_dimension+1, model.num_tasks])
batch_dim_count = 0
else:
# Multi-epoch because we will evaluate after every task
ftask = []
# Attribute mask
masked_class_attrs = np.zeros([model.total_classes, class_attr.shape[1]])
masked_class_attrs[classes_adjusted_for_head] = class_attr[task_labels[task]]
#masked_class_attrs[task_labels[task]] = class_attr[task_labels[task]]
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
final_train_labels = np.zeros([batch_size, model.total_classes])
head_offset = task * TOTAL_CLASSES
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode:
if (iters < 11):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, online_cross_val)
ftask[batch_dim_count] = fbatch
# Increment the batch_dim_count
batch_dim_count += 1
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
else:
logit_mask[:] = 0
logit_mask[classes_adjusted_for_head] = 1.0
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
final_train_labels[:residual, head_offset:head_offset+TOTAL_CLASSES] = train_y[offset:offset+residual]
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: final_train_labels[:residual],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
prev_class_attrs = np.zeros_like(class_attr)
if online_cross_val:
attr_offset = prev_task * num_classes_per_task
else:
attr_offset = (prev_task + K_FOR_CROSS_VAL) * num_classes_per_task
prev_class_attrs[attr_offset:attr_offset+num_classes_per_task] = class_attr[attr_offset:attr_offset+num_classes_per_task]
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
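# S-GEM uses the stored memory of a single randomly chosen previous task for the
# reference gradient; A-GEM (next branch) instead draws a batch across the memories
# of all previous tasks.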
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[tt]]
a_gem_logit_mask[tt][classes_adjusted_for_head] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask], model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'A-GEM':
# Update the previous task labels
prev_task_labels.extend(classes_adjusted_for_head)
prev_class_attrs[classes_adjusted_for_head] = class_attr[task_labels[task]]
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
# TODO: For MAS, should the gradients be for current task or all the previous tasks
model.task_updates(sess, task, task_train_images, task_labels[task], num_classes_per_task=num_classes_per_task, class_attr=class_attr, online_cross_val=online_cross_val)
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
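# sample_from_dataset_icarl is assumed to implement iCaRL-style herding: for each
# class it greedily picks the exemplars whose running feature mean stays closest
# to the class mean feature (mean-of-features), instead of sampling uniformly.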
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples, head_offset:head_offset+TOTAL_CLASSES] = imp_labels
episodic_filled_counter += total_imp_samples
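# With KEEP_EPISODIC_MEMORY_FULL disabled, each task writes its exemplars into a
# contiguous slice of the buffer (offset task * total_imp_samples) and
# episodic_filled_counter records how many rows are valid when the A-GEM
# reference batch is sampled during later tasks.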
print('Unique labels in the episodic memory: {}'.format(np.unique(np.nonzero(episodic_labels)[1])))
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique Labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If sampling flag is set, store few of the samples from previous task
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([datasets[task]['train']['images'].shape[0]], dtype=np.float32)
# Get the important samples from the current task
imp_images, imp_labels = sample_from_dataset(datasets[task]['train'], importance_array,
task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, online_cross_val)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, False)
ftask[batch_dim_count] = fbatch
print('Task: {}, {}'.format(task, fbatch))
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, online_cross_val)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask)
else:
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, class_attr, num_classes_per_task, all_task_labels, task, online_cross_val):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
test_set = 'test'
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, model.total_classes])
else:
logit_mask = np.zeros(model.total_classes)
for tt, labels in enumerate(all_task_labels):
if tt > task:
return final_acc
samples_at_a_time = 10
task_images, task_labels = load_task_specific_data(test_data, labels)
global_class_indices = np.column_stack(np.nonzero(task_labels))
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in labels]
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[classes_adjusted_for_head] = 1.0
#masked_class_attrs = np.zeros_like(class_attr)
#masked_class_attrs[labels] = class_attr[labels]
masked_class_attrs = np.zeros([model.total_classes, class_attr.shape[1]])
masked_class_attrs[classes_adjusted_for_head] = class_attr[labels]
final_train_labels = np.zeros([samples_at_a_time, model.total_classes])
head_offset = tt * TOTAL_CLASSES
acc = np.zeros(len(labels))
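# Accuracy is computed per class and then averaged over the classes of the task,
# i.e. the reported number is class-balanced rather than sample-weighted.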
for cli, cls in enumerate(labels):
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
task_test_images = task_images[class_indices]
task_test_labels = task_labels[class_indices]
total_test_samples = task_test_images.shape[0]
total_corrects = 0
if total_test_samples < samples_at_a_time:
i = -1
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
final_train_labels[:, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+samples_at_a_time]
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: final_train_labels,
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
corrects = sess.run(model.correct_predictions[tt], feed_dict=feed_dict)
else:
feed_dict[model.output_mask] = logit_mask
corrects = sess.run(model.correct_predictions, feed_dict=feed_dict)
total_corrects += np.sum(corrects)
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
final_train_labels[:num_residuals, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+num_residuals]
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: final_train_labels[:num_residuals],
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
corrects = sess.run(model.correct_predictions[tt], feed_dict=feed_dict)
else:
feed_dict[model.output_mask] = logit_mask
corrects = sess.run(model.correct_predictions, feed_dict=feed_dict)
total_corrects += np.sum(corrects)
if total_test_samples != 0:
# Mean accuracy on the task
acc[cli] = total_corrects/ float(total_test_samples)
final_acc[tt] = np.mean(acc)
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Initialize the random seed of numpy
np.random.seed(args.random_seed)
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split AWA dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets, AWA_attr = construct_split_awa(data_labs, args.data_dir, AWA_TRAIN_LIST, AWA_VAL_LIST, AWA_TEST_LIST, IMG_HEIGHT, IMG_WIDTH, attr_file=AWA_ATTR_LIST)
if args.online_cross_val:
AWA_attr[K_FOR_CROSS_VAL*CLASSES_PER_TASK:] = 0
else:
AWA_attr[:K_FOR_CROSS_VAL*CLASSES_PER_TASK] = 0
print('Attributes: {}'.format(np.sum(AWA_attr, axis=1)))
if args.cross_validate_mode:
models_list = MODELS
learning_rate_list = [0.1, 0.03, 0.01, 0.001, 0.0003]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.003]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [10]
learning_rate_list = [0.003]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [100]
learning_rate_list = [0.003]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.001]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [10] # Check again!
learning_rate_list = [0.003]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.003]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_AWA',
'HYBRID': args.set_hybrid,
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_AWA_HERDING_%r_HYB_%r_%s_%r_%s_%s_%s_%r_%s-"%(args.is_herding, args.set_hybrid, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, num_tasks*TOTAL_CLASSES])
attr = tf.placeholder(tf.float32, shape=[num_tasks*TOTAL_CLASSES, ATTR_DIMS])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the Model/ construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, attr=attr)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x, attr=attr)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, AWA_attr, CLASSES_PER_TASK, args.cross_validate_mode,
args.train_single_epoch, args.do_sampling, args.is_herding, args.mem_size*CLASSES_PER_TASK*num_tasks, args.train_iters,
args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_AWA_HYBRID_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_awa_hybrid.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CUB experiment with zero shot transfer.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_cub
from utils.utils import get_sample_weights, sample_from_dataset, concatenate_datasets, update_episodic_memory_with_less_data, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = True
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 5 # Number of samples per class stored in the episodic memory
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 200 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
## Logging, saving and testing options
LOG_DIR = './split_cub_results'
SNAPSHOT_DIR = './cub_snapshots'
SAVE_MODEL_PARAMS = False
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
ATTR_DIMS = 312
DATA_DIR='CUB_data/CUB_200_2011/images'
#CUB_TRAIN_LIST = 'dataset_lists/tmp_list.txt'
#CUB_TEST_LIST = 'dataset_lists/tmp_list.txt'
CUB_TRAIN_LIST = 'dataset_lists/CUB_train_list.txt'
CUB_TEST_LIST = 'dataset_lists/CUB_test_list.txt'
CUB_ATTR_LIST = 'dataset_lists/CUB_attr_in_order.pickle'
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split CUB hybrid experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--set-hybrid", action="store_true",
help="If option is chosen then train using hybrid model")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the CUB data will be read.\
NOTE: Provide the path up to <CUB_DIR>/images")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="TF checkpoint file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF Checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, saver, datasets, class_attr, classes_per_task, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
mem_per_class, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate the LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
total_classes = classes_per_task * model.num_tasks
if online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = mem_per_class * total_classes
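# Total episodic budget: mem_per_class exemplars for each of the
# classes_per_task * num_tasks classes; when KEEP_EPISODIC_MEMORY_FULL is False
# each task later fills its own slice of this buffer.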
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name and 'attr_embed' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
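# One mask row per task head: model.output_mask is zipped row-by-row with this
# array below, so row t activates only the classes assigned to task t.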
# Labels for all the tasks that we have seen in the past
prev_task_labels = []
prev_class_attrs = np.zeros_like(class_attr)
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for the softmax
logit_mask = np.zeros(TOTAL_CLASSES)
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# If sampling flag is set append the previous datasets
if do_sampling:
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
if task > 0:
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, last_task_x, last_task_y_)
else:
task_train_images = task_tr_images
task_train_labels = task_tr_labels
else:
# Extract training images and labels for the current task
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
# Test for the tasks that we've seen so far
test_labels.extend(task_labels[task])
# Declare variables to store sample importance if sampling flag is set
if do_sampling:
# Get the sample weighting
task_sample_weights = get_sample_weights(task_train_labels, test_labels)
else:
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
# Train a task observing sequence of data
logit_mask[:] = 0
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if cross_validate_mode:
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = train_iters
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
if MULTI_TASK:
logit_mask[:] = 1.0
masked_class_attrs = class_attr
else:
# Attribute mask
masked_class_attrs = np.zeros_like(class_attr)
if do_sampling:
masked_class_attrs[test_labels] = class_attr[test_labels]
else:
masked_class_attrs[task_labels[task]] = class_attr[task_labels[task]]
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode and not MULTI_TASK:
#if (iters <= 50 and iters % 5 == 0) or (iters > 50 and iters % 50 == 0):
if (iters < 10) or (iters % 5 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
ftask.append(fbatch)
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
else:
logit_mask[:] = 0
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
prev_class_attrs = np.zeros_like(class_attr)
prev_class_attrs[task_labels[prev_task]] = class_attr[task_labels[prev_task]]
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
a_gem_logit_mask[tt][task_labels[tt]] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.class_attr: prev_class_attrs, model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 50 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'A-GEM':
# Update the previous task labels and attributes
prev_task_labels += task_labels[task]
prev_class_attrs[prev_task_labels] = class_attr[prev_task_labels]
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
# TODO: For MAS, should the gradients be for current task or all the previous tasks
model.task_updates(sess, task, task_train_images, task_labels[task], num_classes_per_task=classes_per_task, class_attr=class_attr, online_cross_val=online_cross_val)
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, features, episodic_mem_size, task, episodic_images, episodic_labels, task_labels=task_labels[task], is_herding=True)
else:
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples] = imp_labels
episodic_filled_counter += total_imp_samples
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique Labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If sampling flag is set, store few of the samples from previous task
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([task_train_images.shape[0]], dtype=np.float32)
# Get the important samples from the current task
task_data = {
'images': task_tr_images,
'labels': task_tr_labels,
}
imp_images, imp_labels = sample_from_dataset(task_data, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
print('Task: {} Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask)
else:
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, class_attr, num_classes_per_task, test_tasks, task):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
masked_class_attrs = np.zeros_like(class_attr)
masked_class_attrs[labels] = class_attr[labels]
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
total_test_samples = task_test_images.shape[0]
samples_at_a_time = 10
total_corrects = 0
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][labels] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[labels] = 1.0
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: task_test_labels[offset:offset+samples_at_a_time],
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: task_test_labels[offset:offset+num_residuals],
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Mean accuracy on the task
acc = total_corrects/ float(total_test_samples)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Get the task labels from the total number of tasks and full label space
classes_per_task = TOTAL_CLASSES// NUM_TASKS
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split CUB dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets, CUB_attr = construct_split_cub(data_labs, args.data_dir, CUB_TRAIN_LIST, CUB_TEST_LIST, IMG_HEIGHT, IMG_WIDTH, attr_file=CUB_ATTR_LIST)
if args.online_cross_val:
CUB_attr[K_FOR_CROSS_VAL*classes_per_task:] = 0
else:
CUB_attr[:K_FOR_CROSS_VAL*classes_per_task] = 0
if args.cross_validate_mode:
models_list = MODELS
learning_rate_list = [0.3, 0.1, 0.01, 0.003, 0.001]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [10]
learning_rate_list = [0.03]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.03]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CUB',
'HYBRID': args.set_hybrid,
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_CUB_HERDING_%r_HYB_%r_%s_%r_%s_%s_%s_%r_%s-"%(args.is_herding, args.set_hybrid, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(RANDOM_SEED)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
attr = tf.placeholder(tf.float32, shape=[TOTAL_CLASSES, ATTR_DIMS])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the Model/ construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, attr=attr)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x, attr=attr)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, CUB_attr, classes_per_task, args.cross_validate_mode,
args.train_single_epoch, args.do_sampling, args.is_herding, args.mem_size, args.train_iters,
args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CUB_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_cub_hybrid.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tensorflow as tf
import numpy as np
def vgg_conv_layer(x, kernel_size, out_channels, stride, var_list, pad="SAME", name="conv"):
"""
Define API for conv operation. This includes the kernel declaration and
the conv operation, followed by a ReLU.
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
#n = kernel_size * kernel_size * out_channels
n = kernel_size * in_channels
stdv = 1.0 / math.sqrt(n)
w = tf.get_variable('kernel_weights', [kernel_size, kernel_size, in_channels, out_channels],
tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
b = tf.get_variable('kernel_biases', [out_channels], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the convolution operation
bias = tf.nn.bias_add(tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad), b)
relu = tf.nn.relu(bias)
return relu
def vgg_fc_layer(x, out_dim, var_list, apply_relu=True, name="fc"):
"""
Define API for the fully connected layer. This includes both the variable
declaration and matmul operation.
"""
in_dim = x.get_shape().as_list()[1]
stdv = 1.0 / math.sqrt(in_dim)
with tf.variable_scope(name):
# Define the weights and biases for this layer
w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the FC operation
output = tf.matmul(x, w) + b
# Apply relu if needed
if apply_relu:
output = tf.nn.relu(output)
return output
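# Illustrative usage of the two helpers above (hypothetical tensor and variable
# names, not part of this file):
#   trainable = []
#   h = vgg_conv_layer(images, kernel_size=3, out_channels=64, stride=1,
#                      var_list=trainable, name='conv1_1')
#   logits = vgg_fc_layer(flattened, out_dim=200, var_list=trainable,
#                         apply_relu=False, name='fc_out')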
|
agem-main
|
utils/vgg_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .data_utils import construct_permute_mnist, construct_split_mnist, construct_split_cifar, construct_split_cub, construct_split_imagenet
from .data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip
from .utils import clone_variable_list, create_fc_layer, create_conv_layer, sample_from_dataset, update_episodic_memory, update_episodic_memory_with_less_data, concatenate_datasets
from .utils import samples_for_each_class, sample_from_dataset_icarl, get_sample_weights, compute_fgt, load_task_specific_data, load_task_specific_data_in_proportion
from .utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior
from .vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from .resnet_utils import _conv, _fc, _bn, _residual_block, _residual_block_first
from .vgg_utils import vgg_conv_layer, vgg_fc_layer
|
agem-main
|
utils/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tensorflow as tf
import numpy as np
def _conv(x, kernel_size, out_channels, stride, var_list, pad="SAME", name="conv"):
"""
Define API for conv operation. This includes the kernel declaration and
the conv operation.
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
#n = kernel_size * kernel_size * out_channels
n = kernel_size * in_channels
stdv = 1.0 / math.sqrt(n)
w = tf.get_variable('kernel', [kernel_size, kernel_size, in_channels, out_channels],
tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
#initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0/n)))
# Append the variable to the trainable variables list
var_list.append(w)
# Do the convolution operation
output = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad)
return output
def _fc(x, out_dim, var_list, name="fc", is_cifar=False):
"""
Define API for the fully connected layer. This includes both the variable
declaration and matmul operation.
"""
in_dim = x.get_shape().as_list()[1]
stdv = 1.0 / math.sqrt(in_dim)
with tf.variable_scope(name):
# Define the weights and biases for this layer
w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
#initializer=tf.truncated_normal_initializer(stddev=0.1))
if is_cifar:
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
else:
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.constant_initializer(0))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the FC operation
output = tf.matmul(x, w) + b
return output
def _bn(x, var_list, train_phase, name='bn_'):
"""
Batch normalization on convolutional maps.
Args:
Return:
"""
n_out = x.get_shape().as_list()[3]
with tf.variable_scope(name):
beta = tf.get_variable('beta', shape=[n_out], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
gamma = tf.get_variable('gamma', shape=[n_out], dtype=tf.float32, initializer=tf.constant_initializer(1.0))
var_list.append(beta)
var_list.append(gamma)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.9)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(train_phase,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
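# train_phase=True normalizes with the batch moments and refreshes their
# exponential moving averages; train_phase=False reuses the stored averages,
# which is why the training scripts feed model.train_phase explicitly.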
return normed
def _residual_block(x, trainable_vars, train_phase, apply_relu=True, name="unit"):
"""
ResNet block when the number of channels across the skip connections are the same
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
shortcut = x
x = _conv(x, 3, in_channels, 1, trainable_vars, name='conv_1')
x = _bn(x, trainable_vars, train_phase, name="bn_1")
x = tf.nn.relu(x)
x = _conv(x, 3, in_channels, 1, trainable_vars, name='conv_2')
x = _bn(x, trainable_vars, train_phase, name="bn_2")
x = x + shortcut
if apply_relu == True:
x = tf.nn.relu(x)
return x
def _residual_block_first(x, out_channels, strides, trainable_vars, train_phase, apply_relu=True, name="unit", is_ATT_DATASET=False):
"""
A generic ResNet Block
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
# Figure out the shortcut connection first
if in_channels == out_channels:
if strides == 1:
shortcut = tf.identity(x)
else:
shortcut = tf.nn.max_pool(x, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID')
else:
shortcut = _conv(x, 1, out_channels, strides, trainable_vars, name="shortcut")
if not is_ATT_DATASET:
shortcut = _bn(shortcut, trainable_vars, train_phase, name="bn_0")
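# When the channel count or stride changes, the identity shortcut is replaced by
# a strided 1x1 convolution (plus BN unless is_ATT_DATASET) so that the residual
# addition below has matching shapes.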
# Residual block
x = _conv(x, 3, out_channels, strides, trainable_vars, name="conv_1")
x = _bn(x, trainable_vars, train_phase, name="bn_1")
x = tf.nn.relu(x)
x = _conv(x, 3, out_channels, 1, trainable_vars, name="conv_2")
x = _bn(x, trainable_vars, train_phase, name="bn_2")
x = x + shortcut
if apply_relu:
x = tf.nn.relu(x)
return x
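# Illustrative sketch (not part of the original pipeline): how the helpers above
# are typically composed into one ResNet stage. The names prefixed with `demo_`
# and the shapes are hypothetical.
def _demo_resnet_stage():
    demo_vars = []
    demo_train_phase = tf.placeholder(tf.bool, name='demo_train_phase')
    demo_x = tf.placeholder(tf.float32, [None, 32, 32, 16], name='demo_x')
    # Downsample and widen the feature maps, then refine with an identity block
    h = _residual_block_first(demo_x, 32, 2, demo_vars, demo_train_phase, name='demo_unit_1')
    h = _residual_block(h, demo_vars, demo_train_phase, name='demo_unit_2')
    return h, demo_vars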
|
agem-main
|
utils/resnet_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define utility functions for manipulating datasets
"""
import os
import numpy as np
import sys
from copy import deepcopy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import tarfile
import zipfile
import random
import cv2
#IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
IMG_MEAN = np.array((103.94,116.78,123.68), dtype=np.float32)
############################################################
### Data augmentation utils ################################
############################################################
def image_scaling(images):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
images: Training images to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(images)[1]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(images)[2]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
images = tf.image.resize_images(images, new_shape)
result = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), images)
return result
def random_crop_and_pad_image(images, crop_h, crop_w):
"""
Randomly crop and pads the input images.
Args:
images: Training images to crop/pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
"""
image_shape = tf.shape(images)
image_pad = tf.image.pad_to_bounding_box(images, 0, 0, tf.maximum(crop_h, image_shape[1]), tf.maximum(crop_w, image_shape[2]))
img_crop = tf.map_fn(lambda img: tf.random_crop(img, [crop_h,crop_w,3]), image_pad)
return img_crop
def random_horizontal_flip(x):
"""
Randomly flip a batch of images horizontally
Args:
x Tensor of shape B x H x W x C
Returns:
random_flipped Randomly flipped tensor of shape B x H x W x C
"""
# Define random horizontal flip
flips = [(slice(None, None, None), slice(None, None, random.choice([-1, None])), slice(None, None, None))
for _ in range(x.shape[0])]
random_flipped = np.array([img[flip] for img, flip in zip(x, flips)])
return random_flipped
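# Minimal usage sketch (illustrative only): flipping a toy NumPy batch. The
# array below is a stand-in for a real batch of training images.
def _demo_random_horizontal_flip():
    demo_batch = np.zeros((4, 8, 8, 3), dtype=np.float32)  # B x H x W x C
    flipped = random_horizontal_flip(demo_batch)
    return flipped.shape  # (4, 8, 8, 3)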
############################################################
### AWA dataset utils #####################################
############################################################
def _AWA_read_img_from_file(data_dir, file_name, img_height, img_width):
count = 0
imgs = []
labels = []
def dense_to_one_hot(labels_dense, num_classes=50):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
with open(file_name) as f:
for line in f:
img_name, img_label = line.split()
img_file = data_dir.rstrip('\\/') + '/' + img_name
img = cv2.imread(img_file).astype(np.float32)
# HWC -> WHC, compatible with caffe weights
#img = np.transpose(img, [1, 0, 2])
img = cv2.resize(img, (img_width, img_height))
# Convert RGB to BGR
img_r, img_g, img_b = np.split(img, 3, axis=2)
img = np.concatenate((img_b, img_g, img_r), axis=2)
# Extract mean
img -= IMG_MEAN
imgs += [img]
labels += [int(img_label)]
count += 1
if count % 1000 == 0:
print('Finish reading {:07d}'.format(count))
# Convert the labels to one-hot
y = dense_to_one_hot(np.array(labels))
return np.array(imgs), y
def _AWA_get_data(data_dir, train_list_file, val_list_file, test_list_file, img_height, img_width):
""" Reads and parses examples from AWA dataset """
dataset = dict()
dataset['train'] = []
dataset['validation'] = []
dataset['test'] = []
num_val_img = 0 # you can change the number of validation images here TODO: Pass this as argument
train_img = []
train_label = []
validation_img = []
validation_label = []
test_img = []
test_label = []
# Read train, validation and test files
train_img, train_label = _AWA_read_img_from_file(data_dir, train_list_file, img_height, img_width)
#validation_img, validation_label = _AWA_read_img_from_file(data_dir, val_list_file, img_height, img_width)
test_img, test_label = _AWA_read_img_from_file(data_dir, test_list_file, img_height, img_width)
dataset['train'].append(train_img)
dataset['train'].append(train_label)
#dataset['validation'].append(validation_img)
#dataset['validation'].append(validation_label)
dataset['test'].append(test_img)
dataset['test'].append(test_label)
return dataset
def construct_split_awa(task_labels, data_dir, train_list_file, val_list_file, test_list_file, img_height, img_width, attr_file=None):
"""
Construct Split AWA dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where the AWA dataset will be read
train_list_file File containing names of training images
val_list_file File containing names of validation images
test_list_file File containing names of test images
img_height Height of image
img_width Width of image
attr_file File from where to load the attributes
"""
# Get the awa dataset
awa_data = _AWA_get_data(data_dir, train_list_file, val_list_file, test_list_file, img_height, img_width)
# Get the attribute vector
if attr_file:
with open(attr_file, 'rb') as f:
awa_attr = pickle.load(f)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
#sets = ["train", "validation", "test"]
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = awa_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
awa = {
'train': train,
#'validation': validation,
'test': test,
}
datasets.append(awa)
if attr_file:
return datasets, awa_attr
else:
return datasets
############################################################
### CUB dataset utils #####################################
############################################################
def _CUB_read_img_from_file(data_dir, file_name, img_height, img_width):
count = 0
imgs = []
labels = []
def dense_to_one_hot(labels_dense, num_classes=200):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
with open(file_name) as f:
for line in f:
img_name, img_label = line.split()
img_file = data_dir.rstrip('\\/') + '/' + img_name
img = cv2.imread(img_file).astype(np.float32)
# HWC -> WHC, compatible with caffe weights
#img = np.transpose(img, [1, 0, 2])
img = cv2.resize(img, (img_width, img_height))
# Convert RGB to BGR
img_r, img_g, img_b = np.split(img, 3, axis=2)
img = np.concatenate((img_b, img_g, img_r), axis=2)
# Extract mean
img -= IMG_MEAN
imgs += [img]
labels += [int(img_label)]
count += 1
if count % 1000 == 0:
print('Finish reading {:07d}'.format(count))
# Convert the labels to one-hot
y = dense_to_one_hot(np.array(labels))
return np.array(imgs), y
def _CUB_get_data(data_dir, train_list_file, test_list_file, img_height, img_width):
""" Reads and parses examples from CUB dataset """
dataset = dict()
dataset['train'] = []
dataset['test'] = []
num_val_img = 0 # you can change the number of validation images here TODO: Pass this as argument
train_img = []
train_label = []
test_img = []
test_label = []
# Read train and test files
train_img, train_label = _CUB_read_img_from_file(data_dir, train_list_file, img_height, img_width)
test_img, test_label = _CUB_read_img_from_file(data_dir, test_list_file, img_height, img_width)
dataset['train'].append(train_img)
dataset['train'].append(train_label)
dataset['test'].append(test_img)
dataset['test'].append(test_label)
return dataset
def construct_split_cub(task_labels, data_dir, train_list_file, test_list_file, img_height, img_width, attr_file=None):
"""
Construct Split CUB-200 dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where the CUB-200 dataset will be read
train_list_file File containing names of training images
test_list_file File containing names of test images
img_height Height of image
img_width Width of image
attr_file File from where to load the attributes
"""
# Get the cub dataset
cub_data = _CUB_get_data(data_dir, train_list_file, test_list_file, img_height, img_width)
# Get the attribute vector
if attr_file:
with open(attr_file, 'rb') as f:
cub_attr = pickle.load(f)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = cub_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
cub = {
'train': train,
'test': test,
}
datasets.append(cub)
if attr_file:
return datasets, cub_attr
else:
return datasets
############################################################
### CIFAR download utils ###################################
############################################################
CIFAR_10_URL = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
CIFAR_100_URL = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
CIFAR_10_DIR = "/cifar_10"
CIFAR_100_DIR = "/cifar_100"
def construct_split_cifar(task_labels, is_cifar_100=True):
"""
Construct Split CIFAR-10 and CIFAR-100 datasets
Args:
task_labels Labels of different tasks
is_cifar_100 If True, construct Split CIFAR-100; otherwise construct Split CIFAR-10
"""
data_dir = 'CIFAR_data'
# Get the cifar dataset
cifar_data = _get_cifar(data_dir, is_cifar_100)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "validation", "test"]
for task in task_labels:
for set_name in sets:
this_set = cifar_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
cifar = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(cifar)
return datasets
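# Illustrative usage sketch: task labels are disjoint lists of class ids, e.g.
# five CIFAR-100 classes per task. The split below is hypothetical, and the
# call downloads the data on first use, so it is wrapped in a helper.
def _demo_split_cifar():
    demo_task_labels = [list(range(t * 5, (t + 1) * 5)) for t in range(4)]
    return construct_split_cifar(demo_task_labels, is_cifar_100=True)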
def _get_cifar(data_dir, is_cifar_100):
"""
Get the CIFAR-10 and CIFAR-100 datasets
Args:
data_dir Directory where the downloaded data will be stored
"""
x_train = None
y_train = None
x_validation = None
y_validation = None
x_test = None
y_test = None
l = None
# Download the dataset if needed
_cifar_maybe_download_and_extract(data_dir)
# Dictionary to store the dataset
dataset = dict()
dataset['train'] = []
dataset['validation'] = []
dataset['test'] = []
def dense_to_one_hot(labels_dense, num_classes=100):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
if is_cifar_100:
# Load the training data of CIFAR-100
f = open(data_dir + CIFAR_100_DIR + '/train', 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['fine_labels'])
_Y = dense_to_one_hot(_Y, num_classes=100)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
# Compute the data mean for normalization
x_train_mean = np.mean(_X, axis=0)
x_train = _X[:40000]
y_train = _Y[:40000]
x_validation = _X[40000:]
y_validation = _Y[40000:]
else:
# Load all the training batches of the CIFAR-10
for i in range(5):
f = open(data_dir + CIFAR_10_DIR + '/data_batch_' + str(i + 1), 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
_Y = dense_to_one_hot(_Y, num_classes=10)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
if x_train is None:
x_train = _X
y_train = _Y
else:
x_train = np.concatenate((x_train, _X), axis=0)
y_train = np.concatenate((y_train, _Y), axis=0)
# Compute the data mean for normalization
x_train_mean = np.mean(x_train, axis=0)
x_validation = x_train[40000:] # We don't use the validation set with CIFAR-10
y_validation = y_train[40000:]
# Normalize the train and validation sets
x_train -= x_train_mean
x_validation -= x_train_mean
dataset['train'].append(x_train)
dataset['train'].append(y_train)
dataset['train'].append(l)
dataset['validation'].append(x_validation)
dataset['validation'].append(y_validation)
dataset['validation'].append(l)
if is_cifar_100:
# Load the test batch of CIFAR-100
f = open(data_dir + CIFAR_100_DIR + '/test', 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['fine_labels'])
_Y = dense_to_one_hot(_Y, num_classes=100)
else:
# Load the test batch of CIFAR-10
f = open(data_dir + CIFAR_10_DIR + '/test_batch', 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict["data"]
_Y = np.array(datadict['labels'])
_Y = dense_to_one_hot(_Y, num_classes=10)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
x_test = _X
y_test = _Y
# Normalize the test set
x_test -= x_train_mean
dataset['test'].append(x_test)
dataset['test'].append(y_test)
dataset['test'].append(l)
return dataset
def _print_download_progress(count, block_size, total_size):
"""
Show the download progress of the cifar data
"""
pct_complete = float(count * block_size) / total_size
msg = "\r- Download progress: {0:.1%}".format(pct_complete)
sys.stdout.write(msg)
sys.stdout.flush()
def _cifar_maybe_download_and_extract(data_dir):
"""
Routine to download and extract the cifar dataset
Args:
data_dir Directory where the downloaded data will be stored
"""
cifar_10_directory = data_dir + CIFAR_10_DIR
cifar_100_directory = data_dir + CIFAR_100_DIR
# If the data_dir does not exist, create the directory and download
# the data
if not os.path.exists(data_dir):
os.makedirs(data_dir)
url = CIFAR_10_URL
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
zip_cifar_10 = file_path
file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path, mode="r").extractall(data_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
tarfile.open(name=file_path, mode="r:gz").extractall(data_dir)
print("Done.")
url = CIFAR_100_URL
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
zip_cifar_100 = file_path
file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path, mode="r").extractall(data_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
tarfile.open(name=file_path, mode="r:gz").extractall(data_dir)
print("Done.")
os.rename(data_dir + "/cifar-10-batches-py", cifar_10_directory)
os.rename(data_dir + "/cifar-100-python", cifar_100_directory)
os.remove(zip_cifar_10)
os.remove(zip_cifar_100)
#########################################
## MNIST Utils ##########################
#########################################
def reformat_mnist(datasets):
"""
Routine to reformat the MNIST dataset into a 3D tensor
"""
image_size = 28 # Height of MNIST dataset
num_channels = 1 # Gray scale
for i in range(len(datasets)):
sets = ["train", "validation", "test"]
for set_name in sets:
datasets[i]['%s'%set_name]['images'] = datasets[i]['%s'%set_name]['images'].reshape\
((-1, image_size, image_size, num_channels)).astype(np.float32)
return datasets
def construct_permute_mnist(num_tasks):
"""
Construct a dataset of permuted MNIST images
Args:
num_tasks Number of tasks
Returns:
datasets A list of permuted MNIST datasets, one per task
"""
# Download and store mnist dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
for i in range(num_tasks):
perm_inds = list(range(mnist.train.images.shape[1]))
np.random.shuffle(perm_inds)
copied_mnist = deepcopy(mnist)
sets = ["train", "validation", "test"]
for set_name in sets:
this_set = getattr(copied_mnist, set_name) # shallow copy
this_set._images = np.transpose(np.array([this_set.images[:,c] for c in perm_inds]))
if set_name == "train":
train = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "validation":
validation = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "test":
test = {
'images':this_set._images,
'labels':this_set.labels,
}
dataset = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(dataset)
return datasets
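# Minimal usage sketch (illustrative): each returned dict holds one permuted
# MNIST task with 'train'/'validation'/'test' splits of flattened 784-d images.
def _demo_permute_mnist():
    demo_tasks = construct_permute_mnist(num_tasks=3)  # downloads MNIST on first call
    return [task['train']['images'].shape for task in demo_tasks]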
def construct_split_mnist(task_labels):
"""
Construct a split mnist dataset
Args:
task_labels List of split labels
Returns:
datasets A list of split datasets, one per task
"""
# Download and store mnist dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
sets = ["train", "validation", "test"]
for task in task_labels:
for set_name in sets:
this_set = getattr(mnist, set_name)
global_class_indices = np.column_stack(np.nonzero(this_set.labels))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(mnist.train.images[class_indices, :]),
'labels':deepcopy(mnist.train.labels[class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(mnist.validation.images[class_indices, :]),
'labels':deepcopy(mnist.validation.labels[class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(mnist.test.images[class_indices, :]),
'labels':deepcopy(mnist.test.labels[class_indices, :]),
}
mnist2 = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(mnist2)
return datasets
###################################################
###### ImageNet Utils #############################
###################################################
def construct_split_imagenet(task_labels, data_dir):
"""
Construct Split ImageNet dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where to load the imagenet data
"""
# Load the imagenet dataset
imagenet_data = _load_imagenet(data_dir)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = imagenet_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
imagenet = {
'train': train,
'test': test,
}
datasets.append(imagenet)
return datasets
def _load_imagenet(data_dir):
"""
Load the ImageNet data
Args:
data_dir Directory where the pickle files have been dumped
"""
x_train = None
y_train = None
x_test = None
y_test = None
# Dictionary to store the dataset
dataset = dict()
dataset['train'] = []
dataset['test'] = []
def dense_to_one_hot(labels_dense, num_classes=100):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
# Load the training batches
for i in range(4):
f = open(data_dir + '/train_batch_' + str(i), 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
# Convert the labels to one-hot
_Y = dense_to_one_hot(_Y)
# Normalize the images
_X = np.array(_X, dtype=float)/ 255.0
_X = _X.reshape([-1, 224, 224, 3])
if x_train is None:
x_train = _X
y_train = _Y
else:
x_train = np.concatenate((x_train, _X), axis=0)
y_train = np.concatenate((y_train, _Y), axis=0)
dataset['train'].append(x_train)
dataset['train'].append(y_train)
# Load test batches
for i in range(4):
f = open(data_dir + '/test_batch_' + str(i), 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
# Convert the labels to one-hot
_Y = dense_to_one_hot(_Y)
# Normalize the images
_X = np.array(_X, dtype=float)/ 255.0
_X = _X.reshape([-1, 224, 224, 3])
if x_test is None:
x_test = _X
y_test = _Y
else:
x_test = np.concatenate((x_test, _X), axis=0)
y_test = np.concatenate((y_test, _Y), axis=0)
dataset['test'].append(x_test)
dataset['test'].append(y_test)
return dataset
|
agem-main
|
utils/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define some utility functions
"""
import numpy as np
import tensorflow as tf
def clone_variable_list(variable_list):
"""
Clone the variable list
"""
return [tf.identity(var) for var in variable_list]
def create_fc_layer(input, w, b, apply_relu=True):
"""
Construct a Fully Connected layer
Args:
input Input tensor
w Weights
b Biases
apply_relu Apply relu (T/F)?
Returns:
Output of an FC layer
"""
with tf.name_scope('fc_layer'):
output = tf.matmul(input, w) + b
# Apply relu
if apply_relu:
output = tf.nn.relu(output)
return output
def create_conv_layer(input, w, b, stride=1, apply_relu=True):
"""
Construct a convolutional layer
Args:
input Input tensor
w Weights
b Biases
stride Convolution stride
apply_relu Apply relu (T/F)?
Returns:
Output of a conv layer
"""
with tf.name_scope('conv_layer'):
# Do the convolution operation
output = tf.nn.conv2d(input, w, [1, stride, stride, 1], padding='SAME') + b
# Apply relu
if apply_relu:
output = tf.nn.relu(output)
return output
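# Illustrative sketch: stacking the two layer helpers above into a tiny
# conv -> fc head. The variable names and shapes are hypothetical.
def _demo_small_head():
    demo_x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='demo_x')
    w_conv = tf.get_variable('demo_w_conv', [3, 3, 1, 8], tf.float32)
    b_conv = tf.get_variable('demo_b_conv', [8], tf.float32)
    h = create_conv_layer(demo_x, w_conv, b_conv, stride=2)  # SAME padding -> 14 x 14 x 8
    h = tf.reshape(h, [-1, 14 * 14 * 8])
    w_fc = tf.get_variable('demo_w_fc', [14 * 14 * 8, 10], tf.float32)
    b_fc = tf.get_variable('demo_b_fc', [10], tf.float32)
    return create_fc_layer(h, w_fc, b_fc, apply_relu=False)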
def load_task_specific_data_in_proportion(datasets, task_labels, classes_appearing_in_tasks, class_seen_already):
"""
Loads task-specific data from the dataset, in proportion to how often each class appears across tasks
"""
global_class_indices = np.column_stack(np.nonzero(datasets['labels']))
count = 0
for cls in task_labels:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
total_class_instances = class_indices.size
num_instances_to_choose = total_class_instances // classes_appearing_in_tasks[cls]
offset = (class_seen_already[cls] - 1) * num_instances_to_choose
final_class_indices = class_indices[offset: offset+num_instances_to_choose]
else:
current_class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
total_class_instances = current_class_indices.size
num_instances_to_choose = total_class_instances // classes_appearing_in_tasks[cls]
offset = (class_seen_already[cls] - 1) * num_instances_to_choose
final_class_indices = np.append(final_class_indices, current_class_indices[offset: offset+num_instances_to_choose])
count += 1
final_class_indices = np.sort(final_class_indices, axis=None)
return datasets['images'][final_class_indices, :], datasets['labels'][final_class_indices, :]
def load_task_specific_data(datasets, task_labels):
"""
Loads task specific data from the datasets
"""
global_class_indices = np.column_stack(np.nonzero(datasets['labels']))
count = 0
for cls in task_labels:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
return datasets['images'][class_indices, :], datasets['labels'][class_indices, :]
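# Illustrative sketch: pulling out the examples of classes 0 and 1 from a
# one-hot labelled split. The tiny random arrays below are stand-ins.
def _demo_load_task_specific_data():
    demo_split = {
        'images': np.random.rand(6, 784).astype(np.float32),
        'labels': np.eye(3, dtype=np.float32)[[0, 1, 2, 0, 1, 2]],
    }
    imgs, lbls = load_task_specific_data(demo_split, task_labels=[0, 1])
    return imgs.shape, lbls.shape  # (4, 784), (4, 3)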
def samples_for_each_class(dataset_labels, task):
"""
Number of samples for each class in the task
Args:
dataset_labels Labels to count samples from
task Labels within a task
Returns:
num_samples Array with the number of samples for each class in the task
"""
num_samples = np.zeros([len(task)], dtype=np.float32)
i = 0
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset_labels))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
num_samples[i] = len(class_indices)
i += 1
return num_samples
def get_sample_weights(labels, tasks):
weights = np.zeros([labels.shape[0]], dtype=np.float32)
for label in tasks:
global_class_indices = np.column_stack(np.nonzero(labels))
class_indices = np.array(np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])]))
total_class_samples = class_indices.shape[0]
weights[class_indices] = 1.0/ total_class_samples
# Rescale the weights such that the minimum weight is 1. The most frequent class
# then gets weight 1 and less observed classes get proportionally larger weights.
weights /= weights.min()
return weights
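# Worked sketch of the inverse-frequency weighting above: with a class that
# appears twice and a class that appears once, the rarer class ends up with
# twice the weight after the min-rescaling. Toy labels only.
def _demo_sample_weights():
    demo_labels = np.eye(2, dtype=np.float32)[[0, 0, 1]]
    return get_sample_weights(demo_labels, tasks=[0, 1])  # -> [1., 1., 2.]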
def update_episodic_memory_with_less_data(task_dataset, importance_array, total_mem_size, task, episodic_images, episodic_labels, task_labels=None, is_herding=False):
"""
Update the episodic memory when the task data is smaller than the memory size
Args:
task_dataset Dictionary with 'images' and 'labels' of the current task
importance_array Importance scores for the task examples
total_mem_size Total size of the episodic memory
task Index of the current task (0, 1, ...)
episodic_images/episodic_labels Episodic memory arrays (updated in place)
Returns:
None; the episodic memory is updated in place
"""
num_examples_in_task = task_dataset['images'].shape[0]
# Empty spaces in the episodic memory
empty_spaces = np.sum(np.sum(episodic_labels, axis=1) == 0)
if empty_spaces >= num_examples_in_task:
# Find where the empty spaces are in order
empty_indices = np.where(np.sum(episodic_labels, axis=1) == 0)[0]
# Store the whole task data in the episodic memory
episodic_images[empty_indices[:num_examples_in_task]] = task_dataset['images']
episodic_labels[empty_indices[:num_examples_in_task]] = task_dataset['labels']
elif empty_spaces == 0:
# Compute the amount of space in the episodic memory for the new task
space_for_new_task = total_mem_size// (task + 1) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice(total_mem_size, space_for_new_task, replace=False) # Sample without replacement
# Get the indices of important samples from the task dataset
label_importance = importance_array + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
task_mem_indices = np.random.choice(num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
else:
# When there is some free space but not enough to store the whole task
# Find where the empty spaces are in order
empty_indices = np.where(np.sum(episodic_labels, axis=1) == 0)[0]
# Store some of the examples from task in the memory
episodic_images[empty_indices] = task_dataset['images'][:len(empty_indices)]
episodic_labels[empty_indices] = task_dataset['labels'][:len(empty_indices)]
# Adjust the remaining samples in the episodic memory
space_for_new_task = (total_mem_size // (task + 1)) - len(empty_indices) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice((total_mem_size - len(empty_indices)), space_for_new_task, replace=False) # Sample without replacement
# Get the indices of important samples from the task dataset
label_importance = importance_array[len(empty_indices):] + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
updated_num_examples_in_task = num_examples_in_task - len(empty_indices)
task_mem_indices = np.random.choice(updated_num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
task_mem_indices += len(empty_indices) # Add the offset
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
def update_episodic_memory(task_dataset, importance_array, total_mem_size, task, episodic_images, episodic_labels, task_labels=None, is_herding=False):
"""
Update the episodic memory with new task data
Args:
task_dataset Dictionary with 'images' and 'labels' of the current task
importance_array Importance scores for the task examples
total_mem_size Total size of the episodic memory
task Index of the current task (0, 1, ...)
episodic_images/episodic_labels Episodic memory arrays (updated in place)
Returns:
None; the episodic memory is updated in place
"""
num_examples_in_task = task_dataset['images'].shape[0]
# Compute the amount of space in the episodic memory for the new task
space_for_new_task = total_mem_size// (task + 1) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice(total_mem_size, space_for_new_task, replace=False) # Sample without replacement
if is_herding and task_labels is not None:
# Get the samples based on herding
imp_images, imp_labels = sample_from_dataset_icarl(task_dataset, importance_array, task_labels, space_for_new_task//len(task_labels))
episodic_images[eps_mem_indices[np.arange(imp_images.shape[0])]] = imp_images
episodic_labels[eps_mem_indices[np.arange(imp_images.shape[0])]] = imp_labels
else:
# Get the indices of important samples from the task dataset
label_importance = importance_array + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
task_mem_indices = np.random.choice(num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
def sample_from_dataset(dataset, importance_array, task, samples_count, preds=None):
"""
Samples from a dataset based on a probability distribution
Args:
dataset Dataset to sample from
importance_array Importance scores (does not have to be a probability distribution)
task Labels within a task
samples_count Number of samples to return
Return:
images Important images
labels Important labels
"""
count = 0
# For each label in the task extract the important samples
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset['labels']))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
if (preds is not None):
# Find the indices where prediction match the correct label
pred_indices = np.where(preds == label)[0]
# Find the correct prediction indices
correct_pred_indices = np.intersect1d(pred_indices, class_indices)
else:
correct_pred_indices = class_indices
# Extract the importance for the label
label_importance = importance_array[correct_pred_indices] + 1e-32
label_importance /= np.sum(label_importance)
actual_samples_count = min(samples_count, np.count_nonzero(label_importance))
#print('Storing {} samples from {} class'.format(actual_samples_count, label))
# If no samples are correctly classified then skip saving the samples
if (actual_samples_count != 0):
# Extract the important indices
imp_indices = np.random.choice(correct_pred_indices, actual_samples_count, p=label_importance, replace=False)
if count == 0:
images = dataset['images'][imp_indices]
labels = dataset['labels'][imp_indices]
else:
images = np.vstack((images, dataset['images'][imp_indices]))
labels = np.vstack((labels, dataset['labels'][imp_indices]))
count += 1
if count != 0:
return images, labels
else:
return None, None
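# Illustrative sketch: with uniform importance scores this reduces to sampling
# `samples_count` examples per class without replacement. Toy data only.
def _demo_sample_from_dataset():
    demo_set = {
        'images': np.random.rand(8, 784).astype(np.float32),
        'labels': np.eye(2, dtype=np.float32)[[0, 1] * 4],
    }
    importance = np.ones(8, dtype=np.float32)
    imgs, lbls = sample_from_dataset(demo_set, importance, task=[0, 1], samples_count=2)
    return imgs.shape, lbls.shape  # (4, 784), (4, 2)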
def concatenate_datasets(current_images, current_labels, prev_images, prev_labels):
"""
Concatenates the current dataset with the previous ones. This is used for
adding important samples from the previous datasets
Args:
current_images Images of current dataset
current_labels Labels of current dataset
prev_images List containing images of previous datasets
prev_labels List containing labels of previous datasets
Returns:
images Concatenated images
labels Concatenated labels
"""
"""
images = current_images
labels = current_labels
for i in range(len(prev_images)):
images = np.vstack((images, prev_images[i]))
labels = np.vstack((labels, prev_labels[i]))
"""
images = np.concatenate((current_images, prev_images), axis=0)
labels = np.concatenate((current_labels, prev_labels), axis=0)
return images, labels
def sample_from_dataset_icarl(dataset, features, task, samples_count, preds=None):
"""
Samples from a dataset using iCaRL-style herding (mean of features)
Args:
dataset Dataset to sample from
features Features - activation before the last layer
task Labels within a task
samples_count Number of samples to return
Return:
images Important images
labels Important labels
"""
print('Herding based sampling!')
#samples_count = min(samples_count, dataset['images'].shape[0])
count = 0
# For each label in the task extract the important samples
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset['labels']))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
if (preds is not None):
# Find the indices where prediction match the correct label
pred_indices = np.where(preds == label)[0]
# Find the correct prediction indices
correct_pred_indices = np.intersect1d(pred_indices, class_indices)
else:
correct_pred_indices = class_indices
mean_feature = np.mean(features[correct_pred_indices, :], axis=0)
actual_samples_count = min(samples_count, len(correct_pred_indices))
# If no samples are correctly classified then skip saving the samples
imp_indices = np.zeros(actual_samples_count, dtype=np.int32)
sample_sum= np.zeros(mean_feature.shape)
if (actual_samples_count != 0):
# Extract the important indices
for i in range(actual_samples_count):
sample_mean = (features[correct_pred_indices, :] +
np.tile(sample_sum, [len(correct_pred_indices),1]))/ float(i + 1)
norm_distance = np.linalg.norm((np.tile(mean_feature, [len(correct_pred_indices),1])
- sample_mean), ord=2, axis=1)
imp_indices[i] = correct_pred_indices[np.argmin(norm_distance)]
sample_sum = sample_sum + features[imp_indices[i], :]
if count == 0:
images = dataset['images'][imp_indices]
labels = dataset['labels'][imp_indices]
else:
images = np.vstack((images, dataset['images'][imp_indices]))
labels = np.vstack((labels, dataset['labels'][imp_indices]))
count += 1
if count != 0:
return images, labels
else:
return None, None
def average_acc_stats_across_runs(data, key):
"""
Compute the average accuracy statistics (mean and std) across runs
"""
num_runs = data.shape[0]
avg_acc = np.zeros(num_runs)
for i in range(num_runs):
avg_acc[i] = np.mean(data[i][-1])
return avg_acc.mean()*100, avg_acc.std()*100
def average_fgt_stats_across_runs(data, key):
"""
Compute the forgetting statistics (mean and std) across runs
"""
num_runs = data.shape[0]
fgt = np.zeros(num_runs)
wst_fgt = np.zeros(num_runs)
for i in range(num_runs):
fgt[i] = compute_fgt(data[i])
return fgt.mean(), fgt.std()
def compute_fgt(data):
"""
Given a TxT data matrix, compute average forgetting at T-th task
"""
num_tasks = data.shape[0]
T = num_tasks - 1
fgt = 0.0
for i in range(T):
fgt += np.max(data[:T,i]) - data[T, i]
avg_fgt = fgt/ float(num_tasks - 1)
return avg_fgt
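# Worked sketch of the forgetting metric: data[t, i] is the accuracy on task i
# after training on task t. For the hypothetical 3x3 matrix below, forgetting
# on task 0 is 0.9 - 0.7 and on task 1 is 0.8 - 0.6, so the average is 0.2.
def _demo_compute_fgt():
    demo_acc = np.array([[0.9, 0.1, 0.1],
                         [0.8, 0.8, 0.1],
                         [0.7, 0.6, 0.9]])
    return compute_fgt(demo_acc)  # -> 0.2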
def update_reservior(current_image, current_label, episodic_images, episodic_labels, M, N):
"""
Update the episodic memory with the current example using reservoir sampling
"""
if M > N:
episodic_images[N] = current_image
episodic_labels[N] = current_label
else:
j = np.random.randint(0, N)
if j < M:
episodic_images[j] = current_image
episodic_labels[j] = current_label
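# Minimal sketch of reservoir sampling with the helper above: stream examples
# one by one into a fixed-size buffer of M slots, where N counts the examples
# seen so far. The toy shapes are hypothetical.
def _demo_reservoir_sampling():
    M = 5  # episodic memory size
    mem_images = np.zeros((M, 784), dtype=np.float32)
    mem_labels = np.zeros((M, 10), dtype=np.float32)
    stream_images = np.random.rand(20, 784).astype(np.float32)
    stream_labels = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, 20)]
    for n in range(stream_images.shape[0]):
        update_reservior(stream_images[n], stream_labels[n], mem_images, mem_labels, M, n)
    return mem_images, mem_labels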
|
agem-main
|
utils/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define some utility functions
"""
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
import matplotlib.figure as figure
from six.moves import cPickle as pickle
def snapshot_experiment_eval(logdir, experiment_id, data):
"""
Store the output of the experiment in a file
"""
snapshot_file = logdir + '/' + experiment_id + '.pickle'
with open(snapshot_file, 'wb') as f:
pickle.dump(data, f)
print('Experimental Eval has been snapshotted to %s!'%(snapshot_file))
def snapshot_task_labels(logdir, experiment_id, data):
"""
Store the task labels of the experiment in a file
"""
snapshot_file = logdir + '/' + experiment_id + '_task_labels.pickle'
with open(snapshot_file, 'wb') as f:
pickle.dump(data, f)
print('Task labels have been snapshotted to %s!'%(snapshot_file))
def snapshot_experiment_meta_data(logdir, experiment_id, exper_meta_data):
"""
Store the meta-data of the experiment in a file
"""
meta_file = logdir + '/' + experiment_id + '.txt'
with open(meta_file, 'w') as f:
for key in exper_meta_data:
print('{}: {}'.format(key, exper_meta_data[key]))
f.write('{}:{} \n'.format(key, exper_meta_data[key]))
print('Experimental meta-data has been snapshotted to %s!'%(meta_file))
def plot_acc_multiple_runs(data, task_labels, valid_measures, n_stats, plot_name=None):
"""
Plots the accuracies
Args:
task_labels List of tasks
n_stats Number of runs
plot_name Name of the file where the plot will be saved
Returns:
"""
n_tasks = len(task_labels)
plt.figure(figsize=(14, 3))
axs = [plt.subplot(1,n_tasks+1,1)]
for i in range(1, n_tasks + 1):
axs.append(plt.subplot(1, n_tasks+1, i+1, sharex=axs[0], sharey=axs[0]))
fmt_chars = ['o', 's', 'd']
fmts = []
for i in range(len(valid_measures)):
fmts.append(fmt_chars[i%len(fmt_chars)])
plot_keys = sorted(data['mean'].keys())
for k, cval in enumerate(plot_keys):
label = "c=%g"%cval
mean_vals = data['mean'][cval]
std_vals = data['std'][cval]
for j in range(n_tasks+1):
plt.sca(axs[j])
errorbar_kwargs = dict(fmt="%s-"%fmts[k], markersize=5)
if j < n_tasks:
norm= np.sqrt(n_stats) # np.sqrt(n_stats) for SEM or 1 for STDEV
axs[j].errorbar(np.arange(n_tasks)+1, mean_vals[:, j], yerr=std_vals[:, j]/norm, label=label, **errorbar_kwargs)
else:
mean_stuff = []
std_stuff = []
for i in range(len(data['mean'][cval])):
mean_stuff.append(data['mean'][cval][i][:i+1].mean())
std_stuff.append(np.sqrt((data['std'][cval][i][:i+1]**2).sum())/(n_stats*np.sqrt(n_stats)))
plt.errorbar(range(1,n_tasks+1), mean_stuff, yerr=std_stuff, label="%s"%valid_measures[k], **errorbar_kwargs)
plt.xticks(np.arange(n_tasks)+1)
plt.xlim((1.0,5.5))
"""
# Uncomment this if clutter along y-axis needs to be removed
if j == 0:
axs[j].set_yticks([0.5,1])
else:
plt.setp(axs[j].get_yticklabels(), visible=False)
plt.ylim((0.45,1.1))
"""
for i, ax in enumerate(axs):
if i < n_tasks:
ax.set_title((['Task %d (%d to %d)'%(j+1,task_labels[j][0], task_labels[j][-1])\
for j in range(n_tasks)] + ['average'])[i], fontsize=8)
else:
ax.set_title("Average", fontsize=8)
ax.axhline(0.5, color='k', linestyle=':', label="chance", zorder=0)
handles, labels = axs[-1].get_legend_handles_labels()
# Reorder legend so chance is last
axs[-1].legend([handles[j] for j in [i for i in range(len(valid_measures)+1)]],
[labels[j] for j in [i for i in range(len(valid_measures)+1)]], loc='best', fontsize=6)
axs[0].set_xlabel("Tasks")
axs[0].set_ylabel("Accuracy")
plt.gcf().tight_layout()
plt.grid('on')
if plot_name is None:
plt.show()
else:
plt.savefig(plot_name)
def plot_histogram(data, n_bins=10, plot_name='my_hist'):
plt.hist(data, bins=n_bins)
plt.savefig(plot_name)
plt.close()
|
agem-main
|
utils/vis_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .model import Model
|
agem-main
|
model/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Model definition
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
from utils import clone_variable_list, create_fc_layer, create_conv_layer
from utils.resnet_utils import _conv, _fc, _bn, _residual_block, _residual_block_first
from utils.vgg_utils import vgg_conv_layer, vgg_fc_layer
PARAM_XI_STEP = 1e-3
NEG_INF = -1e32
EPSILON = 1e-32
HYBRID_ALPHA = 0.5
TRAIN_ENTROPY_BASED_SUM = False
def weight_variable(shape, name='fc', init_type='default'):
"""
Define weight variables
Args:
shape Shape of the weight variable tensor
Returns:
A tensor of the given shape, initialized from a truncated normal (or a constant when init_type is 'zero')
"""
with tf.variable_scope(name):
if init_type == 'default':
weights = tf.get_variable('weights', shape, tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
#weights = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='weights')
elif init_type == 'zero':
weights = tf.get_variable('weights', shape, tf.float32, initializer=tf.constant_initializer(0.1))
#weights = tf.Variable(tf.constant(0.1, shape=shape, dtype=np.float32), name='weights')
return weights
def bias_variable(shape, name='fc'):
"""
Define bias variables
Args:
shape Shape of the bias variable tensor
Returns:
A tensor of size shape initialized from a constant
"""
with tf.variable_scope(name):
biases = tf.get_variable('biases', shape, initializer=tf.constant_initializer(0.1))
return biases
#return tf.Variable(tf.constant(0.1, shape=shape, dtype=np.float32), name='biases') #TODO: Should we initialize it from 0
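# Minimal sketch (illustrative): the two helpers above are typically paired to
# build one dense layer; the scope name and shapes here are hypothetical.
def _demo_dense_layer():
    demo_x = tf.placeholder(tf.float32, [None, 784], name='demo_x')
    w = weight_variable([784, 256], name='demo_fc1')
    b = bias_variable([256], name='demo_fc1')
    return tf.nn.relu(tf.matmul(demo_x, w) + b)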
class Model:
"""
A class defining the model
"""
def __init__(self, x_train, y_, num_tasks, opt, imp_method, synap_stgth, fisher_update_after, fisher_ema_decay, network_arch='FC-S',
is_ATT_DATASET=False, x_test=None, attr=None):
"""
Instantiate the model
"""
# Define some placeholders which are used to feed the data to the model
self.y_ = y_
if imp_method == 'PNN':
self.train_phase = []
self.total_classes = int(self.y_[0].get_shape()[1])
self.train_phase = [tf.placeholder(tf.bool, name='train_phase_%d'%(i)) for i in range(num_tasks)]
self.output_mask = [tf.placeholder(dtype=tf.float32, shape=[self.total_classes]) for i in range(num_tasks)]
else:
self.total_classes = int(self.y_.get_shape()[1])
self.train_phase = tf.placeholder(tf.bool, name='train_phase')
if (imp_method == 'A-GEM' or imp_method == 'ER') and 'FC-' not in network_arch: # Only for Split-X setups
self.output_mask = [tf.placeholder(dtype=tf.float32, shape=[self.total_classes]) for i in range(num_tasks)]
self.mem_batch_size = tf.placeholder(dtype=tf.float32, shape=())
else:
self.output_mask = tf.placeholder(dtype=tf.float32, shape=[self.total_classes])
self.sample_weights = tf.placeholder(tf.float32, shape=[None])
self.task_id = tf.placeholder(dtype=tf.int32, shape=())
self.store_grad_batches = tf.placeholder(dtype=tf.float32, shape=())
self.keep_prob = tf.placeholder(dtype=tf.float32, shape=())
self.train_samples = tf.placeholder(dtype=tf.float32, shape=())
self.training_iters = tf.placeholder(dtype=tf.float32, shape=())
self.train_step = tf.placeholder(dtype=tf.float32, shape=())
self.violation_count = tf.Variable(0, dtype=tf.float32, trainable=False)
self.is_ATT_DATASET = is_ATT_DATASET # Whether to use a different (standard) ResNet-18 for the CUB dataset
if x_test is not None:
# If CUB dataset, use the augmented x (x_train) for training and the non-augmented x (x_test) for testing
self.x = tf.cond(self.train_phase, lambda: tf.identity(x_train), lambda: tf.identity(x_test))
train_shape = x_train.get_shape().as_list()
x = tf.reshape(self.x, [-1, train_shape[1], train_shape[2], train_shape[3]])
else:
# We don't use data augmentation for other datasets
self.x = x_train
x = self.x
# Class attributes for zero shot transfer
self.class_attr = attr
if self.class_attr is not None:
self.attr_dims = int(self.class_attr.get_shape()[1])
# Save the arguments passed from the main script
self.opt = opt
self.num_tasks = num_tasks
self.imp_method = imp_method
self.fisher_update_after = fisher_update_after
self.fisher_ema_decay = fisher_ema_decay
self.network_arch = network_arch
# A scalar constant for the synaptic regularization strength
self.synap_stgth = tf.constant(synap_stgth, shape=[1], dtype=tf.float32)
self.triplet_loss_scale = 2.1
# Define different variables
self.weights_old = []
self.star_vars = []
self.small_omega_vars = []
self.big_omega_vars = []
self.big_omega_riemann_vars = []
self.fisher_diagonal_at_minima = []
self.hebbian_score_vars = []
self.running_fisher_vars = []
self.tmp_fisher_vars = []
self.max_fisher_vars = []
self.min_fisher_vars = []
self.max_score_vars = []
self.min_score_vars = []
self.normalized_score_vars = []
self.score_vars = []
self.normalized_fisher_at_minima_vars = []
self.weights_delta_old_vars = []
self.ref_grads = []
self.projected_gradients_list = []
if self.class_attr is not None:
self.loss_and_train_ops_for_attr_vector(x, self.y_)
else:
self.loss_and_train_ops_for_one_hot_vector(x, self.y_)
# Set the operations to reset the optimizer when needed
self.reset_optimizer_ops()
####################################################################################
#### Internal APIs of the class. These should not be called/ exposed externally ####
####################################################################################
def loss_and_train_ops_for_one_hot_vector(self, x, y_):
"""
Loss and training operations for the training of one-hot vector based classification model
"""
# Define the appropriate network
if self.network_arch == 'FC-S':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 256, 256, self.total_classes]
if self.imp_method == 'PNN':
self.task_logits = []
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
if i == 0:
self.task_logits.append(self.init_fc_column_progNN(layer_dims, x))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i])))) # multiplying by mean(y_[i]) puts unwarranted loss to 0
else:
self.task_logits.append(self.extensible_fc_column_progNN(layer_dims, x, i))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i])))) # multiplying by mean(y_[i]) puts unwarranted loss to 0
else:
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'FC-B':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 2000, 2000, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'CNN':
num_channels = int(x.get_shape()[-1])
self.image_size = int(x.get_shape()[1])
kernels = [3, 3, 3, 3, 3]
depth = [num_channels, 32, 32, 64, 64, 512]
self.conv_variables(kernels, depth)
logits = self.conv_feedforward(x, self.weights, self.biases, apply_dropout=True)
elif self.network_arch == 'VGG':
# VGG-16
logits = self.vgg_16_conv_feedforward(x)
elif 'RESNET-' in self.network_arch:
if self.network_arch == 'RESNET-S':
# Same resnet-18 as used in GEM paper
kernels = [3, 3, 3, 3, 3]
filters = [20, 20, 40, 80, 160]
strides = [1, 0, 2, 2, 2]
elif self.network_arch == 'RESNET-B':
# Standard ResNet-18
kernels = [7, 3, 3, 3, 3]
filters = [64, 64, 128, 256, 512]
strides = [2, 0, 2, 2, 2]
if self.imp_method == 'PNN':
self.task_logits = []
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
if i == 0:
self.task_logits.append(self.init_resent_column_progNN(x, kernels, filters, strides))
else:
self.task_logits.append(self.extensible_resnet_column_progNN(x, kernels, filters, strides, i))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i]))))
elif self.imp_method == 'A-GEM' or self.imp_method == 'ER':
logits = self.resnet18_conv_feedforward(x, kernels, filters, strides)
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(logits)[0], 1]), logits, NEG_INF*tf.ones_like(logits)))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=self.task_pruned_logits[i])
adjusted_entropy = tf.reduce_sum(tf.cast(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(y_)[0], 1]), dtype=tf.float32) * y_, axis=1) * cross_entropy
self.unweighted_entropy.append(tf.reduce_sum(adjusted_entropy)) # We will average it later on
else:
logits = self.resnet18_conv_feedforward(x, kernels, filters, strides)
# Prune the predictions to only include the classes for which
# the training data is present
if (self.imp_method != 'PNN') and ((self.imp_method != 'A-GEM' and self.imp_method != 'ER') or 'FC-' in self.network_arch):
self.pruned_logits = tf.where(tf.tile(tf.equal(self.output_mask[None,:], 1.0), [tf.shape(logits)[0], 1]), logits, NEG_INF*tf.ones_like(logits))
# Create list of variables for storing different measures
# Note: This method has to be called before calculating fisher
# or any other importance measure
self.init_vars()
# Different entropy measures/ loss definitions
if (self.imp_method != 'PNN') and ((self.imp_method != 'A-GEM' and self.imp_method != 'ER') or 'FC-' in self.network_arch):
self.mse = 2.0*tf.nn.l2_loss(self.pruned_logits) # tf.nn.l2_loss computes sum(T**2)/ 2
self.weighted_entropy = tf.reduce_mean(tf.losses.softmax_cross_entropy(y_,
self.pruned_logits, self.sample_weights, reduction=tf.losses.Reduction.NONE))
self.unweighted_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_,
logits=self.pruned_logits))
# Create operations for loss and gradient calculation
self.loss_and_gradients(self.imp_method)
if self.imp_method != 'PNN':
# Store the current weights before doing a train step
self.get_current_weights()
# For GEM variants train ops will be defined later
if 'GEM' not in self.imp_method:
# Define the training operation here as Pathint ops depend on the train ops
self.train_op()
# Create operations to compute importance depending on the importance methods
if self.imp_method == 'EWC':
self.create_fisher_ops()
elif self.imp_method == 'M-EWC':
self.create_fisher_ops()
self.create_pathint_ops()
self.combined_fisher_pathint_ops()
elif self.imp_method == 'PI':
self.create_pathint_ops()
elif self.imp_method == 'RWALK':
self.create_fisher_ops()
self.create_pathint_ops()
elif self.imp_method == 'MAS':
self.create_hebbian_ops()
elif self.imp_method == 'A-GEM' or self.imp_method == 'S-GEM':
self.create_stochastic_gem_ops()
if self.imp_method != 'PNN':
# Create weight save and store ops
self.weights_store_ops()
# Summary operations for visualization
tf.summary.scalar("unweighted_entropy", self.unweighted_entropy)
for v in self.trainable_vars:
tf.summary.histogram(v.name.replace(":", "_"), v)
self.merged_summary = tf.summary.merge_all()
# Accuracy measure
if (self.imp_method == 'PNN') or ((self.imp_method == 'A-GEM' or self.imp_method == 'ER') and 'FC-' not in self.network_arch):
self.correct_predictions = []
self.accuracy = []
for i in range(self.num_tasks):
if self.imp_method == 'PNN':
self.correct_predictions.append(tf.equal(tf.argmax(self.task_pruned_logits[i], 1), tf.argmax(y_[i], 1)))
else:
self.correct_predictions.append(tf.equal(tf.argmax(self.task_pruned_logits[i], 1), tf.argmax(y_, 1)))
self.accuracy.append(tf.reduce_mean(tf.cast(self.correct_predictions[i], tf.float32)))
else:
self.correct_predictions = tf.equal(tf.argmax(self.pruned_logits, 1), tf.argmax(y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
def loss_and_train_ops_for_attr_vector(self, x, y_):
"""
Loss and training operations for the training of the joint embedding model
"""
# Define the appropriate network
if self.network_arch == 'FC-S':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 256, 256, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'FC-B':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 2000, 2000, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'CNN':
num_channels = int(x.get_shape()[-1])
self.image_size = int(x.get_shape()[1])
kernels = [3, 3, 3, 3, 3]
depth = [num_channels, 32, 32, 64, 64, 512]
self.conv_variables(kernels, depth)
logits = self.conv_feedforward(x, self.weights, self.biases, apply_dropout=True)
elif self.network_arch == 'VGG':
# VGG-16
phi_x = self.vgg_16_conv_feedforward(x)
elif self.network_arch == 'RESNET-S':
# Smaller ResNet-18, as used in the GEM paper
kernels = [3, 3, 3, 3, 3]
filters = [20, 20, 40, 80, 160]
strides = [1, 0, 2, 2, 2]
# Get the image features
phi_x = self.resnet18_conv_feedforward(x, kernels, filters, strides)
elif self.network_arch == 'RESNET-B':
# Standard ResNet-18
kernels = [7, 3, 3, 3, 3]
filters = [64, 64, 128, 256, 512]
strides = [2, 0, 2, 2, 2]
# Get the image features
phi_x = self.resnet18_conv_feedforward(x, kernels, filters, strides)
# Get the attributes embedding
attr_embed = self.get_attribute_embedding(self.class_attr) # Does not contain biases yet, Dimension: TOTAL_CLASSES x image_feature_dim
# Add the biases now
last_layer_biases = bias_variable([self.total_classes], name='attr_embed_b')
self.trainable_vars.append(last_layer_biases)
# Now that we have all the trainable variables, initialize the different book keeping variables
# Note: This method has to be called before calculating fisher
# or any other importance measure
self.init_vars()
# Compute the logits for the ZST case
zst_logits = tf.matmul(phi_x, tf.transpose(attr_embed)) + last_layer_biases
# Prune the predictions to only include the classes for which
# the training data is present
if self.imp_method == 'A-GEM':
pruned_zst_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
pruned_zst_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(zst_logits)[0], 1]), zst_logits, NEG_INF*tf.ones_like(zst_logits)))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=pruned_zst_logits[i])
adjusted_entropy = tf.reduce_sum(tf.cast(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(y_)[0], 1]), dtype=tf.float32) * y_, axis=1) * cross_entropy
self.unweighted_entropy.append(tf.reduce_sum(adjusted_entropy))
else:
pruned_zst_logits = tf.where(tf.tile(tf.equal(self.output_mask[None,:], 1.0),
[tf.shape(zst_logits)[0], 1]), zst_logits, NEG_INF*tf.ones_like(zst_logits))
self.unweighted_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=pruned_zst_logits))
self.mse = 2.0*tf.nn.l2_loss(pruned_zst_logits) # tf.nn.l2_loss computes sum(T**2)/ 2
# Create operations for loss and gradient calculation
self.loss_and_gradients(self.imp_method)
# Store the current weights before doing a train step
self.get_current_weights()
if 'GEM' not in self.imp_method:
self.train_op()
# Create operations to compute importance depending on the importance methods
if self.imp_method == 'EWC':
self.create_fisher_ops()
elif self.imp_method == 'M-EWC':
self.create_fisher_ops()
self.create_pathint_ops()
self.combined_fisher_pathint_ops()
elif self.imp_method == 'PI':
self.create_pathint_ops()
elif self.imp_method == 'RWALK':
self.create_fisher_ops()
self.create_pathint_ops()
elif self.imp_method == 'MAS':
self.create_hebbian_ops()
elif (self.imp_method == 'A-GEM') or (self.imp_method == 'S-GEM'):
self.create_stochastic_gem_ops()
# Create weight save and store ops
self.weights_store_ops()
# Summary operations for visualization
tf.summary.scalar("triplet_loss", self.unweighted_entropy)
for v in self.trainable_vars:
tf.summary.histogram(v.name.replace(":", "_"), v)
self.merged_summary = tf.summary.merge_all()
# Accuracy measure
if self.imp_method == 'A-GEM' and 'FC-' not in self.network_arch:
self.correct_predictions = []
self.accuracy = []
for i in range(self.num_tasks):
self.correct_predictions.append(tf.equal(tf.argmax(pruned_zst_logits[i], 1), tf.argmax(y_, 1)))
self.accuracy.append(tf.reduce_mean(tf.cast(self.correct_predictions[i], tf.float32)))
else:
self.correct_predictions = tf.equal(tf.argmax(pruned_zst_logits, 1), tf.argmax(y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
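    # Illustrative helper added for exposition only (the name and arguments are made up and
    # nothing in the model calls it): a plain-numpy version of the logit pruning used above,
    # where classes outside the current output mask are pushed towards a very negative value so
    # that softmax/argmax effectively ignore them; neg_inf stands in for the NEG_INF constant.
    def _mask_logits_sketch(self, logits, output_mask, neg_inf=-1e32):
        logits = np.asarray(logits, dtype=np.float32)      # [batch, total_classes]
        mask = np.asarray(output_mask, dtype=bool)         # [total_classes], 1.0 for seen classes
        return np.where(mask[None, :], logits, neg_inf)    # masked-out classes get ~ -inf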
def init_fc_column_progNN(self, layer_dims, h, apply_dropout=False):
"""
Defines the first column of Progressive NN - FC Networks
"""
self.trainable_vars = []
self.h_pnn = []
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[0].append(h)
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_w_%d_t0'%(i))
b = bias_variable([layer_dims[i+1]], name='fc_b_%d_t0'%(i))
self.trainable_vars[0].append(w)
self.trainable_vars[0].append(b)
if i == len(layer_dims) - 2:
# Last layer (logits) - don't apply the relu
h = create_fc_layer(h, w, b, apply_relu=False)
else:
h = create_fc_layer(h, w, b)
if apply_dropout:
h = tf.nn.dropout(h, 1)
self.h_pnn[0].append(h)
return h
def extensible_fc_column_progNN(self, layer_dims, h, task, apply_dropout=False):
"""
Define the subsequent columns of the progressive NN - FC Networks
"""
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[task].append(h)
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_w_%d_t%d'%(i, task))
b = bias_variable([layer_dims[i+1]], name='fc_b_%d_t%d'%(i, task))
self.trainable_vars[task].append(w)
self.trainable_vars[task].append(b)
preactivation = create_fc_layer(h, w, b, apply_relu=False)
for tt in range(task):
U_w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_uw_%d_t%d_tt%d'%(i, task, tt))
U_b = bias_variable([layer_dims[i+1]], name='fc_ub_%d_t%d_tt%d'%(i, task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
preactivation += create_fc_layer(self.h_pnn[tt][i], U_w, U_b, apply_relu=False)
if i == len(layer_dims) - 2:
# Last layer (logits) - don't apply the relu
h = preactivation
else:
# layer < last layer, apply relu
h = tf.nn.relu(preactivation)
if apply_dropout:
                    h = tf.nn.dropout(h, 1)  # keep_prob of 1 mirrors the first-column implementation
self.h_pnn[task].append(h)
return h
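    # Illustrative helper for exposition only (hypothetical name and arguments, never called):
    # the pre-activation built above is the column's own affine term plus one lateral adapter
    # per previous column, i.e. h_new = W h + b + sum_t (U_t h_t + c_t), followed by a relu on
    # every layer except the logits. Plain numpy sketch.
    def _pnn_preactivation_sketch(self, h, w, b, lateral_inputs, lateral_weights, lateral_biases):
        pre = np.asarray(h) @ np.asarray(w) + np.asarray(b)
        for h_t, u_t, c_t in zip(lateral_inputs, lateral_weights, lateral_biases):
            pre += np.asarray(h_t) @ np.asarray(u_t) + np.asarray(c_t)
        return np.maximum(pre, 0.0)  # relu, as applied to the non-final layers above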
def init_resent_column_progNN(self, x, kernels, filters, strides):
"""
Defines the first column of Progressive NN - ResNet-18
"""
self.trainable_vars = []
self.h_pnn = []
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[0].append(x)
# Conv1
h = _conv(x, kernels[0], filters[0], strides[0], self.trainable_vars[0], name='conv_1_t0')
h = _bn(h, self.trainable_vars[0], self.train_phase[0], name='bn_1_t0')
h = tf.nn.relu(h)
self.h_pnn[0].append(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv2_1_t0')
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv2_2_t0')
self.h_pnn[0].append(h)
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars[0], self.train_phase[0], name='conv3_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv3_2_t0')
self.h_pnn[0].append(h)
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars[0], self.train_phase[0], name='conv4_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv4_2_t0')
self.h_pnn[0].append(h)
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars[0], self.train_phase[0], name='conv5_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv5_2_t0')
self.h_pnn[0].append(h)
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars[0], name='fc_1_t0', is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars[0], name='fc_1_t0')
self.h_pnn[0].append(logits)
return logits
def extensible_resnet_column_progNN(self, x, kernels, filters, strides, task):
"""
Define the subsequent columns of the progressive NN - ResNet-18
"""
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[task].append(x)
# Conv1
h = _conv(x, kernels[0], filters[0], strides[0], self.trainable_vars[task], name='conv_1_t%d'%(task))
h = _bn(h, self.trainable_vars[task], self.train_phase[task], name='bn_1_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][0].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_1_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_1_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][0], U_w, U_b, apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], name='conv2_1_t%d'%(task))
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv2_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][1].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_2_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_2_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][1], U_w, U_b, apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars[task], self.train_phase[task], name='conv3_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv3_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][2].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_3_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_3_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][2], U_w, U_b, stride=strides[2], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars[task], self.train_phase[task], name='conv4_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv4_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][3].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_4_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_4_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][3], U_w, U_b, stride=strides[3], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars[task], self.train_phase[task], name='conv5_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv5_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][4].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_5_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_5_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][4], U_w, U_b, stride=strides[4], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars[task], name='fc_1_t%d'%(task), is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars[task], name='fc_1_t%d'%(task))
for tt in range(task):
h_tt = tf.reduce_mean(self.h_pnn[tt][5], [1, 2])
U_w = weight_variable([h_tt.get_shape().as_list()[1], self.total_classes], name='fc_uw_1_t%d_tt%d'%(task, tt))
U_b = bias_variable([self.total_classes], name='fc_ub_1_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
logits += create_fc_layer(h_tt, U_w, U_b, apply_relu=False)
self.h_pnn[task].append(logits)
return logits
def fc_variables(self, layer_dims):
"""
Defines variables for a 3-layer fc network
Args:
Returns:
"""
self.weights = []
self.biases = []
self.trainable_vars = []
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_%d'%(i))
b = bias_variable([layer_dims[i+1]], name='fc_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
def fc_feedforward(self, h, weights, biases, apply_dropout=False):
"""
Forward pass through a fc network
Args:
h Input image (tensor)
weights List of weights for a fc network
biases List of biases for a fc network
            apply_dropout   Whether to apply dropout (True/False)
Returns:
Logits of a fc network
"""
if apply_dropout:
h = tf.nn.dropout(h, 1) # Apply dropout on Input?
for (w, b) in list(zip(weights, biases))[:-1]:
h = create_fc_layer(h, w, b)
if apply_dropout:
h = tf.nn.dropout(h, 1) # Apply dropout on hidden layers?
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
return create_fc_layer(h, weights[-1], biases[-1], apply_relu=False)
def conv_variables(self, kernel, depth):
"""
Defines variables of a 5xconv-1xFC convolutional network
Args:
Returns:
"""
self.weights = []
self.biases = []
self.trainable_vars = []
div_factor = 1
for i in range(len(kernel)):
w = weight_variable([kernel[i], kernel[i], depth[i], depth[i+1]], name='conv_%d'%(i))
b = bias_variable([depth[i+1]], name='conv_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
# Since we maxpool after every two conv layers
if ((i+1) % 2 == 0):
div_factor *= 2
flat_units = (self.image_size // div_factor) * (self.image_size // div_factor) * depth[-1]
w = weight_variable([flat_units, self.total_classes], name='fc_%d'%(i))
b = bias_variable([self.total_classes], name='fc_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
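    # Worked example for exposition only (hypothetical helper, never called): with a 32x32 input
    # and the 5-conv architecture above, max-pooling after every second conv layer halves the
    # spatial size twice, so div_factor = 4 and the flattened size fed to the final FC layer is
    # (32 // 4) * (32 // 4) * 512 = 32768.
    def _flat_units_sketch(self, image_size=32, num_conv_layers=5, last_depth=512):
        div_factor = 1
        for i in range(num_conv_layers):
            if (i + 1) % 2 == 0:
                div_factor *= 2
        return (image_size // div_factor) * (image_size // div_factor) * last_depth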
def conv_feedforward(self, h, weights, biases, apply_dropout=True):
"""
Forward pass through a convolutional network
Args:
h Input image (tensor)
weights List of weights for a conv network
biases List of biases for a conv network
            apply_dropout   Whether to apply dropout (True/False)
Returns:
Logits of a conv network
"""
for i, (w, b) in enumerate(list(zip(weights, biases))[:-1]):
# Apply conv operation till the second last layer, which is a FC layer
h = create_conv_layer(h, w, b)
if ((i+1) % 2 == 0):
# Apply max pool after every two conv layers
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Apply dropout
if apply_dropout:
h = tf.nn.dropout(h, self.keep_prob)
# Construct FC layers
shape = h.get_shape().as_list()
h = tf.reshape(h, [-1, shape[1] * shape[2] * shape[3]])
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
return create_fc_layer(h, weights[-1], biases[-1], apply_relu=False)
def vgg_16_conv_feedforward(self, h):
"""
Forward pass through a VGG 16 network
Return:
Logits of a VGG 16 network
"""
self.trainable_vars = []
# Conv1
h = vgg_conv_layer(h, 3, 64, 1, self.trainable_vars, name='conv1_1')
h = vgg_conv_layer(h, 3, 64, 1, self.trainable_vars, name='conv1_2')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# Conv2
h = vgg_conv_layer(h, 3, 128, 1, self.trainable_vars, name='conv2_1')
h = vgg_conv_layer(h, 3, 128, 1, self.trainable_vars, name='conv2_2')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# Conv3
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_1')
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_2')
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
# Conv4
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_1')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_2')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')
# Conv5
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_1')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_2')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool5')
# FC layers
shape = h.get_shape().as_list()
h = tf.reshape(h, [-1, shape[1] * shape[2] * shape[3]])
# fc6
h = vgg_fc_layer(h, 4096, self.trainable_vars, apply_relu=True, name='fc6')
# fc7
h = vgg_fc_layer(h, 4096, self.trainable_vars, apply_relu=True, name='fc7')
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
# fc8
if self.class_attr is not None:
# Return the image features
return h
else:
logits = vgg_fc_layer(h, self.total_classes, self.trainable_vars, apply_relu=False, name='fc8')
return logits
def resnet18_conv_feedforward(self, h, kernels, filters, strides):
"""
Forward pass through a ResNet-18 network
Returns:
Logits of a resnet-18 conv network
"""
self.trainable_vars = []
# Conv1
h = _conv(h, kernels[0], filters[0], strides[0], self.trainable_vars, name='conv_1')
h = _bn(h, self.trainable_vars, self.train_phase, name='bn_1')
h = tf.nn.relu(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv2_1')
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv2_2')
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars, self.train_phase, name='conv3_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv3_2')
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars, self.train_phase, name='conv4_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv4_2')
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars, self.train_phase, name='conv5_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv5_2')
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
# Store the feature mappings
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
if self.class_attr is not None:
# Return the image features
return h
else:
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars, name='fc_1', is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars, name='fc_1')
return logits
def get_attribute_embedding(self, attr):
"""
Get attribute embedding using a simple FC network
Returns:
Embedding vector of k x ATTR_DIMS
"""
w = weight_variable([self.attr_dims, self.image_feature_dim], name='attr_embed_w')
self.trainable_vars.append(w)
# Return the inner product of attribute matrix and weight vector.
return tf.matmul(attr, w) # Dimension should be TOTAL_CLASSES x image_feature_dim
def loss_and_gradients(self, imp_method):
"""
Defines task based and surrogate losses and their
gradients
Args:
Returns:
"""
reg = 0.0
if imp_method == 'VAN' or imp_method == 'PNN' or imp_method == 'ER' or 'GEM' in imp_method:
pass
elif imp_method == 'EWC' or imp_method == 'M-EWC':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.normalized_fisher_at_minima_vars)])
elif imp_method == 'PI':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.big_omega_vars)])
elif imp_method == 'MAS':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.hebbian_score_vars)])
elif imp_method == 'RWALK':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * (f + scr)) for w, w_star,
f, scr in zip(self.trainable_vars, self.star_vars, self.normalized_fisher_at_minima_vars,
self.normalized_score_vars)])
"""
# ***** DON't USE THIS WITH MULTI-HEAD SETTING SINCE THIS WILL UPDATE ALL THE WEIGHTS *****
# If CNN arch, then use the weight decay
if self.is_ATT_DATASET:
self.unweighted_entropy += tf.add_n([0.0005 * tf.nn.l2_loss(v) for v in self.trainable_vars if 'weights' in v.name or 'kernel' in v.name])
"""
if imp_method == 'PNN':
# Compute the gradients of regularized loss
self.reg_gradients_vars = []
for i in range(self.num_tasks):
self.reg_gradients_vars.append([])
self.reg_gradients_vars[i] = self.opt.compute_gradients(self.unweighted_entropy[i], var_list=self.trainable_vars[i])
elif imp_method != 'A-GEM': # For A-GEM we will define the losses and gradients later on
if imp_method == 'ER' and 'FC-' not in self.network_arch:
self.reg_loss = tf.add_n([self.unweighted_entropy[i] for i in range(self.num_tasks)])/ self.mem_batch_size
else:
# Regularized training loss
self.reg_loss = tf.squeeze(self.unweighted_entropy + self.synap_stgth * reg)
# Compute the gradients of the vanilla loss
self.vanilla_gradients_vars = self.opt.compute_gradients(self.unweighted_entropy,
var_list=self.trainable_vars)
# Compute the gradients of regularized loss
self.reg_gradients_vars = self.opt.compute_gradients(self.reg_loss,
var_list=self.trainable_vars)
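    # Illustrative helper for exposition only (hypothetical name and arguments, not part of the
    # TF graph): the EWC/PI/MAS/RWALK branches above all add a quadratic penalty of the form
    # sum_i importance_i * (w_i - w*_i)^2, scaled by the synaptic-strength hyperparameter, on
    # top of the task loss. Plain numpy sketch.
    def _quadratic_reg_sketch(self, task_loss, weights, star_weights, importances, synap_stgth):
        reg = sum(np.sum(np.square(np.asarray(w) - np.asarray(w_star)) * np.asarray(f))
                  for w, w_star, f in zip(weights, star_weights, importances))
        return task_loss + synap_stgth * reg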
def train_op(self):
"""
Defines the training operation (a single step during training)
Args:
Returns:
"""
if self.imp_method == 'VAN' or self.imp_method == 'ER':
# Define training operation
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
elif self.imp_method == 'PNN':
# Define training operation
self.train = [self.opt.apply_gradients(self.reg_gradients_vars[i]) for i in range(self.num_tasks)]
elif self.imp_method == 'FTR_EXT':
# Define a training operation for the first and subsequent tasks
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
self.train_classifier = self.opt.apply_gradients(self.reg_gradients_vars[-2:])
else:
# Get the value of old weights first
with tf.control_dependencies([self.weights_old_ops_grouped]):
# Define a training operation
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
def init_vars(self):
"""
Defines different variables that will be used for the
weight consolidation
Args:
Returns:
"""
if self.imp_method == 'PNN':
return
for v in range(len(self.trainable_vars)):
# List of variables for weight updates
self.weights_old.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.weights_delta_old_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.star_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False,
name=self.trainable_vars[v].name.rsplit(':')[0]+'_star'))
# List of variables for pathint method
self.small_omega_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.big_omega_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.big_omega_riemann_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
# List of variables to store fisher information
self.fisher_diagonal_at_minima.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.normalized_fisher_at_minima_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False, dtype=tf.float32))
self.tmp_fisher_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.running_fisher_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
# New variables for conv setting for fisher and score normalization
self.max_fisher_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.min_fisher_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.max_score_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.min_score_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.normalized_score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
if self.imp_method == 'MAS':
# List of variables to store hebbian information
self.hebbian_score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
elif self.imp_method == 'A-GEM' or self.imp_method == 'S-GEM':
self.ref_grads.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.projected_gradients_list.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
def get_current_weights(self):
"""
Get the values of current weights
Note: These weights are different from star_vars as those
store the weights after training for the last task.
Args:
Returns:
"""
weights_old_ops = []
weights_delta_old_ops = []
for v in range(len(self.trainable_vars)):
weights_old_ops.append(tf.assign(self.weights_old[v], self.trainable_vars[v]))
weights_delta_old_ops.append(tf.assign(self.weights_delta_old_vars[v], self.trainable_vars[v]))
self.weights_old_ops_grouped = tf.group(*weights_old_ops)
self.weights_delta_old_grouped = tf.group(*weights_delta_old_ops)
def weights_store_ops(self):
"""
Defines weight restoration operations
Args:
Returns:
"""
restore_weights_ops = []
set_star_vars_ops = []
for v in range(len(self.trainable_vars)):
restore_weights_ops.append(tf.assign(self.trainable_vars[v], self.star_vars[v]))
set_star_vars_ops.append(tf.assign(self.star_vars[v], self.trainable_vars[v]))
self.restore_weights = tf.group(*restore_weights_ops)
self.set_star_vars = tf.group(*set_star_vars_ops)
def reset_optimizer_ops(self):
"""
Defines operations to reset the optimizer
Args:
Returns:
"""
# Set the operation for resetting the optimizer
self.optimizer_slots = [self.opt.get_slot(var, name) for name in self.opt.get_slot_names()\
for var in tf.global_variables() if self.opt.get_slot(var, name) is not None]
self.slot_names = self.opt.get_slot_names()
self.opt_init_op = tf.variables_initializer(self.optimizer_slots)
def create_pathint_ops(self):
"""
Defines operations for path integral-based importance
Args:
Returns:
"""
reset_small_omega_ops = []
update_small_omega_ops = []
update_big_omega_ops = []
update_big_omega_riemann_ops = []
for v in range(len(self.trainable_vars)):
# Make sure that the variables are updated before calculating delta(theta)
with tf.control_dependencies([self.train]):
update_small_omega_ops.append(tf.assign_add(self.small_omega_vars[v],
-(self.vanilla_gradients_vars[v][0] * (self.trainable_vars[v] - self.weights_old[v]))))
# Ops to reset the small omega
reset_small_omega_ops.append(tf.assign(self.small_omega_vars[v], self.small_omega_vars[v]*0.0))
if self.imp_method == 'PI':
                # Update the big omegas at the end of the task using the Euclidean distance
update_big_omega_ops.append(tf.assign_add(self.big_omega_vars[v],
tf.nn.relu(tf.div(self.small_omega_vars[v], (PARAM_XI_STEP + tf.square(self.trainable_vars[v] - self.star_vars[v]))))))
elif self.imp_method == 'RWALK':
# Update the big omegas after small intervals using distance in riemannian manifold (KL-divergence)
update_big_omega_riemann_ops.append(tf.assign_add(self.big_omega_riemann_vars[v],
tf.nn.relu(tf.div(self.small_omega_vars[v],
(PARAM_XI_STEP + self.running_fisher_vars[v] * tf.square(self.trainable_vars[v] - self.weights_delta_old_vars[v]))))))
self.update_small_omega = tf.group(*update_small_omega_ops)
self.reset_small_omega = tf.group(*reset_small_omega_ops)
if self.imp_method == 'PI':
self.update_big_omega = tf.group(*update_big_omega_ops)
elif self.imp_method == 'RWALK':
self.update_big_omega_riemann = tf.group(*update_big_omega_riemann_ops)
self.big_omega_riemann_reset = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.big_omega_riemann_vars]
if self.imp_method == 'RWALK':
# For the first task, scale the scores so that division does not have an effect
self.scale_score = [tf.assign(s, s*2.0) for s in self.big_omega_riemann_vars]
# To reduce the rigidity after each task the importance scores are averaged
self.update_score = [tf.assign_add(scr, tf.div(tf.add(scr, riemm_omega), 2.0))
for scr, riemm_omega in zip(self.score_vars, self.big_omega_riemann_vars)]
# Get the min and max in each layer of the scores
self.get_max_score_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_max(scr, keepdims=True)),
axis=0)) for var, scr in zip(self.max_score_vars, self.score_vars)]
self.get_min_score_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_min(scr, keepdims=True)),
axis=0)) for var, scr in zip(self.min_score_vars, self.score_vars)]
self.max_score = tf.reduce_max(tf.convert_to_tensor(self.max_score_vars))
self.min_score = tf.reduce_min(tf.convert_to_tensor(self.min_score_vars))
with tf.control_dependencies([self.max_score, self.min_score]):
self.normalize_scores = [tf.assign(tgt, (var - self.min_score)/ (self.max_score - self.min_score + EPSILON))
for tgt, var in zip(self.normalized_score_vars, self.score_vars)]
# Sparsify all the layers except last layer
sparsify_score_ops = []
for v in range(len(self.normalized_score_vars) - 2):
sparsify_score_ops.append(tf.assign(self.normalized_score_vars[v],
tf.nn.dropout(self.normalized_score_vars[v], self.keep_prob)))
self.sparsify_scores = tf.group(*sparsify_score_ops)
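    # Illustrative helper for exposition only (hypothetical arguments, never called; xi stands in
    # for PARAM_XI_STEP): the path-integral bookkeeping above accumulates
    # small_omega += -grad * (w_new - w_old) at every step and, at the end of a task, converts it
    # into big_omega += relu(small_omega / (xi + (w_end - w_star)^2)). Plain numpy sketch.
    def _pathint_sketch(self, grads_per_step, weight_pairs_per_step, w_end, w_star, xi=0.1):
        small_omega = np.zeros_like(np.asarray(w_end, dtype=np.float64))
        for g, (w_old, w_new) in zip(grads_per_step, weight_pairs_per_step):
            small_omega += -np.asarray(g) * (np.asarray(w_new) - np.asarray(w_old))
        delta_sq = np.square(np.asarray(w_end) - np.asarray(w_star))
        return np.maximum(small_omega / (xi + delta_sq), 0.0)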
def create_fisher_ops(self):
"""
Defines the operations to compute online update of Fisher
Args:
Returns:
"""
ders = tf.gradients(self.unweighted_entropy, self.trainable_vars)
fisher_ema_at_step_ops = []
fisher_accumulate_at_step_ops = []
# ops for running fisher
self.set_tmp_fisher = [tf.assign_add(f, tf.square(d)) for f, d in zip(self.tmp_fisher_vars, ders)]
# Initialize the running fisher to non-zero value
self.set_initial_running_fisher = [tf.assign(r_f, s_f) for r_f, s_f in zip(self.running_fisher_vars,
self.tmp_fisher_vars)]
self.set_running_fisher = [tf.assign(f, (1 - self.fisher_ema_decay) * f + (1.0/ self.fisher_update_after) *
self.fisher_ema_decay * tmp) for f, tmp in zip(self.running_fisher_vars, self.tmp_fisher_vars)]
self.get_fisher_at_minima = [tf.assign(var, f) for var, f in zip(self.fisher_diagonal_at_minima,
self.running_fisher_vars)]
self.reset_tmp_fisher = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.tmp_fisher_vars]
# Get the min and max in each layer of the Fisher
self.get_max_fisher_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_max(scr, keepdims=True)), axis=0))
for var, scr in zip(self.max_fisher_vars, self.fisher_diagonal_at_minima)]
self.get_min_fisher_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_min(scr, keepdims=True)), axis=0))
for var, scr in zip(self.min_fisher_vars, self.fisher_diagonal_at_minima)]
self.max_fisher = tf.reduce_max(tf.convert_to_tensor(self.max_fisher_vars))
self.min_fisher = tf.reduce_min(tf.convert_to_tensor(self.min_fisher_vars))
with tf.control_dependencies([self.max_fisher, self.min_fisher]):
self.normalize_fisher_at_minima = [tf.assign(tgt,
(var - self.min_fisher)/ (self.max_fisher - self.min_fisher + EPSILON))
for tgt, var in zip(self.normalized_fisher_at_minima_vars, self.fisher_diagonal_at_minima)]
self.clear_attr_embed_reg = tf.assign(self.normalized_fisher_at_minima_vars[-2], tf.zeros_like(self.normalized_fisher_at_minima_vars[-2]))
# Sparsify all the layers except last layer
sparsify_fisher_ops = []
for v in range(len(self.normalized_fisher_at_minima_vars) - 2):
sparsify_fisher_ops.append(tf.assign(self.normalized_fisher_at_minima_vars[v],
tf.nn.dropout(self.normalized_fisher_at_minima_vars[v], self.keep_prob)))
self.sparsify_fisher = tf.group(*sparsify_fisher_ops)
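    # Illustrative helper for exposition only (hypothetical arguments, not part of the graph;
    # eps stands in for EPSILON): the online Fisher estimate above keeps an exponential moving
    # average of squared gradients, F <- (1 - decay) * F + (decay / update_after) * F_batch, and
    # the result is min-max normalised before being used as an importance weight.
    def _online_fisher_sketch(self, running_fisher, batch_sq_grads, decay, update_after, eps=1e-10):
        f = ((1.0 - decay) * np.asarray(running_fisher)
             + (1.0 / update_after) * decay * np.asarray(batch_sq_grads))
        return (f - f.min()) / (f.max() - f.min() + eps)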
def combined_fisher_pathint_ops(self):
"""
Define the operations to refine Fisher information based on parameters convergence
Args:
Returns:
"""
#self.refine_fisher_at_minima = [tf.assign(f, f*(1.0/(s+1e-12))) for f, s in zip(self.fisher_diagonal_at_minima, self.small_omega_vars)]
self.refine_fisher_at_minima = [tf.assign(f, f*tf.exp(-100.0*s)) for f, s in zip(self.fisher_diagonal_at_minima, self.small_omega_vars)]
def create_hebbian_ops(self):
"""
Define operations for hebbian measure of importance (MAS)
"""
# Compute the gradients of mse loss
self.mse_gradients = tf.gradients(self.mse, self.trainable_vars)
#with tf.control_dependencies([self.mse_gradients]):
# Keep on adding gradients to the omega
self.accumulate_hebbian_scores = [tf.assign_add(omega, tf.abs(grad)) for omega, grad in zip(self.hebbian_score_vars, self.mse_gradients)]
# Average across the total images
self.average_hebbian_scores = [tf.assign(omega, omega*(1.0/self.train_samples)) for omega in self.hebbian_score_vars]
# Reset the hebbian importance variables
self.reset_hebbian_scores = [tf.assign(omega, tf.zeros_like(omega)) for omega in self.hebbian_score_vars]
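    # Illustrative helper for exposition only (hypothetical arguments, never called): the MAS
    # importance accumulated above is the absolute gradient of the squared L2 norm of the pruned
    # outputs with respect to each parameter, summed over examples and averaged by the number of
    # training samples. Plain numpy sketch.
    def _mas_importance_sketch(self, per_example_output_grads, num_samples):
        omega = np.zeros_like(np.asarray(per_example_output_grads[0], dtype=np.float64))
        for grad in per_example_output_grads:
            omega += np.abs(np.asarray(grad))
        return omega / float(num_samples)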
def create_stochastic_gem_ops(self):
"""
Define operations for Stochastic GEM
"""
if 'FC-' in self.network_arch or self.imp_method == 'S-GEM':
self.agem_loss = self.unweighted_entropy
else:
self.agem_loss = tf.add_n([self.unweighted_entropy[i] for i in range(self.num_tasks)])/ self.mem_batch_size
ref_grads = tf.gradients(self.agem_loss, self.trainable_vars)
# Reference gradient for previous tasks
self.store_ref_grads = [tf.assign(ref, grad) for ref, grad in zip(self.ref_grads, ref_grads)]
flat_ref_grads = tf.concat([tf.reshape(grad, [-1]) for grad in self.ref_grads], 0)
        # Gradient on the current task
task_grads = tf.gradients(self.agem_loss, self.trainable_vars)
flat_task_grads = tf.concat([tf.reshape(grad, [-1]) for grad in task_grads], 0)
with tf.control_dependencies([flat_task_grads]):
dotp = tf.reduce_sum(tf.multiply(flat_task_grads, flat_ref_grads))
ref_mag = tf.reduce_sum(tf.multiply(flat_ref_grads, flat_ref_grads))
proj = flat_task_grads - ((dotp/ ref_mag) * flat_ref_grads)
self.reset_violation_count = self.violation_count.assign(0)
def increment_violation_count():
with tf.control_dependencies([tf.assign_add(self.violation_count, 1)]):
return tf.identity(self.violation_count)
self.violation_count = tf.cond(tf.greater_equal(dotp, 0), lambda: tf.identity(self.violation_count), increment_violation_count)
projected_gradients = tf.cond(tf.greater_equal(dotp, 0), lambda: tf.identity(flat_task_grads), lambda: tf.identity(proj))
# Convert the flat projected gradient vector into a list
offset = 0
store_proj_grad_ops = []
for v in self.projected_gradients_list:
shape = v.get_shape()
v_params = 1
for dim in shape:
v_params *= dim.value
store_proj_grad_ops.append(tf.assign(v, tf.reshape(projected_gradients[offset:offset+v_params], shape)))
offset += v_params
self.store_proj_grads = tf.group(*store_proj_grad_ops)
# Define training operations for the tasks > 1
with tf.control_dependencies([self.store_proj_grads]):
self.train_subseq_tasks = self.opt.apply_gradients(zip(self.projected_gradients_list, self.trainable_vars))
# Define training operations for the first task
self.first_task_gradients_vars = self.opt.compute_gradients(self.agem_loss, var_list=self.trainable_vars)
self.train_first_task = self.opt.apply_gradients(self.first_task_gradients_vars)
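    # Illustrative helper for exposition only (hypothetical name and arguments, not wired into
    # the graph): the A-GEM update above keeps the current task gradient g whenever
    # g . g_ref >= 0 and otherwise projects it, g - (g . g_ref / g_ref . g_ref) * g_ref, so that
    # the loss on the episodic memory does not increase. Plain numpy sketch.
    def _agem_project_sketch(self, task_grad, ref_grad):
        g = np.asarray(task_grad, dtype=np.float64).ravel()
        g_ref = np.asarray(ref_grad, dtype=np.float64).ravel()
        dotp = float(np.dot(g, g_ref))
        if dotp >= 0.0:
            return g
        return g - (dotp / float(np.dot(g_ref, g_ref))) * g_ref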
#################################################################################
#### External APIs of the class. These will be called/ exposed externally #######
#################################################################################
def reset_optimizer(self, sess):
"""
Resets the optimizer state
Args:
sess TF session
Returns:
"""
# Call the reset optimizer op
sess.run(self.opt_init_op)
def set_active_outputs(self, sess, labels):
"""
Set the mask for the labels seen so far
Args:
sess TF session
labels Mask labels
Returns:
"""
new_mask = np.zeros(self.total_classes)
new_mask[labels] = 1.0
"""
for l in labels:
new_mask[l] = 1.0
"""
sess.run(self.output_mask.assign(new_mask))
def init_updates(self, sess):
"""
Initialization updates
Args:
sess TF session
Returns:
"""
# Set the star values to the initial weights, so that we can calculate
# big_omegas reliably
if self.imp_method != 'PNN':
sess.run(self.set_star_vars)
def task_updates(self, sess, task, train_x, train_labels, num_classes_per_task=10, class_attr=None, online_cross_val=False):
"""
Updates different variables when a task is completed
Args:
sess TF session
task Task ID
train_x Training images for the task
train_labels Labels in the task
class_attr Class attributes (only needed for ZST transfer)
Returns:
"""
if self.imp_method == 'VAN' or self.imp_method == 'PNN':
# We'll store the current parameters at the end of this function
pass
elif self.imp_method == 'EWC':
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Normalize the fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Don't regularize over the attribute-embedding vectors
#sess.run(self.clear_attr_embed_reg)
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
elif self.imp_method == 'M-EWC':
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Refine Fisher based on the convergence info
sess.run(self.refine_fisher_at_minima)
# Normalize the fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
# Reset the small_omega_vars
sess.run(self.reset_small_omega)
elif self.imp_method == 'PI':
# Update big omega variables
sess.run(self.update_big_omega)
# Reset the small_omega_vars because big_omega_vars are updated before it
sess.run(self.reset_small_omega)
elif self.imp_method == 'RWALK':
if task == 0:
# If first task then scale by a factor of 2, so that subsequent averaging does not hurt
sess.run(self.scale_score)
# Get the updated importance score
sess.run(self.update_score)
# Normalize the scores
sess.run([self.get_max_score_vars, self.get_min_score_vars])
sess.run([self.min_score, self.max_score, self.normalize_scores])
# Sparsify scores
"""
# TODO: Tmp remove this?
kp = 0.8 + (task*0.5)
if (kp > 1):
kp = 1.0
"""
#sess.run(self.sparsify_scores, feed_dict={self.keep_prob: kp})
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Normalize fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Sparsify fisher
#sess.run(self.sparsify_fisher, feed_dict={self.keep_prob: kp})
# Store the weights
sess.run(self.weights_delta_old_grouped)
# Reset the small_omega_vars because big_omega_vars are updated before it
sess.run(self.reset_small_omega)
# Reset the big_omega_riemann because importance score is stored in the scores array
sess.run(self.big_omega_riemann_reset)
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
elif self.imp_method == 'MAS':
# zero out any previous values
sess.run(self.reset_hebbian_scores)
if self.class_attr is not None:
# Define mask based on the class attributes
masked_class_attrs = np.zeros_like(class_attr)
masked_class_attrs[train_labels] = class_attr[train_labels]
# Logits mask
logit_mask = np.zeros(self.total_classes)
logit_mask[train_labels] = 1.0
# Loop over the entire training dataset to compute the parameter importance
batch_size = 10
num_samples = train_x.shape[0]
for iters in range(num_samples// batch_size):
offset = iters * batch_size
if self.class_attr is not None:
sess.run(self.accumulate_hebbian_scores, feed_dict={self.x: train_x[offset:offset+batch_size], self.keep_prob: 1.0,
self.class_attr: masked_class_attrs, self.output_mask: logit_mask, self.train_phase: False})
else:
sess.run(self.accumulate_hebbian_scores, feed_dict={self.x: train_x[offset:offset+batch_size], self.keep_prob: 1.0,
self.output_mask: logit_mask, self.train_phase: False})
# Average the hebbian scores across the training examples
sess.run(self.average_hebbian_scores, feed_dict={self.train_samples: num_samples})
# Store current weights
self.init_updates(sess)
def restore(self, sess):
"""
Restore the weights from the star variables
Args:
sess TF session
Returns:
"""
sess.run(self.restore_weights)
|
agem-main
|
model/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import List
import argparse
import numpy as np
import random
import torch
import torch.cuda
import sys
from src.torchrun_utils import init_distributed_mode_torchrun
from src import dist_utils, slurm, util
from src.index_io import load_or_initialize_index
from src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model
from src.options import get_options
from train import train
import torch.distributed as dist
os.environ["TOKENIZERS_PARALLELISM"] = "true"
NCONTEXT: str = "40"
PBSZ: str = "1"
PRECISION: str = "bf16"
GOLD_SCORE_MODE: str = "ppmean"
GPU_MAX_LENGTH: str = "384"
GEN_MAX_LENGTH: str = "32"
EPSILON: str = "0.01"
SMALL_EPSILON: str = "4e-5"
DROPOUT: str = "0.1"
WARMUP_STEPS: str = "5"
EVAL_FREQ: str = "10"
LOG_FREQ: str = "5"
NO_REFRESH: str = "-1"
CHECK_FREQS: List[str] = ["--warmup_steps", "--save_freq", "--eval_freq"]
PORT: str = str(random.randrange(15000, 16000))
def get_argument_value(all_args: List[str], argument_name: str) -> int:
argument_idx = all_args.index(argument_name)
return int(all_args[argument_idx + 1])
def check_valid_input_params(all_args: List[str], total_steps: int) -> None:
for freq in CHECK_FREQS:
        try:
            arg_val = get_argument_value(all_args, freq)
        except ValueError:
            print(f"List does not contain value {freq}")
            continue
        assert arg_val < total_steps, f"The {freq} cannot be higher than the total steps {total_steps}."
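# Illustrative usage of the two helpers above (hypothetical argument list and function name, not
# part of the finetuning entry point): get_argument_value returns the integer following a flag,
# and check_valid_input_params asserts that every frequency flag is smaller than --total_steps.
def _demo_argument_helpers() -> None:
    demo_args = ["--total_steps", "30", "--warmup_steps", "5", "--save_freq", "29", "--eval_freq", "10"]
    assert get_argument_value(demo_args, "--eval_freq") == 10
    check_valid_input_params(demo_args, total_steps=30)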
def set_parser_options(parser: argparse.ArgumentParser, passed_args: List[str]) -> argparse.Namespace:
"""
Sets the default options for finetuning an Atlas model for a q&a task.
"""
total_steps = get_argument_value(passed_args, "--total_steps")
all_args = [
"--write_results",
"--train_retriever",
"--query_side_retriever_training",
"--use_gradient_checkpoint_reader",
"--use_gradient_checkpoint_retriever",
"--shard_optim",
"--shard_grads",
"--temperature_gold",
EPSILON,
"--temperature_score",
EPSILON,
"--refresh_index",
"-1",
"--dropout",
DROPOUT,
"--lr",
SMALL_EPSILON,
"--lr_retriever",
SMALL_EPSILON,
"--scheduler",
"linear",
"--weight_decay",
EPSILON,
"--generation_max_length",
GEN_MAX_LENGTH,
"--target_maxlength",
GEN_MAX_LENGTH,
"--gold_score_mode",
GOLD_SCORE_MODE,
"--precision",
PRECISION,
"--text_maxlength",
GPU_MAX_LENGTH,
"--per_gpu_batch_size",
PBSZ,
"--n_context",
NCONTEXT,
"--retriever_n_context",
NCONTEXT,
"--task",
"qa",
"--refresh_index",
NO_REFRESH,
"--warmup_steps",
WARMUP_STEPS,
"--save_freq",
str(total_steps - 1),
"--eval_freq",
EVAL_FREQ,
"--log_freq",
LOG_FREQ,
"--main_port",
PORT,
] + passed_args
check_valid_input_params(all_args, total_steps)
return parser.parse_args(all_args)
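# Minimal sketch (hypothetical flag name and function, separate from the Atlas options) of why
# values in passed_args override the defaults prepended above: for a plain `store` action,
# argparse keeps the last occurrence of a flag, and passed_args is appended after the defaults.
def _demo_last_occurrence_wins() -> None:
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument("--dropout_demo", type=float)
    args = demo_parser.parse_args(["--dropout_demo", "0.1", "--dropout_demo", "0.3"])
    assert args.dropout_demo == 0.3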
if __name__ == "__main__":
options = get_options()
opt = set_parser_options(options.parser, sys.argv[1:])
torch.manual_seed(opt.seed)
if "TORCHELASTIC_RUN_ID" in os.environ:
init_distributed_mode_torchrun(opt)
torch.cuda.set_device(dist.get_rank())
else:
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
checkpoint_path, saved_index_path = create_checkpoint_directories(opt)
logger = util.init_logger(opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, "run.log"))
if opt.is_main:
options.print_options(opt)
logger.info(f"world size: {dist_utils.get_world_size()}")
index, passages = load_or_initialize_index(opt)
model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step = load_or_initialize_atlas_model(opt)
if opt.is_distributed:
if opt.shard_grads:
import fairscale.nn.data_parallel
model.reader = fairscale.nn.data_parallel.ShardedDataParallel(
model.reader, optimizer, auto_refresh_trainable=False
)
if opt.train_retriever:
model.retriever = fairscale.nn.data_parallel.ShardedDataParallel(
model.retriever, retr_optimizer, auto_refresh_trainable=False
)
else:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=True,
)
model._set_static_graph()
logger.info("Start finetuning")
dist_utils.barrier()
train(
model,
index,
passages,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
)
|
atlas-main
|
finetune_qa.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from collections import defaultdict
import numpy as np
import torch
import torch.cuda
import logging
from evaluate import evaluate
from src import dist_utils, slurm, util
from src.index_io import load_or_initialize_index, save_embeddings_and_index
from src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model, save_atlas_model
from src.options import get_options
from src.tasks import get_task
os.environ["TOKENIZERS_PARALLELISM"] = "true"
GRAD_SCALE_UPPER_BOUND_MEAN: int = 1000
GRAD_SCALE_LOWER_BOUND_MEAN: float = 0.01
THRESHOLD_GRAD_STATS: int = 100
logger = logging.getLogger(__name__)
def train(
model,
index,
passages,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
):
tb_logger = util.init_tb_logger(os.path.join(opt.checkpoint_dir, opt.name), is_main=opt.is_main)
run_stats = util.WeightedAvgStats()
unwrapped_model = util.get_unwrapped_model_if_wrapped(model)
# different seed for different sampling depending on global_rank
torch.manual_seed(opt.global_rank + opt.seed)
scale = 2.0
grad_stats = defaultdict(lambda: [])
task = get_task(opt, unwrapped_model.reader_tokenizer)
index_refresh_scheduler = util.IndexRefreshScheduler(
opt.refresh_index, opt.freeze_retriever_steps, opt.train_retriever
)
while step < opt.total_steps:
data_iterator = task.data_iterator(
opt.train_data, opt.global_rank, opt.world_size, repeat_if_less_than_world_size=True, opt=opt
)
data_iterator = filter(None, map(task.process, data_iterator))
data_iterator = task.batch_iterator(data_iterator, opt.per_gpu_batch_size, drop_last=True, shuffle=opt.shuffle)
for i, batch in enumerate(data_iterator):
iter_stats = {}
model.train()
if not opt.use_file_passages and index_refresh_scheduler.is_time_to_refresh(step):
                if not (step == 0 and opt.load_index_path is not None):  # Don't refresh the index if it was just loaded
indexing_start = time.time()
unwrapped_model.build_index(index, passages, opt.per_gpu_embedder_batch_size, logger)
iter_stats["runtime/indexing"] = (time.time() - indexing_start, 1)
if opt.save_index_path is not None:
save_embeddings_and_index(index, opt)
step += 1
train_step_start = time.time()
reader_loss, retriever_loss = model(
index=index,
query=batch["query"],
target=batch["target"],
target_tokens=batch.get("target_tokens"),
passages=batch["passages"] if opt.use_file_passages else None,
batch_metadata=batch.get("metadata"),
filtering_fun=task.filter,
train_retriever=opt.train_retriever and step > opt.freeze_retriever_steps,
iter_stats=iter_stats,
)
if retriever_loss is not None and opt.train_retriever:
train_loss = reader_loss.float() + retriever_loss
else:
train_loss = reader_loss
iter_stats["loss/train_loss"] = (train_loss.item(), len(batch["query"]))
backward_start = time.time()
train_loss = scale * train_loss
train_loss.backward()
iter_stats["runtime/backward"] = (time.time() - backward_start, 1)
model_update_start = time.time()
stats = util.compute_grad_stats(model)
if stats["skip_example"]:
model.zero_grad()
# continue
else:
for k, v in stats.items():
grad_stats[k].append(v)
if len(grad_stats["max"]) >= THRESHOLD_GRAD_STATS:
if np.mean(grad_stats["max"]) > GRAD_SCALE_UPPER_BOUND_MEAN:
scale /= 2
elif np.mean(grad_stats["mean"]) < GRAD_SCALE_LOWER_BOUND_MEAN:
scale *= 2
# print(f'Scale: {scale}')
grad_stats.clear()
if step % opt.accumulation_steps == 0 and not stats["skip_example"]:
if opt.is_distributed and opt.shard_optim:
optimizer.clip_grad_norm(scale * opt.clip)
if opt.train_retriever:
retr_optimizer.clip_grad_norm(scale * opt.clip)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), scale * opt.clip)
optimizer.step(scale=scale)
scheduler.step()
if opt.train_retriever:
retr_optimizer.step(scale=scale)
retr_scheduler.step()
model.zero_grad()
iter_stats["runtime/model_update"] = (time.time() - model_update_start, 1)
iter_stats["runtime/train_step"] = (time.time() - train_step_start, 1)
run_stats.update(iter_stats)
if step % opt.log_freq == 0:
log = f"{step} / {opt.total_steps}"
for k, v in sorted(run_stats.average_stats.items()):
log += f" | {k}: {v:.3g}"
if tb_logger:
tb_logger.add_scalar(k, v, step)
log += f" | lr: {scheduler.get_last_lr()[0]:0.2g}"
log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB"
if tb_logger:
tb_logger.add_scalar("lr", scheduler.get_last_lr()[0], step)
logger.info(log)
run_stats.reset()
if step % opt.eval_freq == 0:
for data_path in opt.eval_data:
dataset_name = os.path.basename(data_path)
metrics = evaluate(model, index, opt, data_path, step)
log_message = f"Dataset: {dataset_name}"
for k, v in metrics.items():
log_message += f" | {v:.3f} {k}"
if tb_logger:
tb_logger.add_scalar(f"{dataset_name}/{k}", v, step)
logger.info(log_message)
if step % opt.save_freq == 0:
save_atlas_model(
unwrapped_model,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
f"step-{step}",
)
if step > opt.total_steps:
exit()
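# Standalone sketch of the dynamic loss-scale heuristic used in the loop above (hypothetical
# helper; the real code reads the statistics from util.compute_grad_stats and also clears them
# after adjusting): once THRESHOLD_GRAD_STATS gradient statistics have been collected, the scale
# is halved when the mean of the per-step maxima is too large and doubled when the mean gradient
# is too small.
def _adjust_scale_sketch(scale, max_history, mean_history):
    if len(max_history) >= THRESHOLD_GRAD_STATS:
        if np.mean(max_history) > GRAD_SCALE_UPPER_BOUND_MEAN:
            scale /= 2
        elif np.mean(mean_history) < GRAD_SCALE_LOWER_BOUND_MEAN:
            scale *= 2
    return scale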
if __name__ == "__main__":
options = get_options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
checkpoint_path, saved_index_path = create_checkpoint_directories(opt)
logger = util.init_logger(opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, "run.log"))
if opt.is_main:
options.print_options(opt)
logger.info(f"world size: {dist_utils.get_world_size()}")
index, passages = load_or_initialize_index(opt)
model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step = load_or_initialize_atlas_model(opt)
if opt.is_distributed:
if opt.shard_grads:
import fairscale.nn.data_parallel
model.reader = fairscale.nn.data_parallel.ShardedDataParallel(
model.reader, optimizer, auto_refresh_trainable=False
)
if opt.train_retriever:
model.retriever = fairscale.nn.data_parallel.ShardedDataParallel(
model.retriever, retr_optimizer, auto_refresh_trainable=False
)
else:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=True,
)
model._set_static_graph()
logger.info("Start training")
dist_utils.barrier()
train(
model,
index,
passages,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
)
|
atlas-main
|
train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from collections import defaultdict
import numpy as np
import torch
import torch.cuda
import torch.distributed as dist
from src import dist_utils, slurm, util
from src.index_io import load_or_initialize_index, save_embeddings_and_index
from src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model
from src.options import get_options
from src.tasks import get_task
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def _get_eval_data_iterator(opt, data_path, task):
data_iterator = task.data_iterator(data_path, opt.global_rank, opt.world_size, opt=opt, is_eval=True)
data_iterator = filter(None, map(task.process, data_iterator))
data_iterator = list(task.batch_iterator(data_iterator, opt.per_gpu_batch_size))
if dist.is_initialized():
len_data = torch.tensor(len(data_iterator), device=torch.device("cuda"))
dist.all_reduce(len_data, torch.distributed.ReduceOp.MAX)
dist.barrier()
if len(data_iterator) < len_data.item():
data_iterator.extend([{} for _ in range(len_data.item() - len(data_iterator))])
return data_iterator
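# Minimal sketch (plain Python, hypothetical helper, no torch.distributed) of the padding above:
# each rank extends its local batch list with empty dicts until every rank iterates the same
# number of batches, which keeps the collective calls inside the evaluation loop in sync.
def _pad_to_max_len_sketch(local_batches, max_len_across_ranks):
    padded = list(local_batches)
    padded.extend({} for _ in range(max_len_across_ranks - len(padded)))
    return padded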
@torch.no_grad()
def run_retrieval_only(model, index, opt, data_path, step=None):
model.eval()
metrics = defaultdict(lambda: [])
dataset_wpred = []
unwrapped_model = util.get_unwrapped_model_if_wrapped(model)
reader_tokenizer = unwrapped_model.reader_tokenizer
task = get_task(opt, reader_tokenizer)
data_iterator = _get_eval_data_iterator(opt, data_path, task)
for i, batch in enumerate(data_iterator):
query = batch.get("query", [""])
answers = batch.get("target", [""])
batch_metadata = batch.get("metadata")
query_enc = model.retriever_tokenize(query)
retrieved_passages, _ = unwrapped_model.retrieve(
index,
opt.n_context,
query,
query_enc["input_ids"].cuda(),
query_enc["attention_mask"].cuda(),
batch_metadata=batch_metadata,
filtering_fun=task.filter,
)
# If example is a padding example then skip step
if (len(query) == 0) or (len(query[0]) == 0):
continue
for k in range(len(retrieved_passages)):
if opt.write_results:
gold = [answers[k]] if not "answers" in batch else batch["answers"][k]
ex = {"query": query[k], "answers": gold, "passages": retrieved_passages[k]}
if batch_metadata is not None:
ex["metadata"] = batch_metadata[k]
if "id" in batch:
ex["id"] = batch["id"][k]
dataset_wpred.append(ex)
if opt.write_results:
dataset_name, _ = os.path.splitext(os.path.basename(data_path))
dataset_name = f"{dataset_name}-step-{step}"
util.save_distributed_dataset(dataset_wpred, dataset_name, opt)
return metrics
@torch.no_grad()
def evaluate(model, index, opt, data_path, step=None):
model.eval()
metrics = defaultdict(lambda: [])
dataset_wpred = []
unwrapped_model = util.get_unwrapped_model_if_wrapped(model)
reader_tokenizer = unwrapped_model.reader_tokenizer
task = get_task(opt, reader_tokenizer)
data_iterator = _get_eval_data_iterator(opt, data_path, task)
for i, batch in enumerate(data_iterator):
query = batch.get("query", [""])
answers = batch.get("target", [""])
batch_metadata = batch.get("metadata")
target_tokens = batch.get("target_tokens")
query_enc, labels, decoder_input_ids = unwrapped_model.tokenize(query, answers, target_tokens=target_tokens)
if not opt.use_file_passages:
query_ids_retriever = query_enc["input_ids"].cuda()
query_mask_retriever = query_enc["attention_mask"].cuda()
retrieved_passages, _ = unwrapped_model.retrieve(
index,
opt.n_context,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata=batch_metadata,
filtering_fun=task.filter,
)
else:
assert "passages" in batch, "cant use use_file_passages mode without passing in passages"
retrieved_passages = [p[: opt.n_context] for p in batch["passages"]]
# If example is a padding example then skip step
if (len(query) == 0) or (len(query[0]) == 0):
continue
reader_tokens, _ = unwrapped_model.tokenize_passages(query, retrieved_passages)
if "eval_loss" in task.metrics:
eval_loss, logits = unwrapped_model.compute_reader_loss_and_logits(reader_tokens, decoder_input_ids, labels)
metrics["eval_loss"].append(eval_loss)
generation = unwrapped_model.generate(
reader_tokens, query, choices=batch["choices"] if "choices" in batch else None
)
for k, g in enumerate(generation):
if opt.decoder_prompt_format is not None:
query_ids = reader_tokenizer.encode(
opt.decoder_prompt_format.format_map({"query": query[k]}), add_special_tokens=False
)
g = g[len(query_ids) + 1 :]
pred = reader_tokenizer.decode(g, skip_special_tokens=True)
gold = [answers[k]] if not "answers" in batch else batch["answers"][k]
sample_metrics = task.evaluation(pred, gold)
for key, value in sample_metrics.items():
metrics[key].append(value)
if opt.write_results:
ex = {"query": query[k], "answers": gold, "generation": pred}
if not opt.dont_write_passages:
ex["passages"] = retrieved_passages[k]
if batch_metadata is not None:
ex["metadata"] = batch_metadata[k]
if opt.task == "multiple_choice":
ex["choice_logits"] = task.get_choice_logits(logits[k])
if "id" in batch:
ex["id"] = batch["id"][k]
dataset_wpred.append(ex)
metrics, dataset_wpred = task.evaluation_postprocessing(metrics, dataset_wpred)
metrics = util.avg_dist_dict(task.metrics, metrics)
metrics = {key: value if key == "eval_loss" else 100 * value for key, value in metrics.items()}
if opt.write_results:
dataset_name, _ = os.path.splitext(os.path.basename(data_path))
dataset_name = f"{dataset_name}-step-{step}"
util.save_distributed_dataset(dataset_wpred, dataset_name, opt)
return metrics
if __name__ == "__main__":
options = get_options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
checkpoint_path, saved_index_path = create_checkpoint_directories(opt)
logger = util.init_logger(opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, "run.log"))
if opt.is_main:
options.print_options(opt)
logger.info(f"world size: {dist_utils.get_world_size()}")
index, passages = load_or_initialize_index(opt)
model, _, _, _, _, opt, step = load_or_initialize_atlas_model(opt, eval_only=True)
logger.info("Start Evaluation")
dist_utils.barrier()
if not opt.use_file_passages and opt.load_index_path is None:
indexing_start = time.time()
model.build_index(index, passages, opt.per_gpu_embedder_batch_size, logger)
if opt.save_index_path is not None:
save_embeddings_and_index(index, opt)
for data_path in opt.eval_data:
dataset_name = os.path.basename(data_path)
logger.info(f"Start Evaluation on {data_path}")
if opt.retrieve_only:
run_retrieval_only(model, index, opt, data_path, step)
else:
metrics = evaluate(model, index, opt, data_path, step)
log_message = f"Dataset: {dataset_name}"
for k, v in metrics.items():
log_message += f" | {v:.3f} {k}"
logger.info(log_message)
|
atlas-main
|
evaluate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
SUBCATEGORIES = {
"humanities": [
"high_school_european_history",
"high_school_us_history",
"high_school_world_history",
"prehistory",
"formal_logic",
"logical_fallacies",
"moral_disputes",
"moral_scenarios",
"philosophy",
"world_religions",
"international_law",
"jurisprudence",
"professional_law",
],
"Soc Sci.": [
"high_school_government_and_politics",
"public_relations",
"security_studies",
"us_foreign_policy",
"human_sexuality",
"sociology",
"econometrics",
"high_school_macroeconomics",
"high_school_microeconomics",
"high_school_geography",
"high_school_psychology",
"professional_psychology",
],
"STEM": [
"astronomy",
"college_physics",
"conceptual_physics",
"high_school_physics",
"college_chemistry",
"high_school_chemistry",
"college_biology",
"high_school_biology",
"college_computer_science",
"computer_security",
"high_school_computer_science",
"machine_learning",
"abstract_algebra",
"college_mathematics",
"elementary_mathematics",
"high_school_mathematics",
"high_school_statistics",
"electrical_engineering",
],
"other": [
"global_facts",
"miscellaneous",
"professional_accounting",
"business_ethics",
"management",
"marketing",
"anatomy",
"clinical_knowledge",
"college_medicine",
"human_aging",
"medical_genetics",
"nutrition",
"professional_medicine",
"virology",
],
"all": [
"abstract_algebra",
"anatomy",
"astronomy",
"business_ethics",
"clinical_knowledge",
"college_biology",
"college_chemistry",
"college_computer_science",
"college_mathematics",
"college_medicine",
"college_physics",
"computer_security",
"conceptual_physics",
"econometrics",
"electrical_engineering",
"elementary_mathematics",
"formal_logic",
"global_facts",
"high_school_biology",
"high_school_chemistry",
"high_school_computer_science",
"high_school_european_history",
"high_school_geography",
"high_school_government_and_politics",
"high_school_macroeconomics",
"high_school_mathematics",
"high_school_microeconomics",
"high_school_physics",
"high_school_psychology",
"high_school_statistics",
"high_school_us_history",
"high_school_world_history",
"human_aging",
"human_sexuality",
"international_law",
"jurisprudence",
"logical_fallacies",
"machine_learning",
"management",
"marketing",
"medical_genetics",
"miscellaneous",
"moral_disputes",
"moral_scenarios",
"nutrition",
"philosophy",
"prehistory",
"professional_accounting",
"professional_law",
"professional_medicine",
"professional_psychology",
"public_relations",
"security_studies",
"sociology",
"us_foreign_policy",
"virology",
"world_religions",
],
}
def load_predictions_file(file):
predictions = {}
for line in open(file):
dp = json.loads(line)
if "permuatations" in dp:
dp["permutations"] = dp["permuatations"]
original = [p for p in dp["permutations"] if p["metadata"]["is_original"]][0]
dataset = original["metadata"]["dataset"].replace("_test", "").replace("_valid", "")
uuid = original["metadata"]["question"] + str(original["metadata"]["options"])
original_prediction = max(original["choice_logits"].items(), key=lambda x: x[1])[0]
debiased_prediction = dp["generation"]
predictions.setdefault(dataset, {})[uuid] = {
"prediction": original_prediction,
"debiased_prediction": debiased_prediction,
}
return predictions
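# Illustrative shape of one prediction line consumed above (a sketch inferred from the fields this
# function reads; the concrete values are made up):
#   {"generation": "B",
#    "permutations": [{"metadata": {"is_original": true, "dataset": "anatomy_test",
#                                   "question": "Which bone ...?", "options": {"A": "...", "B": "..."}},
#                      "choice_logits": {"A": -1.2, "B": -0.3, "C": -2.0, "D": -4.1}}]}
# The argmax over the original permutation's choice_logits becomes "prediction" ("B" here) and the
# top-level "generation" becomes "debiased_prediction", keyed by question text plus the option dict.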
def load_predictions(path, step=None, split=None):
if os.path.isdir(path):
predictions = {}
for domain in os.listdir(path):
predictions_path = os.path.join(path, domain, f"{domain}.{split}-step-{step}.jsonl")
if not os.path.exists(predictions_path):
raise ValueError(f"{predictions_path} expected but missing")
predictions.update(load_predictions_file(predictions_path))
else:
predictions = load_predictions_file(path)
return predictions
def load_gold_file(file):
gold = {}
for line in open(file):
dp = json.loads(line)
dataset = dp["dataset"].replace("_test", "").replace("_valid", "")
uuid = dp["question"] + str(dp["options"])
gold_answer = dp["answer"]
gold.setdefault(dataset, {})[uuid] = gold_answer
return gold
def score_categories(gold_answers, predictions, categories):
acc = []
debiased_acc = []
for cat in categories:
preds = predictions[cat]
golds = gold_answers[cat]
for question in golds.keys():
pred = preds[question]
gold = golds[question]
acc.append(pred["prediction"] == gold)
debiased_acc.append(pred["debiased_prediction"] == gold)
acc = sum(acc) / len(acc)
debiased_acc = sum(debiased_acc) / len(debiased_acc)
return acc, debiased_acc
def main(predictions_file, gold_file, step=None, split=None):
print(f"predictions for {predictions_file}")
print(f"{'category': >15}\t{'Acc(%)':>15}\t{'Debias Acc(%)':>15}")
predictions = load_predictions(predictions_file, step, split)
gold_answers = load_gold_file(gold_file)
print("-" * 47)
for category_name, categories in SUBCATEGORIES.items():
scores, debiased_scores = score_categories(gold_answers, predictions, categories)
sc, db = f"{100*scores:0.2f}", f"{100*debiased_scores:0.2f}"
print(f"{category_name: >15}\t{sc:>15}\t{db:>15}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument(
        "--predictions_path",
        type=str,
        help="Path to the written predictions file (zero-shot, 5-shot multi, full) or directory containing models (5-shot)",
    )
    parser.add_argument(
        "--gold_path",
        type=str,
        help="Path to the gold answers file",
    )
parser.add_argument(
"--step",
type=int,
default=16,
help="only for 5-shot, specify the step to evaluate",
)
parser.add_argument(
"--split",
type=str,
default="valid",
help="only for 5-shot, specify the split to evaluate",
)
args = parser.parse_args()
main(args.predictions_path, args.gold_path, step=args.step, split=args.split)
|
atlas-main
|
evaluation_scripts/evaluate_mmlu_predictions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
from pathlib import Path
import argparse
import shutil
import tarfile
from download_tools import maybe_download_file
# random 64 examples used with Atlas
nq_64shot = [
27144,
14489,
49702,
38094,
6988,
60660,
65643,
48249,
48085,
52629,
48431,
7262,
34659,
24332,
44839,
17721,
50819,
62279,
37021,
77405,
52556,
23802,
40974,
64678,
69673,
77277,
18419,
25635,
1513,
11930,
5542,
13453,
52754,
65663,
67400,
42409,
74541,
33159,
65445,
28572,
74069,
7162,
19204,
63509,
12244,
48532,
72778,
37507,
70300,
29927,
18186,
27579,
58411,
63559,
4347,
59383,
57392,
42014,
77920,
45592,
32321,
3422,
61041,
34051,
]
# random 64 examples used with Atlas
triviaqa_64shot = [
75927,
38807,
452,
68095,
44621,
34592,
36091,
65286,
56484,
48197,
34692,
28011,
16670,
62641,
37865,
6658,
45724,
37527,
17740,
31133,
8010,
48573,
53670,
15514,
25996,
54404,
10739,
55105,
66122,
73324,
41202,
71253,
41258,
51344,
60092,
50455,
65078,
36169,
33408,
55106,
40526,
65582,
66337,
39766,
77174,
17289,
7367,
50930,
21151,
21809,
52804,
26110,
54414,
73358,
11459,
66019,
41084,
13349,
39059,
6626,
25540,
15110,
53320,
61313,
]
def convert_triviaqa(ex):
target = ex["Answer"]["Value"]
if target.isupper():
target = target.title()
return {
"question": ex["Question"],
"answers": ex["Answer"]["Aliases"],
"target": target,
}
def convert_nq(ex):
return {"question": ex["question"], "answers": ex["answer"]}
def preprocess_triviaqa(orig_dir, output_dir, index_dir):
data, index = {}, {}
for split in ["train", "dev", "test"]:
with open(index_dir / ("TQA." + split + ".idx.json"), "r") as fin:
index[split] = json.load(fin)
with open(orig_dir / "triviaqa-unfiltered" / "unfiltered-web-train.json") as fin:
originaltrain = json.load(fin)["Data"]
with open(orig_dir / "triviaqa-unfiltered" / "unfiltered-web-dev.json") as fin:
originaldev = json.load(fin)["Data"]
data["train"] = [convert_triviaqa(originaltrain[k]) for k in index["train"]]
data["train.64-shot"] = [convert_triviaqa(originaltrain[k]) for k in triviaqa_64shot]
data["dev"] = [convert_triviaqa(originaltrain[k]) for k in index["dev"]]
data["test"] = [convert_triviaqa(originaldev[k]) for k in index["test"]]
for split in data:
with open(output_dir / (split + ".jsonl"), "w") as fout:
for ex in data[split]:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
def preprocess_nq(orig_dir, output_dir, index_dir):
data, index = {}, {}
for split in ["train", "dev", "test"]:
with open(index_dir / ("NQ." + split + ".idx.json"), "r") as fin:
index[split] = json.load(fin)
originaltrain, originaldev = [], []
with open(orig_dir / "NQ-open.dev.jsonl") as fin:
for k, example in enumerate(fin):
example = json.loads(example)
originaldev.append(example)
with open(orig_dir / "NQ-open.train.jsonl") as fin:
for k, example in enumerate(fin):
example = json.loads(example)
originaltrain.append(example)
data["train"] = [convert_nq(originaltrain[k]) for k in index["train"]]
data["train.64-shot"] = [convert_nq(originaltrain[k]) for k in nq_64shot]
data["dev"] = [convert_nq(originaltrain[k]) for k in index["dev"]]
data["test"] = [convert_nq(originaldev[k]) for k in index["test"]]
for split in data:
with open(output_dir / (split + ".jsonl"), "w") as fout:
for ex in data[split]:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
def main(args):
output_dir = Path(args.output_directory)
index_tar = output_dir / "index.tar"
index_dir = output_dir / "dataindex"
original_triviaqa_dir = output_dir / "original_triviaqa"
triviaqa_dir = output_dir / "triviaqa_data"
triviaqa_tar = output_dir / "triviaqa_data.tar"
nq_dir = output_dir / "nq_data"
original_nq_dir = output_dir / "original_naturalquestions"
if args.overwrite:
print("Overwriting NaturalQuestions and TriviaQA")
download_triviaqa = True
download_nq = True
else:
download_triviaqa = not triviaqa_dir.exists()
download_nq = not nq_dir.exists()
if download_triviaqa or download_nq:
index_url = "https://dl.fbaipublicfiles.com/FiD/data/dataindex.tar.gz"
maybe_download_file(index_url, index_tar)
if not os.path.exists(index_dir):
with tarfile.open(index_tar) as tar:
tar.extractall(index_dir)
if download_triviaqa:
triviaqa_dir.mkdir(parents=True, exist_ok=True)
original_triviaqa_url = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz"
maybe_download_file(original_triviaqa_url, triviaqa_tar)
if not os.path.exists(original_triviaqa_dir):
with tarfile.open(triviaqa_tar) as tar:
tar.extractall(original_triviaqa_dir)
preprocess_triviaqa(original_triviaqa_dir, triviaqa_dir, index_dir)
else:
print("TriviaQA data already exists, not overwriting")
if download_nq:
nq_dir.mkdir(parents=True, exist_ok=True)
nq_dev_url = "https://raw.githubusercontent.com/google-research-datasets/natural-questions/master/nq_open/NQ-open.dev.jsonl"
nq_train_url = "https://raw.githubusercontent.com/google-research-datasets/natural-questions/master/nq_open/NQ-open.train.jsonl"
maybe_download_file(nq_dev_url, original_nq_dir / "NQ-open.dev.jsonl")
maybe_download_file(nq_train_url, original_nq_dir / "NQ-open.train.jsonl")
preprocess_nq(original_nq_dir, nq_dir, index_dir)
else:
print("NaturalQuestions data already exists, not overwriting")
triviaqa_tar.unlink(missing_ok=True)
index_tar.unlink(missing_ok=True)
if original_triviaqa_dir.exists():
shutil.rmtree(original_triviaqa_dir)
if original_nq_dir.exists():
shutil.rmtree(original_nq_dir)
if index_dir.exists():
shutil.rmtree(index_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_directory",
type=str,
default="./data/",
help="Path to the file to which the dataset is written.",
)
parser.add_argument("--overwrite", action="store_true", help="Overwrite data")
args = parser.parse_args()
main(args)
|
atlas-main
|
preprocessing/prepare_qa.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
from pathlib import Path
from download_tools import maybe_download_file
URLS = {
"train": "https://storage.googleapis.com/gresearch/templama/train.json",
"valid": "https://storage.googleapis.com/gresearch/templama/val.json",
"test": "https://storage.googleapis.com/gresearch/templama/test.json",
}
def prep_question(question):
return question.replace("_X_", "<extra_id_0>")
def maybe_download_data(output_directory):
paths = {}
for split, url in URLS.items():
dest = output_directory / f"{split}.original.jsonl"
maybe_download_file(url, dest)
paths[split] = dest
return paths
def _parse(path, years_to_parse):
items = []
for line in open(path):
if line.strip() != "":
items.append(json.loads(line))
mapper = {}
for i in items:
if i["date"] in years_to_parse:
mapper.setdefault(i["query"], []).append(i)
return mapper
def _dump(output_path, objects_to_write):
with open(output_path, "w") as f:
for item in objects_to_write:
f.write(json.dumps(item) + "\n")
def _get_export_obj(obj):
return {
"question": prep_question(obj["query"]),
"answers": list(set([n["name"] for n in obj["answer"]])),
"metadata": {"original_instance": obj},
}
def main(output_directory, years_to_compare=["2017", "2020"]):
os.makedirs(output_directory, exist_ok=True)
paths = maybe_download_data(output_directory)
for split, path in paths.items():
to_write = {y: [] for y in years_to_compare}
query2items = _parse(path, years_to_compare)
for _, objects in query2items.items():
            if len(objects) == 1:  # the question does not have different answers in different years
continue
first_answer, later_answers = objects[0], objects[1:]
previous_answer_strings = set([n["name"] for n in first_answer["answer"]])
different_later_answers = []
for later_answer in later_answers:
if all([n["name"] not in previous_answer_strings for n in later_answer["answer"]]):
different_later_answers.append(later_answer)
if len(different_later_answers) > 0:
to_write[first_answer["date"]].append(_get_export_obj(first_answer))
for d in different_later_answers:
to_write[d["date"]].append(_get_export_obj(d))
for date, items in to_write.items():
output_path = output_directory / f"temp_lama.{split}.{date}.jsonl"
_dump(output_path, items)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
args = parser.parse_args()
output_directory = Path(args.output_directory) / "data" / "templama_data"
main(output_directory)
|
atlas-main
|
preprocessing/prepare_templama.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from download_tools import get_download_path, get_s3_path, maybe_download_file
BASE_URL = "https://dl.fbaipublicfiles.com/atlas"
PASSAGE_FNAME = "passages.{shard}.pt"
EMBEDDING_FNAME = "embeddings.{shard}.pt"
N_SHARDS = 128
AVAILABLE_INDICES = [
{
"index": "indices/atlas/wiki/xxl",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas xxl model",
},
{
"index": "indices/atlas/wiki/xl",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas xl model",
},
{
"index": "indices/atlas/wiki/large",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas large model",
},
{
"index": "indices/atlas/wiki/base",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas base model",
},
{
"index": "indices/atlas_nq/wiki/xxl",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas xxl model",
},
{
"index": "indices/atlas_nq/wiki/xl",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas xl model",
},
{
"index": "indices/atlas_nq/wiki/large",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas large model",
},
{
"index": "indices/atlas_nq/wiki/base",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas base model",
},
]
def _helpstr():
helpstr = "The following indices are available for download: "
for m in AVAILABLE_INDICES:
helpstr += f'\nIndex name: {m["index"]:<30} Description: {m["description"]}'
helpstr += "\nDownload by passing --index {index name}"
return helpstr
def get_passage_path(index, shard_number):
passage_filename = PASSAGE_FNAME.format(shard=shard_number)
return f"{index}/{passage_filename}"
def get_embedding_path(index, shard_number):
embedding_filename = EMBEDDING_FNAME.format(shard=shard_number)
return f"{index}/{embedding_filename}"
def main(output_directory, requested_index):
for shard in range(N_SHARDS):
passage_path = get_passage_path(requested_index, shard)
source = get_s3_path(passage_path)
target = get_download_path(output_directory, passage_path)
maybe_download_file(source, target)
embedding_path = get_embedding_path(requested_index, shard)
source = get_s3_path(embedding_path)
target = get_download_path(output_directory, embedding_path)
maybe_download_file(source, target)
if __name__ == "__main__":
help_str = _helpstr()
choices = list([a["index"] for a in AVAILABLE_INDICES])
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
parser.add_argument(
"--index",
type=str,
choices=choices,
help=help_str,
)
args = parser.parse_args()
main(args.output_directory, args.index)
|
atlas-main
|
preprocessing/download_index.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import wget
BASE_URL = "https://dl.fbaipublicfiles.com/atlas"
def maybe_download_file(source, target):
if not os.path.exists(target):
os.makedirs(os.path.dirname(target), exist_ok=True)
print(f"Downloading {source} to {target}")
wget.download(source, out=str(target))
print()
def get_s3_path(path):
return f"{BASE_URL}/{path}"
def get_download_path(output_dir, path):
return os.path.join(output_dir, path)
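# Example composition of the helpers above (the model path is illustrative; it matches the layout
# used by download_model.py):
#   get_s3_path("models/atlas/base/model.pth.tar")
#     -> "https://dl.fbaipublicfiles.com/atlas/models/atlas/base/model.pth.tar"
#   get_download_path("./data", "models/atlas/base/model.pth.tar")
#     -> "./data/models/atlas/base/model.pth.tar"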
|
atlas-main
|
preprocessing/download_tools.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
from pathlib import Path
import argparse
from download_tools import maybe_download_file
fever_64shot = [
23236,
131610,
70907,
110333,
83874,
121714,
17355,
115320,
9907,
42725,
43614,
139489,
30589,
76963,
5916,
7241,
68848,
59902,
113855,
110886,
102332,
79223,
24359,
105929,
131435,
118883,
8152,
119911,
28803,
111318,
29503,
43420,
39533,
15214,
29807,
29242,
10288,
111860,
77451,
102160,
77982,
132435,
2875,
47721,
92378,
128574,
24721,
83985,
41521,
97851,
137243,
74916,
85056,
135,
130085,
19233,
2887,
124345,
91769,
63969,
50865,
135928,
143220,
124300,
]
def main(args):
output_dir = Path(args.output_directory)
fever_path, fever_url = {}, {}
fever_dir = output_dir / "fever_data"
fever_path["train"] = fever_dir / "train.jsonl"
fever_path["train-64"] = fever_dir / "train-64.jsonl"
fever_path["dev"] = fever_dir / "dev.jsonl"
fever_path["test"] = fever_dir / "test.jsonl"
fever_url["train"] = "https://fever.ai/download/fever/train.jsonl"
fever_url["dev"] = "https://fever.ai/download/fever/shared_task_dev.jsonl"
fever_url["test"] = "https://fever.ai/download/fever/shared_task_test.jsonl"
for split in ["train", "dev", "test"]:
if args.overwrite or not fever_path[split].exists():
maybe_download_file(fever_url[split], fever_path[split])
else:
print(f"{split} file already exists, not overwriting, use --overwrite instead")
with open(fever_path["train"]) as fin:
with open(fever_path["train-64"], "w") as fout:
for k, line in enumerate(fin):
if k in fever_64shot:
ex = json.loads(line)
json.dump(ex, fout)
fout.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_directory",
type=str,
default="./data/",
help="Path to the file to which the dataset is written.",
)
parser.add_argument("--overwrite", action="store_true", help="Overwrite data")
args = parser.parse_args()
main(args)
|
atlas-main
|
preprocessing/prepare_fever.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from download_tools import get_download_path, get_s3_path, maybe_download_file
BASE_URL = "https://dl.fbaipublicfiles.com/atlas"
MODEL_FILE_NAME = "model.pth.tar"
AVAILABLE_MODELS = [
{"model": "models/atlas/xxl", "description": "Pretrained Atlas XXL model"},
{"model": "models/atlas/xl", "description": "Pretrained Atlas XL model"},
{"model": "models/atlas/large", "description": "Pretrained Atlas Large model"},
{"model": "models/atlas/base", "description": "Pretrained Atlas Base model"},
{"model": "models/atlas_nq/xxl", "description": "Atlas XXL model, finetuned on Natural Questions"},
{"model": "models/atlas_nq/xl", "description": "Atlas XL model, finetuned on Natural Questions"},
{"model": "models/atlas_nq/large", "description": "Atlas large model, finetuned on Natural Questions"},
{"model": "models/atlas_nq/base", "description": "Atlas base model, finetuned on Natural Questions"},
]
def _helpstr():
helpstr = "The following models are available for download: "
for m in AVAILABLE_MODELS:
helpstr += f'\nModel name: {m["model"]:<30} Description: {m["description"]}'
helpstr += "\ndownload by passing --model {model name}"
return helpstr
def main(output_directory, requested_model):
model_path = f"{requested_model}/{MODEL_FILE_NAME}"
source = get_s3_path(model_path)
target = get_download_path(output_directory, model_path)
maybe_download_file(source, target)
if __name__ == "__main__":
help_str = _helpstr()
choices = list([a["model"] for a in AVAILABLE_MODELS])
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
parser.add_argument(
"--model",
type=str,
choices=choices,
help=help_str,
)
args = parser.parse_args()
main(args.output_directory, args.model)
|
atlas-main
|
preprocessing/download_model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import json
import os
import random
import tarfile
from pathlib import Path
from download_tools import maybe_download_file
DATA_URL = "https://people.eecs.berkeley.edu/~hendrycks/data.tar"
def maybe_download_data(output_directory):
os.makedirs(output_directory, exist_ok=True)
# download tar:
orig_data_tar = output_directory / "data.tar"
maybe_download_file(DATA_URL, orig_data_tar)
untarred_orig_data = Path(output_directory) / "data"
if not os.path.exists(untarred_orig_data):
with tarfile.open(orig_data_tar) as tar:
tar.extractall(output_directory)
return untarred_orig_data
def build_mmlu_instance(name, line):
question, option_a, option_b, option_c, option_d, answer = line
return {
"question": question,
"options": {"A": option_a, "B": option_b, "C": option_c, "D": option_d},
"answer": answer,
"dataset": name,
}
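# Illustrative mapping performed above (a sketch: MMLU CSV rows have no header and the answer is
# the letter of the correct option; the question text is made up):
#   build_mmlu_instance("anatomy_dev", ["Which bone ...?", "Femur", "Tibia", "Radius", "Ulna", "A"])
#     -> {"question": "Which bone ...?",
#         "options": {"A": "Femur", "B": "Tibia", "C": "Radius", "D": "Ulna"},
#         "answer": "A", "dataset": "anatomy_dev"}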
def get_dataset_name_from_path(path):
return os.path.basename(path).replace(".csv", "")
def parse_mmlu_csv(path):
output = []
name = get_dataset_name_from_path(path)
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for line in csv_reader:
obj = build_mmlu_instance(name, line)
output.append(obj)
return output
def parse_all_mmlu_data(directory):
all_data = {}
for split in ["auxiliary_train", "dev", "val", "test"]:
for fi in os.listdir(directory / split):
path_to_read = directory / split / fi
name = get_dataset_name_from_path(path_to_read)
all_data.setdefault(split, {})[name] = parse_mmlu_csv(path_to_read)
return all_data
def dump(items, path):
with open(path, "w") as f:
for item in items:
f.write(json.dumps(item) + "\n")
def make_five_shot_data(data, output_directory):
indiv_train_path = output_directory / "individual_train"
indiv_valid_path = output_directory / "individual_valid"
indiv_test_path = output_directory / "individual_test"
os.makedirs(indiv_train_path, exist_ok=True)
os.makedirs(indiv_valid_path, exist_ok=True)
os.makedirs(indiv_test_path, exist_ok=True)
for domain, items in data["dev"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_train_path / f"{domain}.5-shot-train.jsonl"
dump(items, dump_path)
for domain, items in data["val"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_valid_path / f"{domain}.val.jsonl"
dump(items, dump_path)
for domain, items in data["test"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_test_path / f"{domain}.test.jsonl"
dump(items, dump_path)
combined_val = [item for _, items in data["val"].items() for item in items]
dump(combined_val, output_directory / f"combined_valid.jsonl")
combined_test = [item for _, items in data["test"].items() for item in items]
dump(combined_test, output_directory / f"combined_test.jsonl")
def make_five_shot_multitask_data(data, output_directory):
indiv_valid_path = output_directory / "individual_valid"
indiv_test_path = output_directory / "individual_test"
os.makedirs(indiv_valid_path, exist_ok=True)
os.makedirs(indiv_test_path, exist_ok=True)
for domain, items in data["val"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_valid_path / f"{domain}.val.jsonl"
dump(items, dump_path)
for domain, items in data["test"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_test_path / f"{domain}.test.jsonl"
dump(items, dump_path)
combined_train = [item for _, items in data["dev"].items() for item in items]
dump(combined_train, output_directory / f"train.jsonl")
combined_val = [item for _, items in data["val"].items() for item in items]
dump(combined_val, output_directory / f"combined_valid.jsonl")
combined_test = [item for _, items in data["test"].items() for item in items]
dump(combined_test, output_directory / f"combined_test.jsonl")
def make_full_transfer_data(data, output_directory):
indiv_valid_path = output_directory / "individual_valid"
indiv_test_path = output_directory / "individual_test"
os.makedirs(indiv_valid_path, exist_ok=True)
os.makedirs(indiv_test_path, exist_ok=True)
for domain, items in data["val"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_valid_path / f"{domain}.val.jsonl"
dump(items, dump_path)
for domain, items in data["test"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_test_path / f"{domain}.test.jsonl"
dump(items, dump_path)
combined_auxilary = [item for _, items in data["auxiliary_train"].items() for item in items]
random.seed(10)
random.shuffle(combined_auxilary)
auxillary_valid = combined_auxilary[-5000:]
auxiliary_train = combined_auxilary[:-5000]
dump(auxillary_valid, output_directory / f"auxillary_valid.jsonl")
combined_train = [item for _, items in data["dev"].items() for item in items]
full_train = auxiliary_train + combined_train
dump(full_train, output_directory / f"train.jsonl")
combined_val = [item for _, items in data["val"].items() for item in items]
dump(combined_val, output_directory / f"combined_valid.jsonl")
combined_test = [item for _, items in data["test"].items() for item in items]
dump(combined_test, output_directory / f"combined_test.jsonl")
def main(output_directory):
original_data_directory = maybe_download_data(output_directory)
all_data = parse_all_mmlu_data(original_data_directory)
make_five_shot_data(all_data, output_directory / "5-shot")
make_five_shot_multitask_data(all_data, output_directory / "5-shot-multitask")
make_full_transfer_data(all_data, output_directory / "full")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"""Downloads, parses and creates train, validation and test files for MMLU.
We consider 3 tasks:
* 5-shot: learn a model with 5 examples for each domain.
* 5-shot-multitask: Learn a single model using the combination of 5 examples from each domain.
        * full: Learn a single model using training data from MMLU's auxiliary datasets, plus the training data from 5-shot-multitask.
        In each case, overall test accuracy would be the micro average over each domain's test set (as defined by the original authors).
The script will download the data, and create the following directory structure:
├── data.tar # original data
├── 5-shot
│ ├── combined_test.jsonl
│ ├── combined_valid.jsonl
│ ├── individual_test
│ │ ├── {domain}.test.jsonl
│ ├── individual_train
│ │ ├── {domain}.5-shot-train.jsonl
│ └── individual_valid
│ ├── {domain}.val.jsonl
├── 5-shot-multitask
│ ├── combined_test.jsonl
│ ├── combined_valid.jsonl
│ ├── individual_test
│ │ ├── {domain}.test.jsonl
│ ├── individual_valid
│ │ ├── {domain}.val.jsonl
│ └── train.jsonl
└── full
├── auxillary_valid.jsonl
├── combined_test.jsonl
├── combined_valid.jsonl
├── individual_test
│ ├── {domain}.test.jsonl
├── individual_valid
│ ├── {domain}.val.jsonl
└── train.jsonl
        * For 5-shot, train models on 5-shot/individual_train/{domain}.5-shot-train.jsonl and test on 5-shot/individual_test/{domain}.test.jsonl
        * For 5-shot-multitask, train models on 5-shot-multitask/train.jsonl and test on 5-shot-multitask/combined_test.jsonl
        * For the full data task, train models on full/train.jsonl and test on full/combined_test.jsonl
"""
)
parser.add_argument(
"--output_directory",
type=str,
default="./data/",
help="Path to the file to which the dataset is written.",
)
args = parser.parse_args()
output_directory = Path(args.output_directory) / "data" / "mmlu_data"
main(output_directory)
|
atlas-main
|
preprocessing/prepare_mmlu.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from download_tools import get_download_path, get_s3_path, maybe_download_file
AVAILABLE_CORPORA = {
"corpora/wiki/enwiki-dec2017": {
"corpus": "corpora/wiki/enwiki-dec2017",
"description": "Wikipedia dump from Dec 2017, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-dec2018": {
"corpus": "corpora/wiki/enwiki-dec2018",
"description": "Wikipedia dump from Dec 2018, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-aug2019": {
"corpus": "corpora/wiki/enwiki-aug2019",
"description": "Wikipedia dump from Aug 2019, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-dec2020": {
"corpus": "corpora/wiki/enwiki-dec2020",
"description": "Wikipedia dump from Dec 2020, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-dec2021": {
"corpus": "corpora/wiki/enwiki-dec2021",
"description": "Wikipedia dump from Dec 2021, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
}
def _helpstr():
helpstr = "The following corpora are available for download: "
for m in AVAILABLE_CORPORA.values():
helpstr += f'\nCorpus name: {m["corpus"]:<30} Description: {m["description"]}'
helpstr += "\ndownload by passing --corpus {corpus name}"
return helpstr
def main(output_directory, requested_corpus):
    AVAILABLE_CORPORA[requested_corpus]  # fail fast with a KeyError if the corpus name is unknown
for filename in AVAILABLE_CORPORA[requested_corpus]["files"]:
path = f"{requested_corpus}/{filename}"
source = get_s3_path(path)
target = get_download_path(output_directory, path)
maybe_download_file(source, target)
if __name__ == "__main__":
help_str = _helpstr()
choices = list(AVAILABLE_CORPORA.keys())
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
parser.add_argument(
"--corpus",
type=str,
choices=choices,
help=help_str,
)
args = parser.parse_args()
main(args.output_directory, args.corpus)
|
atlas-main
|
preprocessing/download_corpus.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
class Options:
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialize_parser()
def initialize_parser(self):
# basic parameters
self.parser.add_argument(
"--name", type=str, default="experiment_name", help="name of the experiment - also used as directory name "
)
self.parser.add_argument(
"--checkpoint_dir",
type=str,
default="./checkpoint/",
help="models are saved here",
)
self.parser.add_argument(
"--model_path",
type=str,
default="none",
help="Path to a pretrained model to initialize from (pass 'none' to init from t5 and contriever)",
)
self.parser.add_argument(
"--per_gpu_batch_size",
default=1,
type=int,
help="Batch size per GPU/CPU for training.",
)
self.parser.add_argument(
"--per_gpu_embedder_batch_size",
default=512,
type=int,
help="Embedder's batch size per GPU.",
)
self.parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
self.parser.add_argument(
"--main_port",
type=int,
default=-1,
help="Main port (for multi-node jobs)",
)
self.parser.add_argument("--seed", type=int, default=0, help="random seed for initialization")
self.parser.add_argument(
"--log_freq",
type=int,
default=100,
help="log train stats <log_freq> steps during training",
)
self.parser.add_argument(
"--eval_freq",
type=int,
default=500,
help="evaluate model every <eval_freq> steps during training",
)
self.parser.add_argument(
"--save_freq",
type=int,
default=5000,
help="save model every <save_freq> steps during training",
)
self.parser.add_argument(
"--train_data", nargs="+", default=[], help="list of space-separated paths to jsonl-formatted train sets"
)
self.parser.add_argument(
"--eval_data",
nargs="+",
default=[],
help="list of space-separated paths to jsonl-formatted evaluation sets",
)
self.parser.add_argument("--write_results", action="store_true", help="save evaluation results to file")
self.parser.add_argument(
"--dont_write_passages",
action="store_true",
help="if writing results, passages can take up a lot of space, pass this flag not to write passages as part of dumped results",
)
def add_optim_options(self):
self.parser.add_argument("--warmup_steps", type=int, default=1000, help="number of learning rate warmup steps")
self.parser.add_argument("--total_steps", type=int, default=1000, help="total number of training steps")
self.parser.add_argument(
"--scheduler_steps",
type=int,
default=None,
help="total number of step for the scheduler, if None then scheduler_total_step = total_step",
)
self.parser.add_argument("--accumulation_steps", type=int, default=1, help="gradient accumulation")
self.parser.add_argument("--dropout", type=float, default=0.1, help="dropout rate")
self.parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
self.parser.add_argument("--lr_retriever", type=float, default=1e-5, help="learning rate for retriever")
self.parser.add_argument("--clip", type=float, default=1.0, help="gradient clipping")
self.parser.add_argument(
"--scheduler",
type=str,
default="cosine",
choices=["linear", "cosine", "fixed"],
help="learning rate schedule to use",
)
self.parser.add_argument(
"--weight_decay", type=float, default=0.1, help="amount of weight decay to apply in training"
)
self.parser.add_argument(
"--save_optimizer", action="store_true", help="Pass flag to save optimizer state in saved checkpoints"
)
self.parser.add_argument("--epsilon", type=float, default=1e-6, help="adamw epsilon value")
self.parser.add_argument("--alpha", type=float, default=1.0, help="adamw alpha value")
self.parser.add_argument("--beta2", type=float, default=0.999, help="adamw beta2 value")
self.parser.add_argument(
"--refresh_index",
type=str,
default="-1",
help="index refresh schedule. format: startstep-endstep:refreshrate,startstep-endstep:refreshrate "
"e.g. --refresh_index 0-100:10,100-1000000:500 will refresh the index every 10 steps for the first 100 steps, "
"and then every 500 steps from step 100 to 1M."
"Syntactic Sugar for a fixed schedule: can just pass in a single number e.g. --refresh_index 100 will refresh the index every 100 steps. "
"-1 to never refresh.",
)
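        # Illustrative --refresh_index values (restating the schedule format described in the help string above):
        #   "0-100:10,100-1000000:500" -> refresh every 10 steps until step 100, then every 500 steps up to step 1M
        #   "100"                      -> refresh every 100 steps for the whole run
        #   "-1"                       -> never refresh the index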
self.parser.add_argument("--shuffle", action="store_true", help="shuffle data for training")
# memory optimizations:
self.parser.add_argument(
"--precision",
type=str,
default="fp32",
choices=["fp16", "fp32", "bf16"],
help="numerical precision - recommend bf16 if available, fp16 likely to be unstable for training",
)
self.parser.add_argument(
"--shard_optim",
action="store_true",
help="train-time memory optimization: shards optimizer state over available GPUs using sharded data parallel, recommended for larger models",
)
self.parser.add_argument(
"--shard_grads",
action="store_true",
help="train-time memory optimization: shards gradients over available GPUs using sharded data parallel, recommended for larger models",
)
self.parser.add_argument(
"--use_gradient_checkpoint_reader",
action="store_true",
help="use gradient checkpointing in the reader",
)
self.parser.add_argument(
"--use_gradient_checkpoint_retriever",
action="store_true",
help="use gradient checkpointing for retriever",
)
def add_modeling_options(self):
self.parser.add_argument(
"--reader_model_type",
required=True,
type=str,
help="t5 Architecture for reader FID model, e.g. google/t5-xl-lm-adapt",
choices=[
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
"google/t5-v1_1-base",
"google/t5-v1_1-large",
"google/t5-v1_1-xl",
"google/t5-v1_1-xxl",
"google/t5-base-lm-adapt",
"google/t5-large-lm-adapt",
"google/t5-xl-lm-adapt",
"google/t5-xxl-lm-adapt",
],
)
self.parser.add_argument(
"--text_maxlength",
type=int,
default=200,
help="maximum number of tokens in input text segments (concatenated question+passage). Inputs longer than this will be truncated.",
)
self.parser.add_argument(
"--target_maxlength",
type=int,
default=None,
help="Maximum length of target outputs in tokens when training the model. Targets longer than this will be truncated. No truncation if -1",
)
self.parser.add_argument("--n_context", type=int, default=1, help="number of top k passages to pass to reader")
# Retriever modelling options
self.parser.add_argument(
"--passages",
nargs="+",
help="list of paths to jsonl files containing passages to index and retrieve from. Unused if loading a saved index using --load_index_path",
)
self.parser.add_argument(
"--max_passages",
type=int,
default=-1,
help="maximum number of passages to index. -1 to read all passages in passage files",
)
self.parser.add_argument(
"--retriever_model_path",
type=str,
default="facebook/contriever",
help="path to contriever model to init from (overridden if passing a value to --model_path ",
)
self.parser.add_argument(
"--retrieve_only",
action="store_true",
help="Pass this to prevent loading a reader, and only run retrieval evaluation",
)
self.parser.add_argument(
"--train_retriever", action="store_true", help="Pass to train retriever as well as reader"
)
self.parser.add_argument(
"--use_file_passages",
action="store_true",
help='uses passages in "passages" field in train or eval jsonl files rather than retrieving passages',
)
self.parser.add_argument(
"--retriever_n_context",
type=int,
default=5,
help="number of top k passages to use to train the retriever with",
)
self.parser.add_argument(
"--gold_score_mode",
type=str,
choices=["evalnormsum", "loop", "ppmean", "emdr", "pdist", "adist"],
default="ppmean",
help="retriever training method. `pdist` is the name used in the paper for `ppmean`. `adist` is the name used in the paper for `evalnormsum`",
)
self.parser.add_argument(
"--closed_book",
action="store_true",
help="Dont use retrieval - reduces to T5. Overrides n_context, n_context_retriever and encoder_format if they are set",
)
self.parser.add_argument(
"--temperature_score", type=float, default=0.01, help="softmax temperature for retriever"
)
self.parser.add_argument(
"--temperature_gold",
type=float,
default=0.01,
help="softmax temperature for target distribution for retriever distillation",
)
self.parser.add_argument("--compute_crossattention_stats", action="store_true")
self.parser.add_argument(
"--filtering_overretrieve_ratio",
type=int,
default=2,
help="if filtering, over-retrieve the topK by this factor, and then filter out undesirable results. Useful, Set to 1 only if using a task that doesn't filter retrieved results",
)
self.parser.add_argument("--freeze_retriever_steps", type=int, default=-1, help="freezes retriever for n steps")
self.parser.add_argument(
"--query_side_retriever_training",
action="store_true",
help="pass to enable query-side finetuning of retriever (unties the parameters of the contriever encoder's passage and query encoders, and freezes the passage encoder. Useful to avoid index refreshes.",
)
self.parser.add_argument(
"--retrieve_with_rerank",
action="store_true",
help="pass this to enable reranking with fresh passage encoder for retriever",
)
self.parser.add_argument(
"--n_to_rerank_with_retrieve_with_rerank",
type=int,
default=128,
help="n passages to rerank when passing --retrieve_with_rerank. Higher is slower but more accurate. Recommend 64-128",
)
# input and output formatting options:
self.parser.add_argument(
"--decoder_format", # TODO: decide whether to remove functionality
type=str,
default=None,
help="format for decoder, model will be train on the format and evaluation will be performed with the format contrary to the decoder_prompt_format option",
)
self.parser.add_argument( # TODO: decide whether to remove functionality
"--decoder_prompt_format",
type=str,
default=None,
help='format for decoder prompting, for instance "what is the answer to {query}:"',
)
self.parser.add_argument(
"--encoder_format",
type=str,
default="{query} title: {title} context: {text}",
help="format string for reader's encoder preprocessing",
)
self.parser.add_argument(
"--retriever_format",
type=str,
default="{title} {text}",
help="format string for retriever's encoder preprocessing",
)
# Generation options
self.parser.add_argument("--generation_max_length", type=int, default=128)
self.parser.add_argument("--generation_min_length", type=int, default=None)
self.parser.add_argument("--generation_length_penalty", type=float, default=1.0)
self.parser.add_argument("--generation_num_beams", type=int, default=1)
# Task-specific options:
self.parser.add_argument(
"--task",
type=str,
default=None,
choices=["base", "mlm", "lm", "multiple_choice", "kilt", "section", "fever", "qa"],
help="Task performed by the model. Used to setup preprocessing, retrieval filtering, evaluations, etc.",
)
# MLM task options:
self.parser.add_argument(
"--mlm_noise_density",
type=float,
default=0.15,
help="how much of an input text should be masked by masking spans ",
)
self.parser.add_argument(
"--mlm_mean_noise_span_length", type=float, default=3, help="average length of an MLM masking span"
)
self.parser.add_argument(
"--min_words_per_lm_instance",
type=int,
default=None,
help="Instances with fewer than min_words_per_lm_instance instances will be skipped for MLM/LM/Section Generation",
)
# LM task options:
self.parser.add_argument(
"--min_lm_context_ratio",
type=float,
default=0.5,
help="Splits text into two segments for language modelling.'\
'Left segment is conditioning context, right segment is for generating.'\
'The left segment must be more than min_lm_context_ratio of the the right segment",
)
self.parser.add_argument(
"--max_lm_context_ratio",
type=float,
default=0.5,
help="Splits text into two segments for language modelling.'\
'Left segment is conditioning context, right segment is for generating.'\
'The left segment must be less than than max_lm_context_ratio of the the right segment",
)
# Open-domain task options:
self.parser.add_argument(
"--qa_prompt_format",
type=str,
default="question: {question} answer: <extra_id_0>",
help="How to format question as input prompts when using --task qa",
)
# Multiple Choice task options:
self.parser.add_argument(
"--multiple_choice_num_options",
type=int,
default=4,
help="How many choice options for multiple choice QA (MMLU is 4)",
)
self.parser.add_argument(
"--multiple_choice_train_permutations",
choices=["single", "cyclic", "all"],
default="single",
type=str,
help="Whether to train with answer order permutations When training on multiple choice (e.g. MMLU)."
" Can improve results by de-biasing models's preferences for arbitrary answer orderings. Recommend training with 'all'. "
"single: no permutations. cyclic: cyclic permutations. all: all possible answer order permutations'",
)
self.parser.add_argument(
"--multiple_choice_eval_permutations",
choices=["single", "cyclic", "all"],
default="single",
type=str,
help="Whether to evaluate with answer order permutations for multiple choice (e.g. MMLU)."
" Can improve results by de-biasing models's preferences for arbitrary answer orderings. Best results with 'all' but very slow. 'cyclic' is a good compromise. "
"single: no permutations. cyclic: cyclic permutations. all: all possible answer order permutations'",
)
def add_index_options(self):
self.parser.add_argument(
"--load_index_path",
default=None,
type=str,
help="path for loading the index, passage embeddings and passages",
)
self.parser.add_argument(
"--save_index_path",
default=None,
type=str,
help="path for saving the index and/or embeddings",
)
self.parser.add_argument(
"--save_index_n_shards",
default=128,
type=int,
help="how many shards to save an index to file with. Must be an integer multiple of the number of workers.",
)
self.parser.add_argument(
"--index_mode",
type=str,
default="flat",
help="Use flat torch index or a faiss index for retrieving the k nearest neighbors",
choices=["flat", "faiss"],
)
# faiss options:
self.parser.add_argument(
"--faiss_index_type",
type=str,
default="flat",
help="IVFFlat, IndexFlatIP, IVFScalarQuantizer or IndexIVFPQ with faiss-gpu",
choices=["ivfflat", "flat", "ivfsq", "ivfpq", "pq"],
)
self.parser.add_argument("--faiss_code_size", type=int, default=None, help="Parameter for PQ/SQ quantization")
def print_options(self, opt):
message = "\n"
for k, v in sorted(vars(opt).items()):
comment = ""
default_value = self.parser.get_default(k)
if v != default_value:
comment = f"\t(default: {default_value})"
message += f"{k:>30}: {str(v):<40}{comment}\n"
expr_dir = Path(opt.checkpoint_dir) / opt.name
with open(expr_dir / "opt.log", "wt") as opt_file:
opt_file.write(message)
opt_file.write("\n")
logger.info(message)
def parse(self):
opt = self.parser.parse_args()
if opt.closed_book: # override flags to enable closed book mode
opt.n_context = 1
opt.retriever_n_context = 1
opt.encoder_format = "{query}"
opt.use_file_passages = True
if opt.gold_score_mode == "pdist": # allow paper name of retriever losses
opt.gold_score_mode = "ppmean"
if opt.gold_score_mode == "adist": # allow paper name of retriever losses
opt.gold_score_mode = "evalnormsum"
if (
opt.use_file_passages
        ):  # if passing use_file_passages, the following should be false (there is no retriever loaded in this case)
opt.train_retriever = False
opt.query_side_retriever_training = False
opt.use_gradient_checkpoint_retriever = False
return opt
def get_options():
options = Options()
options.add_index_options()
options.add_modeling_options()
options.add_optim_options()
return options
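# Minimal usage sketch (mirrors how evaluate.py consumes this module; the command-line arguments a
# caller would pass are whichever flags are defined above):
#   options = get_options()
#   opt = options.parse()
#   options.print_options(opt)  # writes opt.log under <checkpoint_dir>/<name> and logs the values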
|
atlas-main
|
src/options.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import pickle
from typing import Optional, Set, Tuple, Union, Any
import faiss
import faiss.contrib.torch_utils
import numpy as np
import torch
from src import dist_utils
from src.retrievers import EMBEDDINGS_DIM
FAISSGPUIndex = Union[
faiss.GpuIndexIVFFlat, faiss.GpuIndexIVFPQ, faiss.GpuIndexIVFScalarQuantizer, faiss.GpuIndexFlatIP
]
FAISSIndex = Union[FAISSGPUIndex, faiss.IndexPQ]
GPUIndexConfig = Union[
faiss.GpuIndexIVFPQConfig,
faiss.GpuIndexIVFFlatConfig,
faiss.GpuIndexIVFScalarQuantizerConfig,
faiss.GpuIndexFlatConfig,
]
BITS_PER_CODE: int = 8
CHUNK_SPLIT: int = 3
def serialize_listdocs(ids):
ids = pickle.dumps(ids)
ids = torch.tensor(list(ids), dtype=torch.uint8).cuda()
return ids
def deserialize_listdocs(ids):
return [pickle.loads(x.cpu().numpy().tobytes()) for x in ids]
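# The two helpers above round-trip arbitrary picklable lists (here: lists of passage dicts) through
# CUDA uint8 tensors so they can be exchanged with torch.distributed collectives. Illustrative
# round trip (requires a GPU; the passage dict is made up):
#   docs = [{"id": "0", "title": "t", "text": "..."}]
#   assert deserialize_listdocs([serialize_listdocs(docs)]) == [docs]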
class DistributedIndex(object):
def __init__(self):
self.embeddings = None
self.doc_map = dict()
self.is_in_gpu = True
def init_embeddings(self, passages, dim: Optional[int] = EMBEDDINGS_DIM):
self.doc_map = {i: doc for i, doc in enumerate(passages)}
        self.embeddings = torch.zeros(dim, len(passages), dtype=torch.float16)
if self.is_in_gpu:
self.embeddings = self.embeddings.cuda()
def _get_saved_embedding_path(self, save_dir: str, shard: int) -> str:
return os.path.join(save_dir, f"embeddings.{shard}.pt")
def _get_saved_passages_path(self, save_dir: str, shard: int) -> str:
return os.path.join(save_dir, f"passages.{shard}.pt")
def save_index(self, path: str, total_saved_shards: int, overwrite_saved_passages: bool = False) -> None:
"""
Saves index state to disk, which can later be loaded by the load_index method.
Specifically, it saves the embeddings and passages into total_saved_shards separate file shards.
        This option enables loading the index in another session with a different number of workers, as long as total_saved_shards is divisible by the number of workers.
Note that the embeddings will always be saved to disk (it will overwrite any embeddings previously saved there).
The passages will only be saved to disk if they have not already been written to the save directory before, unless the option --overwrite_saved_passages is passed.
"""
assert self.embeddings is not None
rank = dist_utils.get_rank()
ws = dist_utils.get_world_size()
        assert total_saved_shards % ws == 0, "total_saved_shards must be a multiple of the number of workers"
shards_per_worker = total_saved_shards // ws
n_embeddings = self.embeddings.shape[1]
embeddings_per_shard = math.ceil(n_embeddings / shards_per_worker)
assert n_embeddings == len(self.doc_map), len(self.doc_map)
        for shard_ind, shard_start in enumerate(range(0, n_embeddings, embeddings_per_shard)):
shard_end = min(shard_start + embeddings_per_shard, n_embeddings)
shard_id = shard_ind + rank * shards_per_worker # get global shard number
passage_shard_path = self._get_saved_passages_path(path, shard_id)
if not os.path.exists(passage_shard_path) or overwrite_saved_passages:
passage_shard = [self.doc_map[i] for i in range(shard_start, shard_end)]
with open(passage_shard_path, "wb") as fobj:
pickle.dump(passage_shard, fobj, protocol=pickle.HIGHEST_PROTOCOL)
embeddings_shard = self.embeddings[:, shard_start:shard_end]
embedding_shard_path = self._get_saved_embedding_path(path, shard_id)
torch.save(embeddings_shard, embedding_shard_path)
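    # Illustrative on-disk layout after save_index(path, total_saved_shards=128) on 4 workers:
    # each worker writes 32 consecutive shards, so rank 0 produces passages.0.pt/embeddings.0.pt
    # through passages.31.pt/embeddings.31.pt, rank 1 produces shards 32-63, and so on.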
def load_index(self, path: str, total_saved_shards: int):
"""
Loads sharded embeddings and passages files (no index is loaded).
"""
rank = dist_utils.get_rank()
ws = dist_utils.get_world_size()
        assert total_saved_shards % ws == 0, "total_saved_shards must be a multiple of the number of workers"
shards_per_worker = total_saved_shards // ws
passages = []
embeddings = []
for shard_id in range(rank * shards_per_worker, (rank + 1) * shards_per_worker):
passage_shard_path = self._get_saved_passages_path(path, shard_id)
with open(passage_shard_path, "rb") as fobj:
passages.append(pickle.load(fobj))
embeddings_shard_path = self._get_saved_embedding_path(path, shard_id)
embeddings.append(torch.load(embeddings_shard_path, map_location="cpu").cuda())
self.doc_map = {}
n_passages = 0
for chunk in passages:
for p in chunk:
self.doc_map[n_passages] = p
n_passages += 1
self.embeddings = torch.concat(embeddings, dim=1)
def _compute_scores_and_indices(self, allqueries: torch.tensor, topk: int) -> Tuple[torch.tensor, torch.tensor]:
"""
Computes the distance matrix for the query embeddings and embeddings chunk and returns the k-nearest neighbours and corresponding scores.
"""
scores = torch.matmul(allqueries.half(), self.embeddings)
scores, indices = torch.topk(scores, topk, dim=1)
return scores, indices
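    # Shape sketch for the flat search above: with self.embeddings of shape (dim, n_passages) and
    # allqueries of shape (n_queries, dim), scores is (n_queries, n_passages) before the top-k and
    # both scores and indices are (n_queries, topk) afterwards, with indices keying into doc_map.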
@torch.no_grad()
def search_knn(self, queries, topk):
"""
Conducts exhaustive search of the k-nearest neighbours using the inner product metric.
"""
allqueries = dist_utils.varsize_all_gather(queries)
allsizes = dist_utils.get_varsize(queries)
allsizes = np.cumsum([0] + allsizes.cpu().tolist())
# compute scores for the part of the index located on each process
scores, indices = self._compute_scores_and_indices(allqueries, topk)
indices = indices.tolist()
docs = [[self.doc_map[x] for x in sample_indices] for sample_indices in indices]
if torch.distributed.is_initialized():
docs = [docs[allsizes[k] : allsizes[k + 1]] for k in range(len(allsizes) - 1)]
docs = [serialize_listdocs(x) for x in docs]
scores = [scores[allsizes[k] : allsizes[k + 1]] for k in range(len(allsizes) - 1)]
gather_docs = [dist_utils.varsize_gather(docs[k], dst=k, dim=0) for k in range(dist_utils.get_world_size())]
gather_scores = [
dist_utils.varsize_gather(scores[k], dst=k, dim=1) for k in range(dist_utils.get_world_size())
]
rank_scores = gather_scores[dist_utils.get_rank()]
rank_docs = gather_docs[dist_utils.get_rank()]
scores = torch.cat(rank_scores, dim=1)
rank_docs = deserialize_listdocs(rank_docs)
merge_docs = [[] for _ in range(queries.size(0))]
for docs in rank_docs:
for k, x in enumerate(docs):
merge_docs[k].extend(x)
docs = merge_docs
_, subindices = torch.topk(scores, topk, dim=1)
scores = scores.tolist()
subindices = subindices.tolist()
# Extract topk scores and associated ids
scores = [[scores[k][j] for j in idx] for k, idx in enumerate(subindices)]
docs = [[docs[k][j] for j in idx] for k, idx in enumerate(subindices)]
return docs, scores
def is_index_trained(self) -> bool:
return True
class DistributedFAISSIndex(DistributedIndex):
def __init__(self, index_type: str, code_size: Optional[int] = None):
super().__init__()
self.embeddings = None
self.doc_map = dict()
self.faiss_gpu_index = None
self.gpu_resources = None
self.faiss_index_trained = False
self.faiss_index_type = index_type
self.code_size = code_size
self.is_in_gpu = False
def _get_faiss_index_filename(self, save_index_path: str) -> str:
"""
Creates the filename to save the trained index to using the index type, code size (if not None) and rank.
"""
rank = dist_utils.get_rank()
if self.code_size:
return save_index_path + f"/index{self.faiss_index_type}_{str(self.code_size)}_rank_{rank}.faiss"
return save_index_path + f"/index{self.faiss_index_type}_rank_{rank}.faiss"
def _add_embeddings_to_gpu_index(self) -> None:
"""
Add embeddings to index and sets the nprobe parameter.
"""
assert self.faiss_gpu_index is not None, "The FAISS GPU index was not correctly instantiated."
        assert self.faiss_gpu_index.is_trained, "The FAISS index has not been trained."
if self.faiss_gpu_index.ntotal == 0:
self._add_embeddings_by_chunks()
def _add_embeddings_by_chunks(self) -> None:
_, num_points = self.embeddings.shape
chunk_size = num_points // CHUNK_SPLIT
split_embeddings = [
self.embeddings[:, 0:chunk_size],
self.embeddings[:, chunk_size : 2 * chunk_size],
self.embeddings[:, 2 * chunk_size : num_points],
]
for embeddings_chunk in split_embeddings:
if isinstance(self.faiss_gpu_index, FAISSGPUIndex.__args__):
self.faiss_gpu_index.add(self._cast_to_torch32(embeddings_chunk.T))
else:
self.faiss_gpu_index.add(self._cast_to_numpy(embeddings_chunk.T))
def _compute_scores_and_indices(self, allqueries: torch.tensor, topk: int) -> Tuple[torch.tensor, torch.tensor]:
"""
Computes the distance matrix for the query embeddings and embeddings chunk and returns the k-nearest neighbours and corresponding scores.
"""
_, num_points = self.embeddings.shape
self.faiss_gpu_index.nprobe = math.floor(math.sqrt(num_points))
self._add_embeddings_to_gpu_index()
if isinstance(self.faiss_gpu_index, FAISSGPUIndex.__args__):
scores, indices = self.faiss_gpu_index.search(self._cast_to_torch32(allqueries), topk)
else:
np_scores, indices = self.faiss_gpu_index.search(self._cast_to_numpy(allqueries), topk)
scores = torch.from_numpy(np_scores).cuda()
return scores.half(), indices
def save_index(self, save_index_path: str, save_index_n_shards: int) -> None:
"""
Saves the embeddings and passages and if there is a FAISS index, it saves it.
"""
super().save_index(save_index_path, save_index_n_shards)
self._save_faiss_index(save_index_path)
def _save_faiss_index(self, path: str) -> None:
"""
Moves the GPU FAISS index to CPU and saves it to a .faiss file.
"""
index_path = self._get_faiss_index_filename(path)
assert self.faiss_gpu_index is not None, "There is no FAISS index to save."
cpu_index = faiss.index_gpu_to_cpu(self.faiss_gpu_index)
faiss.write_index(cpu_index, index_path)
def _load_faiss_index(self, load_index_path: str) -> None:
"""
Loads a FAISS index and moves it to the GPU.
"""
faiss_cpu_index = faiss.read_index(load_index_path)
# move to GPU
self._move_index_to_gpu(faiss_cpu_index)
def load_index(self, path: str, total_saved_shards: int) -> None:
"""
        Loads passage embeddings, passages, and a FAISS index if one was previously saved;
        otherwise, initialises and trains a new index on the GPU with GPU FAISS.
"""
super().load_index(path, total_saved_shards)
load_index_path = self._get_faiss_index_filename(path)
if os.path.exists(load_index_path):
self._load_faiss_index(load_index_path)
else:
self.train_index()
def is_index_trained(self) -> bool:
if self.faiss_gpu_index is None:
return self.faiss_index_trained
return not self.faiss_gpu_index.is_trained
def _initialise_index(self) -> None:
"""
        Initialises the index on the GPU with GPU FAISS.
        Supported GPU index types: ivfflat, flat (IndexFlatIP), pq, ivfpq, and ivfsq.
"""
dimension, num_points = self.embeddings.shape
# @TODO: Add support to set the n_list and n_probe parameters.
n_list = math.floor(math.sqrt(num_points))
self.faiss_gpu_index = self.gpu_index_factory(dimension, n_list)
@torch.no_grad()
def _set_gpu_options(self) -> faiss.GpuMultipleClonerOptions:
"""
        Returns the GPU cloner options necessary when moving a CPU index to the GPU.
"""
cloner_opts = faiss.GpuClonerOptions()
cloner_opts.useFloat16 = True
cloner_opts.usePrecomputed = False
cloner_opts.indicesOptions = faiss.INDICES_32_BIT
return cloner_opts
@torch.no_grad()
def _set_index_config_options(self, index_config: GPUIndexConfig) -> GPUIndexConfig:
"""
Returns the GPU config options for GPU indexes.
"""
index_config.device = torch.cuda.current_device()
index_config.indicesOptions = faiss.INDICES_32_BIT
index_config.useFloat16 = True
return index_config
def _create_PQ_index(self, dimension) -> FAISSIndex:
"""
        Creates a product-quantisation (PQ) index on the CPU and moves it to the GPU.
"""
cpu_index = faiss.index_factory(dimension, "PQ" + str(self.code_size), faiss.METRIC_INNER_PRODUCT)
cfg = self._set_gpu_options()
return faiss.index_cpu_to_gpu(self.gpu_resources, self.embeddings.get_device(), cpu_index, cfg)
@torch.no_grad()
def gpu_index_factory(self, dimension: int, n_list: Optional[int] = None) -> FAISSIndex:
"""
Instantiates and returns the selected GPU index class.
"""
self.gpu_resources = faiss.StandardGpuResources()
if self.faiss_index_type == "ivfflat":
config = self._set_index_config_options(faiss.GpuIndexIVFFlatConfig())
return faiss.GpuIndexIVFFlat(
self.gpu_resources,
dimension,
n_list,
faiss.METRIC_INNER_PRODUCT,
config,
)
elif self.faiss_index_type == "flat":
config = self._set_index_config_options(faiss.GpuIndexFlatConfig())
return faiss.GpuIndexFlatIP(self.gpu_resources, dimension, config)
elif self.faiss_index_type == "pq":
return self._create_PQ_index(dimension)
elif self.faiss_index_type == "ivfpq":
config = self._set_index_config_options(faiss.GpuIndexIVFPQConfig())
return faiss.GpuIndexIVFPQ(
self.gpu_resources,
dimension,
n_list,
self.code_size,
BITS_PER_CODE,
faiss.METRIC_INNER_PRODUCT,
config,
)
elif self.faiss_index_type == "ivfsq":
config = self._set_index_config_options(faiss.GpuIndexIVFScalarQuantizerConfig())
qtype = faiss.ScalarQuantizer.QT_4bit
return faiss.GpuIndexIVFScalarQuantizer(
self.gpu_resources,
dimension,
n_list,
qtype,
faiss.METRIC_INNER_PRODUCT,
True,
config,
)
else:
raise ValueError("unsupported index type")
@torch.no_grad()
def train_index(self) -> None:
"""
        Initialises the index if needed and trains it, following the index refresh schedule.
"""
if self.faiss_gpu_index is None:
self._initialise_index()
self.faiss_gpu_index.reset()
if isinstance(self.faiss_gpu_index, FAISSGPUIndex.__args__):
self.faiss_gpu_index.train(self._cast_to_torch32(self.embeddings.T))
else:
self.faiss_gpu_index.train(self._cast_to_numpy(self.embeddings.T))
@torch.no_grad()
def _cast_to_torch32(self, embeddings: torch.tensor) -> torch.tensor:
"""
Converts a torch tensor to a contiguous float 32 torch tensor.
"""
return embeddings.type(torch.float32).contiguous()
@torch.no_grad()
def _cast_to_numpy(self, embeddings: torch.tensor) -> np.ndarray:
"""
Converts a torch tensor to a contiguous numpy float 32 ndarray.
"""
return embeddings.cpu().to(dtype=torch.float16).numpy().astype("float32").copy(order="C")
@torch.no_grad()
def _move_index_to_gpu(self, cpu_index: FAISSIndex) -> None:
"""
Moves a loaded index to GPU.
"""
self.gpu_resources = faiss.StandardGpuResources()
cfg = self._set_gpu_options()
self.faiss_gpu_index = faiss.index_cpu_to_gpu(self.gpu_resources, torch.cuda.current_device(), cpu_index, cfg)
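# Editor's sketch (not part of the original file): one plausible way to drive
# DistributedFAISSIndex end to end, assuming a CUDA device, a working faiss-gpu
# install, and caller-provided `passages` (list of dicts) and `embeddings`
# (a half-precision (dim, n_points) tensor already on the GPU); both arguments
# and the function itself are hypothetical illustrations, not part of Atlas.
def _example_build_and_save_faiss_index(passages, embeddings, save_path):
    index = DistributedFAISSIndex(index_type="ivfflat", code_size=None)
    index.init_embeddings(passages)   # allocate storage (see src/index_io.py)
    index.embeddings = embeddings     # overwrite with precomputed embeddings
    index.train_index()               # instantiate and train the GPU index
    index.save_index(save_path, save_index_n_shards=1)
    return index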
|
atlas-main
|
src/index.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import os
import signal
import socket
import subprocess
import sys
from logging import getLogger
import torch
logger = getLogger()
GLOO_GROUP = None
def sig_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ["SLURM_PROCID"])
logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
logger.warning("Requeuing job " + os.environ["SLURM_JOB_ID"])
os.system("scontrol requeue " + os.environ["SLURM_JOB_ID"])
else:
logger.warning("Not the main process, no need to requeue.")
sys.exit(-1)
def term_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
logger.warning("Bypassing SIGTERM.")
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
# logger.warning("Signal handler installed.")
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
"""
# params.is_slurm_job = 'SLURM_JOB_ID' in os.environ
params.is_slurm_job = "SLURM_JOB_ID" in os.environ and not "WORLD_SIZE" in os.environ
has_local_rank = hasattr(params, "local_rank")
# SLURM job
if params.is_slurm_job and has_local_rank:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
SLURM_VARIABLES = [
"SLURM_JOB_ID",
"SLURM_JOB_NODELIST",
"SLURM_JOB_NUM_NODES",
"SLURM_NTASKS",
"SLURM_TASKS_PER_NODE",
"SLURM_MEM_PER_NODE",
"SLURM_MEM_PER_CPU",
"SLURM_NODEID",
"SLURM_PROCID",
"SLURM_LOCALID",
"SLURM_TASK_PID",
]
PREFIX = "%i - " % int(os.environ["SLURM_PROCID"])
for name in SLURM_VARIABLES:
value = os.environ.get(name, None)
# print(PREFIX + "%s: %s" % (name, str(value)))
# # job ID
# params.job_id = os.environ['SLURM_JOB_ID']
# number of nodes / node ID
params.n_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
params.node_id = int(os.environ["SLURM_NODEID"])
# local rank on the current node / global rank
params.local_rank = int(os.environ["SLURM_LOCALID"])
params.global_rank = int(os.environ["SLURM_PROCID"])
# number of processes / GPUs per node
params.world_size = int(os.environ["SLURM_NTASKS"])
params.n_gpu_per_node = params.world_size // params.n_nodes
# define master address and master port
hostnames = subprocess.check_output(["scontrol", "show", "hostnames", os.environ["SLURM_JOB_NODELIST"]])
params.main_addr = hostnames.split()[0].decode("utf-8")
assert 10001 <= params.main_port <= 20000 or params.world_size == 1
# print(PREFIX + "Master address: %s" % params.master_addr)
# print(PREFIX + "Master port : %i" % params.master_port)
# set environment variables for 'env://'
os.environ["MASTER_ADDR"] = params.main_addr
os.environ["MASTER_PORT"] = str(params.main_port)
os.environ["WORLD_SIZE"] = str(params.world_size)
os.environ["RANK"] = str(params.global_rank)
params.is_distributed = True
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif has_local_rank and params.local_rank != -1:
assert params.main_port == -1
# read environment variables
params.global_rank = int(os.environ["RANK"])
params.world_size = int(os.environ["WORLD_SIZE"])
params.n_gpu_per_node = int(os.environ["NGPU"])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
params.is_distributed = True
else:
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.is_distributed = False
params.n_nodes = 1
params.node_id = 0
params.n_gpu_per_node = 1
# define whether this is the master process / if we are in distributed mode
params.is_main = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
# summary
PREFIX = "%i - " % params.global_rank
# set GPU device
if params.is_distributed:
torch.cuda.set_device(params.local_rank)
device = torch.device("cuda", params.local_rank)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
params.device = device
# initialize multi-GPU
if params.is_distributed:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
# print("Initializing PyTorch distributed ...")
# Fix for if gloo sockets are inconsistent
p1 = subprocess.Popen(["ip", "r"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "default"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
gloo_socket_ifname = subprocess.check_output(["awk", "{print $5}"], stdin=p2.stdout).decode("utf-8").strip()
p2.stdout.close()
os.environ["GLOO_SOCKET_IFNAME"] = gloo_socket_ifname
torch.distributed.init_process_group(
init_method="env://",
backend="nccl",
)
global GLOO_GROUP
GLOO_GROUP = torch.distributed.new_group(
list(range(params.world_size)), backend="gloo", timeout=datetime.timedelta(0, 600)
)
def get_gloo_group():
global GLOO_GROUP
assert GLOO_GROUP is not None
return GLOO_GROUP
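# Editor's note (sketch, not part of the original file): the Atlas entry points
# typically use this module roughly as follows, where `opt` is the parsed option
# namespace (hypothetical here) that must provide `local_rank` and `main_port`:
#
#   init_signal_handler()        # requeue gracefully on SLURM pre-emption
#   init_distributed_mode(opt)   # fills opt.global_rank, opt.world_size, opt.device, ...
#   gloo = get_gloo_group()      # CPU-side gloo group for object collectives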
|
atlas-main
|
src/slurm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import math
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
from src import dist_utils
Number = Union[float, int]
logger = logging.getLogger(__name__)
def init_logger(is_main=True, is_distributed=False, filename=None):
if is_distributed:
torch.distributed.barrier()
handlers = [logging.StreamHandler(sys.stdout)]
if filename is not None:
handlers.append(logging.FileHandler(filename=filename))
logging.basicConfig(
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main else logging.WARN,
format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s",
handlers=handlers,
)
logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR)
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
return logger
def init_tb_logger(dirname, is_main):
tb_logger = None
if is_main:
try:
from torch.utils import tensorboard
tb_logger = tensorboard.SummaryWriter(dirname)
except:
logger.warning("Tensorboard is not available.")
return tb_logger
def cast_to_precision(model, precision):
if precision == "fp32":
return model
elif precision == "fp16":
model.to(torch.float16)
elif precision == "bf16":
model.to(torch.bfloat16)
else:
raise ValueError(f"unsupported precision {precision}, must be one of fp32, fp16, bf16")
return model
class WarmupLinearScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(WarmupLinearScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return (1 - self.ratio) * step / float(max(1, self.warmup)) + self.ratio
return max(
0.0,
1.0 + (self.ratio - 1) * (step - self.warmup) / float(max(1.0, self.total - self.warmup)),
)
class CosineScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio=0.1, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(CosineScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return float(step) / self.warmup
s = float(step - self.warmup) / (self.total - self.warmup)
return self.ratio + (1.0 - self.ratio) * math.cos(0.5 * math.pi * s)
class FixedScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(FixedScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return float(step) / self.warmup
return 1.0
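# Editor's sketch (not part of the original file): a small CPU-only check of the
# warmup/decay shape produced by WarmupLinearScheduler; the dummy parameter and
# the step count are illustrative only.
def _example_scheduler_curve():
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=1.0)
    scheduler = WarmupLinearScheduler(optimizer, warmup=10, total=100, ratio=0.1)
    lrs = []
    for _ in range(100):
        lrs.append(optimizer.param_groups[0]["lr"])
        optimizer.step()
        scheduler.step()
    # lr ramps from 0.1 to 1.0 over the first 10 steps, then decays linearly
    # back towards 0.1 at step `total`.
    return lrs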
class IndexRefreshScheduler(object):
def __init__(self, format_str: str, freeze_retriever_steps: int, train_retriever: bool):
"""Build an index refresh scheduler
format_str: string that specifies the schedule.
has the format: startstep-endstep:refreshrate,startstep-endstep:refreshrate
e.g. format_str="0-100:10,100-1000000:500" will refresh the index every 10 steps for the first 100 steps
and then every 500 steps from step 100 to 1M.
Syntactic Sugar for a fixed schedule: can just pass in a single number
e.g. format_str="100" will refresh the index every 100 steps
-1 to never refresh
"""
self.format_str = format_str
self.train_retriever = train_retriever
self.freeze_retriever_steps = freeze_retriever_steps
self.steps2rates = IndexRefreshScheduler.parse_index_refresh_schedule_string(format_str)
@classmethod
def parse_index_refresh_schedule_string(cls, format_str):
parsed = []
if format_str == "-1":
parsed = [(0, 2**32, 2**32)]
elif format_str.isdigit():
parsed = [(0, 2**32, int(format_str))]
else:
for piece in format_str.split(","):
startend, rate = piece.split(":")
start, end = startend.split("-")
parsed.append((int(start), int(end), int(rate)))
return parsed
def is_time_to_refresh(self, step):
if not (self.train_retriever or step == 0): # if retriever is not trained only refresh at step 0
return False
if not step == 0 and step < self.freeze_retriever_steps: # freeze first steps
return False
for st, en, rate in self.steps2rates:
if st <= step < en:
steps_since_refresh_schedule_change = step - st
return (steps_since_refresh_schedule_change % rate) == 0
        logger.warning(
            "Cannot calculate a refresh rate for this step; the training step is likely beyond the"
            " specified refresh schedule. See --index_refresh_rate for help."
        )
return False
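# Editor's sketch (not part of the original file): the schedule-string format
# documented above, exercised on toy values; runs on CPU and needs no model.
def _example_refresh_schedule():
    sched = IndexRefreshScheduler("0-100:10,100-1000000:500", freeze_retriever_steps=0, train_retriever=True)
    assert sched.steps2rates == [(0, 100, 10), (100, 1000000, 500)]
    assert sched.is_time_to_refresh(0)       # step 0 always refreshes
    assert sched.is_time_to_refresh(50)      # every 10 steps before step 100
    assert not sched.is_time_to_refresh(55)
    assert sched.is_time_to_refresh(600)     # every 500 steps afterwards
    return sched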
def set_dropout(model, dropout_rate):
for mod in model.modules():
if isinstance(mod, torch.nn.Dropout):
mod.p = dropout_rate
def set_optim(opt, model):
from src.AdamWFP32Copy import AdamWFP32Copy
retr_optimizer = None
optim_class = AdamWFP32Copy
optim_args = {"weight_decay": opt.weight_decay, "betas": (0.9, opt.beta2), "eps": opt.epsilon}
if opt.is_distributed and opt.shard_optim:
from fairscale.optim.oss import OSS
optim_args["optim"] = optim_class
optim_args["force_broadcast_object"] = True
optim_class = OSS
optimizer = optim_class(params=model.reader.parameters(), lr=opt.lr, **optim_args)
if opt.train_retriever:
retr_optimizer = optim_class(params=model.retriever.parameters(), lr=opt.lr_retriever, **optim_args)
retr_scheduler = None
scheduler_args = {"warmup": opt.warmup_steps, "total": opt.total_steps, "ratio": 0.1}
if opt.scheduler == "linear":
scheduler_class = WarmupLinearScheduler
elif opt.scheduler == "cosine":
scheduler_class = CosineScheduler
elif opt.scheduler == "fixed":
scheduler_class = FixedScheduler
else:
raise ValueError
scheduler = scheduler_class(optimizer, **scheduler_args)
if opt.train_retriever:
retr_scheduler = scheduler_class(retr_optimizer, **scheduler_args)
return optimizer, scheduler, retr_optimizer, retr_scheduler
def compute_grad_stats(model):
with torch.no_grad():
stats = []
for name, p in get_unwrapped_model_if_wrapped(model).reader.named_parameters():
if p.grad is not None:
s1 = torch.min(torch.abs(p.grad)).item()
s2 = torch.max(torch.abs(p.grad)).item()
s3 = torch.mean(torch.abs(p.grad)).item()
s4 = torch.linalg.norm(p.grad).item()
stats += [s1, s2, s3, s4]
else:
stats += [0.0, 0.0, 0.0, 0.0]
stats = torch.Tensor(stats).cuda()
if torch.distributed.is_initialized():
torch.distributed.all_reduce(stats)
stats = stats.view(-1, 4)
res = {}
res["skip_example"] = (torch.any(torch.isinf(stats)) or torch.any(torch.isnan(stats))).item()
res["min"] = stats.min(0)[0][0].item()
res["max"] = stats.max(0)[0][1].item()
res["mean"] = stats.mean(0)[2].item()
return res
def write_output(glob_path, output_path):
files = list(glob_path.glob("*.txt"))
files.sort()
with open(output_path, "w") as outfile:
for path in files:
with open(path, "r") as f:
lines = f.readlines()
for line in lines:
outfile.write(line)
path.unlink()
glob_path.rmdir()
def save_distributed_dataset(data, dataset_name, opt):
dir_path = Path(opt.checkpoint_dir) / opt.name
write_path = dir_path / "tmp_dir"
write_path.mkdir(exist_ok=True)
tmp_path = write_path / f"{opt.global_rank}.json"
with open(tmp_path, "w") as fw:
json.dump(data, fw)
if opt.is_distributed:
torch.distributed.barrier()
if opt.is_main:
final_path = dir_path / f"{dataset_name}.jsonl"
logger.info(f"Writing dataset with scores at {final_path}")
results_path = list(write_path.glob("*.json"))
results_path.sort()
alldata = []
for path in results_path:
with open(path, "r") as f:
data = json.load(f)
alldata.extend(data)
path.unlink()
with open(final_path, "w") as fout:
for ex in alldata:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
write_path.rmdir()
def avg_dist_dict(keys, dictionary):
avg = {}
for m in keys:
v = dictionary[m]
if len(v) > 0:
avg[m] = np.mean(v)
else:
avg[m] = 0.0
avg[m] = dist_utils.weighted_average(avg[m], len(v))[0]
return avg
class WeightedAvgStats:
"""provides an average over a bunch of stats"""
def __init__(self):
self.raw_stats: Dict[str, float] = defaultdict(float)
self.total_weights: Dict[str, float] = defaultdict(float)
def update(self, vals: Dict[str, Tuple[Number, Number]]) -> None:
for key, (value, weight) in vals.items():
self.raw_stats[key] += value * weight
self.total_weights[key] += weight
@property
def stats(self) -> Dict[str, float]:
return {x: self.raw_stats[x] / self.total_weights[x] for x in self.raw_stats.keys()}
@property
def tuple_stats(self) -> Dict[str, Tuple[float, float]]:
return {x: (self.raw_stats[x] / self.total_weights[x], self.total_weights[x]) for x in self.raw_stats.keys()}
def reset(self) -> None:
self.raw_stats = defaultdict(float)
self.total_weights = defaultdict(float)
@property
def average_stats(self) -> Dict[str, float]:
keys = sorted(self.raw_stats.keys())
if torch.distributed.is_initialized():
torch.distributed.broadcast_object_list(keys, src=0)
global_dict = {}
for k in keys:
            if k not in self.total_weights:
v = 0.0
else:
v = self.raw_stats[k] / self.total_weights[k]
v, _ = dist_utils.weighted_average(v, self.total_weights[k])
global_dict[k] = v
return global_dict
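# Editor's sketch (not part of the original file): WeightedAvgStats on toy
# values, without any distributed setup.
def _example_weighted_avg_stats():
    stats = WeightedAvgStats()
    stats.update({"loss": (2.0, 8), "accuracy": (0.5, 8)})
    stats.update({"loss": (4.0, 2), "accuracy": (1.0, 2)})
    # loss -> (2.0 * 8 + 4.0 * 2) / 10 = 2.4, accuracy -> (0.5 * 8 + 1.0 * 2) / 10 = 0.6
    return stats.stats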
def get_unwrapped_model_if_wrapped(model):
if hasattr(model, "module"):
return model.module
return model
|
atlas-main
|
src/util.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
from src.modeling_bert import BertModel
EMBEDDINGS_DIM: int = 768
class Contriever(BertModel):
def __init__(self, config, pooling="average", **kwargs):
super().__init__(config, add_pooling_layer=False)
if not hasattr(config, "pooling"):
self.config.pooling = pooling
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
normalize=False,
):
model_output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden = model_output["last_hidden_state"]
last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0).clone()
if self.config.pooling == "average":
emb = last_hidden.sum(dim=1).clone() / attention_mask.sum(dim=1)[..., None].clone()
elif self.config.pooling == "sqrt":
emb = last_hidden.sum(dim=1) / torch.sqrt(attention_mask.sum(dim=1)[..., None].float())
elif self.config.pooling == "cls":
emb = last_hidden[:, 0]
if normalize:
emb = torch.nn.functional.normalize(emb, dim=-1).clone()
return emb
class BaseRetriever(torch.nn.Module):
"""A retriever needs to be able to embed queries and passages, and have a forward function"""
def __init__(self, *args, **kwargs):
super(BaseRetriever, self).__init__()
def embed_queries(self, *args, **kwargs):
raise NotImplementedError()
def embed_passages(self, *args, **kwargs):
raise NotImplementedError()
def forward(self, *args, is_passages=False, **kwargs):
if is_passages:
return self.embed_passages(*args, **kwargs)
else:
return self.embed_queries(*args, **kwargs)
def gradient_checkpointing_enable(self):
for m in self.children():
m.gradient_checkpointing_enable()
def gradient_checkpointing_disable(self):
for m in self.children():
m.gradient_checkpointing_disable()
class DualEncoderRetriever(BaseRetriever):
"""Wrapper for standard contriever, or other dual encoders that parameter-share"""
def __init__(self, opt, contriever):
super(DualEncoderRetriever, self).__init__()
self.opt = opt
self.contriever = contriever
def _embed(self, *args, **kwargs):
return self.contriever(*args, **kwargs)
def embed_queries(self, *args, **kwargs):
return self._embed(*args, **kwargs)
def embed_passages(self, *args, **kwargs):
return self._embed(*args, **kwargs)
class UntiedDualEncoderRetriever(BaseRetriever):
"""Like DualEncoderRetriever, but dedicated encoders for passage and query embedding"""
def __init__(self, opt, query_encoder, passage_encoder=None):
"""Create the module: if passage_encoder is none, one will be created as a deep copy of query_encoder"""
super(UntiedDualEncoderRetriever, self).__init__()
self.opt = opt
self.query_contriever = query_encoder
if passage_encoder is None:
            passage_encoder = copy.deepcopy(query_encoder)  # dedicated copy so the two encoders can diverge
self.passage_contriever = passage_encoder
def embed_queries(self, *args, **kwargs):
return self.query_contriever(*args, **kwargs)
def embed_passages(self, *args, **kwargs):
if self.opt.query_side_retriever_training:
is_train = self.passage_contriever.training
self.passage_contriever.eval()
with torch.no_grad():
passage_emb = self.passage_contriever(*args, **kwargs)
if is_train:
self.passage_contriever.train()
else:
passage_emb = self.passage_contriever(*args, **kwargs)
return passage_emb
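# Editor's sketch (not part of the original file): one plausible way these
# wrappers are combined, assuming a pretrained Contriever checkpoint such as
# "facebook/contriever" is reachable and that `opt` (hypothetical here) exposes
# `query_side_retriever_training`.
def _example_build_retriever(opt):
    contriever = Contriever.from_pretrained("facebook/contriever")
    if opt.query_side_retriever_training:
        # dedicated passage encoder (deep copy of the query encoder)
        return UntiedDualEncoderRetriever(opt, contriever)
    # parameter-shared dual encoder
    return DualEncoderRetriever(opt, contriever)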
|
atlas-main
|
src/retrievers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import string
from collections import Counter
from typing import Callable
import numpy as np
import regex
from rouge import Rouge
rouge = Rouge()
logger = logging.getLogger(__name__)
# Normalization and score functions from SQuAD evaluation script https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s: str) -> str:
def remove_articles(text):
return regex.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def em(prediction, ground_truth, normalize_fn):
return float(normalize_fn(prediction) == normalize_fn(ground_truth))
def f1(prediction, ground_truth, normalize_fn):
prediction_tokens = normalize_fn(prediction).split()
ground_truth_tokens = normalize_fn(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def rouge_wrapper(prediction, ground_truth):
try:
result = rouge.get_scores(prediction, ground_truth, avg=True)
return result["rouge-1"]["f"], result["rouge-2"]["f"], result["rouge-l"]["f"]
except:
return 0.0, 0.0, 0.0
def f1_score(prediction, ground_truths, normalize_fn: Callable[[str], str] = lambda x: x):
return max([f1(prediction, gt, normalize_fn) for gt in ground_truths])
def exact_match_score(prediction, ground_truths, normalize_fn: Callable[[str], str] = lambda x: x):
return max([em(prediction, gt, normalize_fn) for gt in ground_truths])
def rouge_score(prediction, ground_truths):
ground_truths = [x for x in ground_truths if len(x) > 0]
if (
len(prediction) == 0 or len(ground_truths) == 0
    ):  # return zero scores for an empty prediction or when no non-empty ground truth exists
return 0.0, 0.0, 0.0
scores = [rouge_wrapper(prediction, gt) for gt in ground_truths]
rouge1 = max(s[0] for s in scores)
rouge2 = max(s[1] for s in scores)
rougel = max(s[2] for s in scores)
return rouge1, rouge2, rougel
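# Editor's sketch (not part of the original file): the metrics above on a toy
# prediction; the strings are illustrative only.
def _example_metrics():
    prediction = "The Eiffel Tower"
    ground_truths = ["Eiffel Tower", "the eiffel tower in Paris"]
    em_val = exact_match_score(prediction, ground_truths, normalize_answer)  # 1.0
    f1_val = f1_score(prediction, ground_truths, normalize_answer)           # 1.0
    rouge1, rouge2, rougel = rouge_score(prediction, ground_truths)
    return em_val, f1_val, (rouge1, rouge2, rougel)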
|
atlas-main
|
src/evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
from src import dist_utils
from src.index import DistributedFAISSIndex, DistributedIndex
logger = logging.getLogger(__name__)
def load_passages(filenames, maxload=-1):
def process_jsonl(
fname,
counter,
passages,
world_size,
global_rank,
maxload,
):
def load_item(line):
if line.strip() != "":
item = json.loads(line)
assert "id" in item
if "title" in item and "section" in item and len(item["section"]) > 0:
item["title"] = f"{item['title']}: {item['section']}"
return item
else:
print("empty line")
for line in open(fname):
if maxload > -1 and counter >= maxload:
break
ex = None
if (counter % world_size) == global_rank:
ex = load_item(line)
passages.append(ex)
counter += 1
return passages, counter
counter = 0
passages = []
global_rank = dist_utils.get_rank()
world_size = dist_utils.get_world_size()
for filename in filenames:
passages, counter = process_jsonl(
filename,
counter,
passages,
world_size,
global_rank,
maxload,
)
return passages
def save_embeddings_and_index(index, opt: argparse.Namespace) -> None:
"""
Saves embeddings and passages files. It also saves faiss index files if FAISS mode is used.
"""
index.save_index(opt.save_index_path, opt.save_index_n_shards)
def load_or_initialize_index(opt):
if opt.index_mode == "flat":
index = DistributedIndex()
elif opt.index_mode == "faiss":
index = DistributedFAISSIndex(opt.faiss_index_type, opt.faiss_code_size)
else:
raise ValueError(f"unsupported index mode {opt.index_mode}")
if opt.load_index_path is not None:
logger.info(f"Loading index from: {opt.load_index_path} with index mode: {opt.index_mode}")
if opt.index_mode == "faiss":
logger.info(f"loading faiss index type {opt.faiss_index_type} with parameters {opt.faiss_code_size}")
index.load_index(opt.load_index_path, opt.save_index_n_shards)
passages = [index.doc_map[i] for i in range(len(index.doc_map))]
else:
logger.info(f"Loading passages from: {opt.passages}")
passages = []
if not opt.use_file_passages:
passages = load_passages(opt.passages, opt.max_passages)
index.init_embeddings(passages)
return index, passages
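# Editor's note (sketch, not part of the original file): typical call order for
# this module, assuming `opt` (hypothetical here) carries the index and passage
# options referenced above:
#
#   index, passages = load_or_initialize_index(opt)
#   ... embed passages / train or refresh the index ...
#   save_embeddings_and_index(index, opt)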
|
atlas-main
|
src/index_io.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import errno
import logging
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torch
import transformers
import src.fid
from src import dist_utils
from src.atlas import Atlas
from src.retrievers import Contriever, DualEncoderRetriever, UntiedDualEncoderRetriever
from src.util import cast_to_precision, set_dropout, set_optim
Number = Union[float, int]
logger = logging.getLogger(__name__)
def get_checkpoint_path(opt):
checkpoint_path = Path(opt.checkpoint_dir) / opt.name
return checkpoint_path
def create_checkpoint_directories(opt):
checkpoint_path = get_checkpoint_path(opt)
os.makedirs(checkpoint_path, exist_ok=True)
if opt.save_index_path:
os.makedirs(opt.save_index_path, exist_ok=True)
dist_utils.barrier()
return checkpoint_path, opt.save_index_path
def load_retriever(opt, opt_checkpoint=None):
if opt.use_file_passages:
return None, None
contriever_encoder = Contriever.from_pretrained(opt.retriever_model_path)
retriever_tokenizer = transformers.AutoTokenizer.from_pretrained(opt.retriever_model_path)
# once you have done query side training you cannot go back to a parameter-tied retriever
if opt_checkpoint is not None:
retriever_is_untied = opt_checkpoint.query_side_retriever_training or opt.query_side_retriever_training
else:
retriever_is_untied = opt.query_side_retriever_training
if retriever_is_untied:
retriever = UntiedDualEncoderRetriever(opt, contriever_encoder)
else:
retriever = DualEncoderRetriever(opt, contriever_encoder)
return retriever, retriever_tokenizer
def _convert_state_dict_from_dual_encoder_retriever(state_dict):
"""handles when we want to load an UntiedDualEncoderRetriever from a DualEncoderRetriever state dict"""
new_state_dict = {}
for k, tensor in state_dict.items():
if k.startswith("retriever"):
new_state_dict[k.replace("retriever.contriever", "retriever.passage_contriever")] = tensor
new_state_dict[k.replace("retriever.contriever", "retriever.query_contriever")] = tensor
else:
new_state_dict[k] = tensor
return new_state_dict
def load_reader(opt):
reader = None
if not opt.retrieve_only:
reader = src.fid.FiD.from_pretrained(opt.reader_model_type)
if opt.compute_crossattention_stats or "eval" in opt.gold_score_mode or "std" in opt.gold_score_mode:
reader.overwrite_forward_crossattention()
reader.create_crossattention_storage()
reader_tokenizer = transformers.AutoTokenizer.from_pretrained(opt.reader_model_type)
return reader, reader_tokenizer
def _set_reader_encoder_cfg(model, opt):
if model.reader is not None:
cfg = model.reader.encoder.config
cfg.n_context = opt.n_context
cfg.bsz = opt.per_gpu_batch_size
def _cast_atlas_to_precision(atlas_model, precision):
if atlas_model.reader is not None:
atlas_model.reader = cast_to_precision(atlas_model.reader, precision)
if atlas_model.retriever is not None and precision == "bf16":
atlas_model.retriever = cast_to_precision(atlas_model.retriever, precision)
def _cast_and_set_attrs_and_send_to_device(model, opt):
_set_reader_encoder_cfg(model, opt)
set_dropout(model, opt.dropout)
_cast_atlas_to_precision(model, opt.precision)
model = model.to(opt.device)
return model
def _load_atlas_model_state(opt, opt_checkpoint, model, model_dict):
model_dict = {
k.replace("retriever.module", "retriever").replace("reader.module", "reader"): v for k, v in model_dict.items()
}
if opt.query_side_retriever_training and not opt_checkpoint.query_side_retriever_training:
model_dict = _convert_state_dict_from_dual_encoder_retriever(model_dict)
if opt.retrieve_only: # dont load reader if in retrieve only mode
model_dict = {k: v for k, v in model_dict.items() if not k.startswith("reader")}
if opt.use_file_passages: # dont load retriever if in use_file_passages mode
model_dict = {k: v for k, v in model_dict.items() if not k.startswith("retriever")}
model.load_state_dict(model_dict)
model = _cast_and_set_attrs_and_send_to_device(model, opt)
return model
def load_atlas_model(dir_path, opt, reset_params=False, eval_only=False):
epoch_path = os.path.realpath(dir_path)
save_path = os.path.join(epoch_path, "model.pth.tar")
logger.info(f"Loading {epoch_path}")
logger.info(f"loading checkpoint {save_path}")
checkpoint = torch.load(save_path, map_location="cpu")
opt_checkpoint = checkpoint["opt"]
step = checkpoint["step"]
model_dict = checkpoint["model"]
reader, reader_tokenizer = load_reader(opt)
retriever, retriever_tokenizer = load_retriever(opt, opt_checkpoint)
model = Atlas(opt, reader, retriever, reader_tokenizer, retriever_tokenizer)
model = _load_atlas_model_state(opt, opt_checkpoint, model, model_dict)
if eval_only:
return model, None, None, None, None, opt_checkpoint, step
if not reset_params:
optimizer, scheduler, retr_optimizer, retr_scheduler = set_optim(opt_checkpoint, model)
scheduler.load_state_dict(checkpoint["scheduler"])
optimizer.load_state_dict(checkpoint["optimizer"])
else:
optimizer, scheduler, retr_optimizer, retr_scheduler = set_optim(opt, model)
return model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt_checkpoint, step
def init_atlas_model(opt, eval_only):
reader, reader_tokenizer = load_reader(opt)
retriever, retriever_tokenizer = load_retriever(opt)
model = Atlas(opt, reader, retriever, reader_tokenizer, retriever_tokenizer)
model = _cast_and_set_attrs_and_send_to_device(model, opt)
if eval_only:
return model, None, None, None, None, opt, 0
optimizer, scheduler, retr_optimizer, retr_scheduler = set_optim(opt, model)
return model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, 0
def load_or_initialize_atlas_model(opt, eval_only=False):
"""
    Either initializes an Atlas model from T5 and Contriever or loads one from disk.
    If opt.model_path is "none" and {opt.checkpoint_dir/opt.name} doesn't exist, it will initialize a new Atlas;
    if opt.model_path is "none" and {opt.checkpoint_dir/opt.name} does exist, it will load the Atlas at {opt.checkpoint_dir/opt.name}/latest;
    otherwise (opt.model_path is not "none"), it will load the saved Atlas from opt.model_path.
"""
checkpoint_path = get_checkpoint_path(opt)
latest_checkpoint_path = os.path.join(checkpoint_path, "checkpoint", "latest")
if opt.model_path == "none":
if not os.path.exists(latest_checkpoint_path): # Fresh run:
return init_atlas_model(opt, eval_only)
else: # Resume run
load_path, reset_params = latest_checkpoint_path, False
else: # fresh finetune run, initialized from old model
load_path, reset_params = opt.model_path, True
model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt_checkpoint, loaded_step = load_atlas_model(
load_path, opt, reset_params=reset_params, eval_only=eval_only
)
logger.info(f"Model loaded from {load_path}")
step = 0 if opt.model_path != "none" else loaded_step
return model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step
def save_atlas_model(model, optimizer, scheduler, retr_optimizer, retr_scheduler, step, opt, dir_path, name):
if opt.save_optimizer and opt.shard_optim:
optimizer.consolidate_state_dict()
if retr_optimizer:
retr_optimizer.consolidate_state_dict()
if not opt.is_main:
return 0
def symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
model_to_save = model.module if hasattr(model, "module") else model
path = os.path.join(dir_path, "checkpoint")
epoch_path = os.path.join(path, name) # "step-%s" % step)
os.makedirs(epoch_path, exist_ok=True)
cp = os.path.join(path, "latest")
fp = os.path.join(epoch_path, "model.pth.tar")
optim_state = optimizer.state_dict() if opt.save_optimizer else None
if retr_optimizer and opt.save_optimizer:
retr_optim_state = retr_optimizer.state_dict()
else:
retr_optim_state = None
checkpoint = {
"step": step,
"model": model_to_save.state_dict(),
"optimizer": optim_state,
"retr_optimizer": retr_optim_state,
"scheduler": scheduler.state_dict(),
"retr_scheduler": retr_scheduler.state_dict() if retr_scheduler else None,
"opt": opt,
}
torch.save(checkpoint, fp)
symlink_force(epoch_path, cp)
if opt.save_optimizer and opt.shard_optim:
optimizer._all_states = []
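# Editor's note (sketch, not part of the original file): train/evaluate scripts
# typically go through the loader above as a single entry point, roughly:
#
#   model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt_ckpt, step = \
#       load_or_initialize_atlas_model(opt)
#   ...
#   save_atlas_model(model, optimizer, scheduler, retr_optimizer, retr_scheduler,
#                    step, opt, checkpoint_dir, name=f"step-{step}")
# where `checkpoint_dir` is a hypothetical output directory.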
|
atlas-main
|
src/model_io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim import adamw as _adamw
AdamW = _adamw.AdamW
adamw = _adamw.F.adamw
class AdamWFP32Copy(AdamW):
r"""Implements AdamW algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
\text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
\: \epsilon \text{ (epsilon)} \\
&\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
\: \textit{maximize} \\
&\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
\text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
&\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
&\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
&\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
&\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
&\hspace{5mm}\textbf{if} \: amsgrad \\
&\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
\widehat{v_t}) \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
maximize (bool, optional): maximize the params based on the objective, instead of
minimizing (default: False)
foreach (bool, optional): whether foreach implementation of optimizer
is used (default: None)
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
@torch.no_grad()
def step(self, closure=None, scale=1.0):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_sums = []
max_exp_avg_sqs = []
state_steps = []
amsgrad = group["amsgrad"]
beta1, beta2 = group["betas"]
for p in group["params"]:
if p.grad is None:
continue
pgrad = p.grad
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["float32copy"] = p.to(torch.float32, memory_format=torch.preserve_format)
p = state["float32copy"]
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
p = state["float32copy"]
params_with_grad.append(p)
# grads.append(p.grad)
if pgrad.is_sparse:
raise RuntimeError("AdamW does not support sparse gradients")
grads.append(pgrad.float() / scale)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
# update the steps for each param group update
state["step"] += 1
# record the step after step update
state_steps.append(state["step"])
adam_params = {
"params": params_with_grad,
"grads": grads,
"exp_avgs": exp_avgs,
"exp_avg_sqs": exp_avg_sqs,
"max_exp_avg_sqs": max_exp_avg_sqs,
"state_steps": state_steps,
"amsgrad": amsgrad,
"beta1": beta1,
"beta2": beta2,
"lr": group["lr"],
"weight_decay": group["weight_decay"],
"eps": group["eps"],
}
if "maximize" in group:
adam_params["maximize"] = group["maximize"]
if "foreach" in group:
adam_params["foreach"] = group["foreach"]
adamw(**adam_params)
for p in group["params"]:
if p.grad is None:
continue
state = self.state[p]
p.copy_(state["float32copy"])
return loss
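# Editor's note (sketch, not part of the original file): this optimizer is meant
# as a drop-in AdamW replacement for low-precision parameters; the float32 master
# copy lives in the per-parameter state above. Rough usage:
#
#   model = model.to(torch.bfloat16)
#   optimizer = AdamWFP32Copy(model.parameters(), lr=1e-4, weight_decay=0.01)
#   loss.backward()
#   optimizer.step(scale=1.0)   # `scale` divides the gradients, e.g. for loss scaling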
|
atlas-main
|
src/AdamWFP32Copy.py
|
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import math
import os
import warnings
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
_CHECKPOINT_FOR_DOC = "t5-small"
####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
# See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of nn.Module)
####################################################
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.
Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
it will evenly distribute blocks across all devices.
Args:
device_map (`Dict[int, list]`, optional, defaults to None):
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
automatically mapped to the first device (for esoteric reasons). That means that the first device should
have fewer attention modules mapped to it than other devices. For reference, the t5 models have the
following number of attention modules:
- t5-small: 6
- t5-base: 12
- t5-large: 24
- t5-3b: 24
- t5-11b: 24
Example:
```python
# Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:
model = T5ForConditionalGeneration.from_pretrained('t5-3b')
device_map = {0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23]}
model.parallelize(device_map)
```
"""
DEPARALLELIZE_DOCSTRING = r"""
Moves the model to cpu from a model parallel state.
Example:
```python
# On a 4 GPU machine with t5-3b:
model = T5ForConditionalGeneration.from_pretrained('t5-3b')
device_map = {0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23]}
model.parallelize(device_map) # Splits the model across several devices
model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
```
"""
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = nn.functional.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
# hidden_states = torch.clamp(hidden_states, -1000, 1000)
return hidden_states
class T5DenseGatedGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.gelu_act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_gelu = self.wi_0(hidden_states)
hidden_gelu = self.gelu_act(hidden_gelu.float()).type_as(hidden_states)
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
# hidden_states = torch.clamp(hidden_states, -1000, 1000)
return hidden_states
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
if config.feed_forward_proj == "relu":
self.DenseReluDense = T5DenseReluDense(config)
elif config.feed_forward_proj == "gated-gelu":
self.DenseReluDense = T5DenseGatedGeluDense(config)
else:
raise ValueError(
f"{self.config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`"
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
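    # Editor's note (sketch, not part of the original code): with the defaults
    # (bidirectional=True, num_buckets=32, max_distance=128), a relative offset of
    # -1 lands in bucket 1, +1 lands in bucket 17, and offsets at or beyond
    # +/-128 saturate into the last bucket on their side of the split.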
def compute_bias(self, query_length, key_length):
"""Compute binned relative position bias"""
context_position = torch.arange(
query_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=self.relative_attention_bias.weight.device)[
None, :
]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
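# Illustrative sketch (not part of the original model code): how the key/value cache handled by the
# `project` helper above grows during incremental decoding. The shapes below are hypothetical.
def _sketch_kv_cache_concat():
    # (batch_size, n_heads, past_len, dim_per_head) cached from previous decoding steps
    past_keys = torch.zeros(1, 8, 4, 64)
    # one freshly projected key for the current decoding step
    new_keys = torch.zeros(1, 8, 1, 64)
    # self-attention with a cache concatenates along the sequence axis (dim=2)
    return torch.cat([past_keys, new_keys], dim=2).shape  # torch.Size([1, 8, 5, 64])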
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
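# Illustrative sketch (not part of the original model code): both wrapper layers above use the same
# pre-norm residual wiring, i.e. x + dropout(sublayer(layer_norm(x))), shown here generically.
def _sketch_pre_norm_residual(x, sublayer, layer_norm, dropout):
    return x + dropout(sublayer(layer_norm(x)))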
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
# if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
if torch.isinf(hidden_states).any():
print("a")
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
# if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
if torch.isinf(hidden_states).any():
print("b")
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
# if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
if torch.isinf(hidden_states).any():
print(f"c {torch.linalg.norm(hidden_states).item()}")
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
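# Illustrative sketch (not part of the original model code): the clamping used above, shown on a toy
# fp16 tensor where 1e5 overflows to inf.
def _sketch_fp16_clamp():
    x = torch.tensor([1e5, -1e5, 3.0], dtype=torch.float16)  # 1e5 overflows to inf in fp16
    clamp_value = torch.finfo(x.dtype).max - 1000
    # inf values are pulled back to just below the fp16 maximum; finite values are unchanged
    return torch.clamp(x, min=-clamp_value, max=clamp_value)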
class T5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5DenseGatedGeluDense):
module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (T5Attention, T5Stack)):
module.gradient_checkpointing = value
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only non-negative values"
return shifted_input_ids
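# Illustrative sketch (not part of the original model code): what `_shift_right` produces for a toy
# label tensor, assuming pad_token_id=0 and decoder_start_token_id=0 as in the standard T5 checkpoints.
def _sketch_shift_right():
    labels = torch.tensor([[42, 17, 1, -100]])
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = 0  # decoder_start_token_id
    shifted.masked_fill_(shifted == -100, 0)  # -100 (ignored label positions) becomes pad_token_id
    return shifted  # tensor([[ 0, 42, 17,  1]])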
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# Load onto devices
for k, v in self.device_map.items():
for layer in v:
cuda_device = "cuda:" + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
# Set embed_tokens to first layer
self.embed_tokens = self.embed_tokens.to(self.first_device)
# Set final layer norm to last device
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
for i in range(len(self.block)):
self.block[i] = self.block[i].to("cpu")
self.embed_tokens = self.embed_tokens.to("cpu")
self.final_layer_norm = self.final_layer_norm.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, f":obj:`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
# We share the position biases between the layers - the first layer stores them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
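# Illustrative usage sketch (not part of the original model code; the hyper-parameters are hypothetical):
# running the stack above as a stand-alone encoder.
def _sketch_t5stack_encoder():
    config = T5Config(vocab_size=100, d_model=32, d_kv=8, num_heads=4, d_ff=64, num_layers=2)
    embeddings = nn.Embedding(config.vocab_size, config.d_model)
    encoder = T5Stack(config, embed_tokens=embeddings)
    input_ids = torch.tensor([[5, 6, 7, 0]])
    outputs = encoder(input_ids=input_ids, use_cache=False, return_dict=True)
    return outputs.last_hidden_state.shape  # torch.Size([1, 4, 32])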
T5_START_DOCSTRING = r"""
The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder-decoder transformer pre-trained in a text-to-text
denoising generative setting.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads, etc.).
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
general usage and behavior.
Parameters:
config ([`T5Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model
weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining, take a look at [T5 Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`T5Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
also be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`:
*attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a
sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds`
have to be input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds`
takes the value of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
T5_ENCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
To know more on how to prepare `input_ids` for pretraining, take a look at [T5 Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5Model.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.decoder.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
labels in `[0, ..., config.vocab_size - 1]`.
Returns:
Examples:
```python
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small')
>>> # training
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# reorder each cached state along its batch dimension
# (the batch dimension of each `layer_past_state` is dim 0)
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
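# Illustrative sketch (not part of the original model code): how a beam index tensor reorders one cached
# key/value tensor along its batch dimension during beam search. The shapes below are hypothetical.
def _sketch_reorder_cached_state():
    layer_past_state = torch.arange(2 * 3 * 4 * 5, dtype=torch.float).view(2, 3, 4, 5)  # (batch, heads, seq, dim)
    beam_idx = torch.tensor([1, 1])  # both surviving beams descend from batch item 1
    return layer_past_state.index_select(0, beam_idx)  # shape unchanged: (2, 3, 4, 5)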
@add_start_docstrings(
"The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5EncoderModel(T5PreTrainedModel):
authorized_missing_keys = [
r"encoder\.embed_tokens\.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, T5EncoderModel
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5EncoderModel.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs
|
atlas-main
|
src/modeling_t5.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import math
import os
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module. Unlike the T5 variant it keeps a bias and subtracts the mean, but it normalizes by the uncentered second moment (computed in float32) rather than the true variance.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
mean = hidden_states.to(torch.float32).mean(-1, keepdim=True)
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states + self.bias
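# Illustrative sketch (not part of the original model code): the normalization above written out for a
# single vector; note the second moment is uncentered, unlike standard LayerNorm.
def _sketch_bert_layer_norm_math():
    x = torch.tensor([1.0, 2.0, 3.0, 4.0])
    mean = x.mean()
    second_moment = x.pow(2).mean()
    return (x - mean) * torch.rsqrt(second_moment + 1e-6)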
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
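# Illustrative conversion sketch (an assumption, not invoked anywhere in this file): how
# `load_tf_weights_in_bert` is typically driven. The function name and checkpoint path are
# placeholders; a real conversion would point at an actual TF 1.x BERT checkpoint and config.
def _example_convert_tf_checkpoint(tf_checkpoint_path="/path/to/bert_model.ckpt"):
    config = BertConfig()  # or BertConfig.from_json_file(...) for a real checkpoint
    model = BertForPreTraining(config)
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    return model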
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # If token_type_ids is not provided, default to the all-zeros buffer registered in the
        # constructor. This mirrors the usual auto-generated token_type_ids and lets users trace the
        # model without passing token_type_ids explicitly (see issue #5664).
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings.float()).type_as(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
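        # Reshape (batch_size, seq_len, all_head_size) -> (batch_size, num_heads, seq_len, head_size)
        # so that attention scores can be computed per head with a single batched matmul.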
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
            # reuse the cached cross-attention key/value states
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
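            # distance has shape (seq_len, seq_len) with values in [-(seq_len - 1), seq_len - 1];
            # adding max_position_embeddings - 1 shifts it into the valid index range of the
            # distance_embedding table.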
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states.float()).type_as(hidden_states)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states.float()).type_as(hidden_states)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BertAttention(config, position_embedding_type="absolute")
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
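                # Bind the non-tensor arguments (past_key_value, output_attentions) in a closure so
                # that only tensor inputs are passed through torch.utils.checkpoint.checkpoint.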
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states.float()).type_as(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BertEncoder):
module.gradient_checkpointing = value
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
        # length of the cached key/value states from previous forward passes (0 when no cache is used)
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
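        # For a 2D mask, get_extended_attention_mask broadcasts it to (batch_size, 1, 1, seq_length) and
        # converts it to an additive mask (0 for kept tokens, a large negative value for masked ones),
        # matching the addition performed in BertSelfAttention.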
# If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
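# Illustrative sketch (an assumption, with randomly initialized weights): running BertModel as a
# decoder with cross-attention, as described in the class docstring above. The function name,
# layer count, and tensor shapes are placeholders.
def _example_bert_model_as_decoder():
    config = BertConfig(is_decoder=True, add_cross_attention=True, num_hidden_layers=2)
    model = BertModel(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    encoder_hidden_states = torch.randn(1, 16, config.hidden_size)
    outputs = model(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states)
    return outputs.last_hidden_state  # (1, 8, hidden_size)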
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
next_sentence_label: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForPreTraining.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.Tensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be
in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in `[0, ...,
config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained("bert-base-cased", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
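# Illustrative sketch (an assumption, with randomly initialized weights): `generate()` exercises
# `prepare_inputs_for_generation` and `_reorder_cache` defined above. The function name, layer
# count, and lengths are placeholders.
def _example_bert_lm_generation():
    config = BertConfig(is_decoder=True, num_hidden_layers=2)
    model = BertLMHeadModel(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 4))
    return model.generate(input_ids, max_length=8, do_sample=False)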
@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
if self.config.pad_token_id is None:
raise ValueError("The PAD token should be defined for generation")
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top.""",
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
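# Illustrative sketch (an assumption, with randomly initialized weights): the `problem_type` logic
# above selects MSELoss when `num_labels == 1`, CrossEntropyLoss for integer single-label targets,
# and BCEWithLogitsLoss for multi-label targets. Names and shapes below are placeholders.
def _example_single_label_classification_loss():
    config = BertConfig(num_labels=3, num_hidden_layers=2)
    model = BertForSequenceClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 6))
    labels = torch.tensor([0, 2])  # integer labels -> single_label_classification branch
    return model(input_ids=input_ids, labels=labels).loss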
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
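        # Flatten the choice dimension: (batch_size, num_choices, seq_len) -> (batch_size * num_choices, seq_len)
        # so that every choice is encoded independently; the logits are reshaped back to
        # (batch_size, num_choices) below.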
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, the split adds an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
atlas-main
|
src/modeling_bert.py
|
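A minimal, self-contained sketch (not part of the repository above) of how the `problem_type` branch in `BertForSequenceClassification.forward` picks a loss; tensor shapes and label values are made up for illustration.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def sequence_classification_loss(logits, labels, num_labels, problem_type=None):
    # Mirror the inference logic of the forward pass above.
    if problem_type is None:
        if num_labels == 1:
            problem_type = "regression"
        elif labels.dtype in (torch.long, torch.int):
            problem_type = "single_label_classification"
        else:
            problem_type = "multi_label_classification"
    if problem_type == "regression":
        return MSELoss()(logits.squeeze(), labels.squeeze())
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)

logits = torch.randn(4, 3)           # batch of 4 examples, 3 classes
labels = torch.tensor([0, 2, 1, 1])  # integer labels -> single-label cross-entropy
print(sequence_classification_loss(logits, labels, num_labels=3))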
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import types
import torch
from torch import nn
from transformers.utils import logging
from src.modeling_t5 import T5ForConditionalGeneration, T5Stack
logger = logging.get_logger(__name__)
class FiDStack(T5Stack):
def __init__(self, config, embed_tokens=None):
super().__init__(config, embed_tokens=embed_tokens)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
if not self.is_decoder:
input_ids = input_ids.view(input_ids.size(0) * self.config.n_context, -1)
attention_mask = attention_mask.view(attention_mask.size(0) * self.config.n_context, -1)
output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not self.is_decoder:
if not return_dict:
last_hidden_states = output[0]
last_hidden_state = last_hidden_states.view(self.config.bsz, -1, last_hidden_states.size(-1))
output = (last_hidden_state,) + output[1:]
else:
last_hidden_state = output.last_hidden_state
output.last_hidden_state = last_hidden_state.view(self.config.bsz, -1, last_hidden_state.size(-1))
return output
class FiD(T5ForConditionalGeneration):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = FiDStack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = FiDStack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
def set_checkpoint(self, use_checkpoint):
"""
Enable or disable checkpointing in the encoder.
See https://pytorch.org/docs/stable/checkpoint.html
"""
for mod in self.encoder.encoder.block:
mod.use_checkpoint = use_checkpoint
def reset_score_storage(self):
"""
Reset score storage, only used when cross-attention scores are saved
to train a retriever.
"""
for mod in self.decoder.block:
mod.layer[1].EncDecAttention.score_storage = None
mod.layer[1].EncDecAttention.normalized_score_storage = None
mod.layer[1].EncDecAttention.prob_storage = None
@torch.no_grad()
def get_crossattention_scores(self, n_passages, mask, labels, ids, mode="all", mask_query=None):
"""
Cross-attention scores are aggregated to obtain a single scalar per
passage. This scalar can be seen as a similarity score between the
question and the input passage. It is obtained by averaging the
cross-attention scores obtained on the first decoded token over heads,
layers, and tokens of the input passage.
More details in Distilling Knowledge from Reader to Retriever:
https://arxiv.org/abs/2012.04584.
"""
scores, norms, probs = [], [], []
for mod in self.decoder.block:
scores.append(mod.layer[1].EncDecAttention.score_storage)
norms.append(mod.layer[1].EncDecAttention.normalized_score_storage)
probs.append(mod.layer[1].EncDecAttention.prob_storage)
scores = torch.stack(scores)
norms = torch.stack(norms)
probs = torch.stack(probs)
output = {}
if "scores" in mode or "all" in mode:
self.aggregate_value(scores, mask, labels, n_passages, ids, mask_query, output, prefix="scores")
if "probs" in mode or "all" in mode:
self.aggregate_value(probs, mask, labels, n_passages, ids, mask_query, output, prefix="probs")
if "norms" in mode or "all" in mode:
self.aggregate_value(norms, mask, labels, n_passages, ids, mask_query, output, prefix="norms")
return output
def aggregate_value(self, scores, mask, labels, n_passages, ids, mask_query=None, output={}, prefix=""):
n_layers, bsz, n_tokens, total_tokens = scores.size()
ids = ids.view(bsz, n_passages, -1)
scores = scores.view(n_layers, bsz, n_tokens, n_passages, -1)
mask = mask.view(bsz, n_passages, -1)
scores = scores.masked_fill(~mask[None, :, None], 0.0)
ntokens_sum = 256 * n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
ntokens_wquery = mask.sum(dim=[2]) * n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
ntokens_first = mask.sum(dim=[2]) * n_layers
# Compute scores based on topk scores
scores = scores.sum(dim=[0])
for k in [5, 10, 20]:
topkscores = self.get_topk_score(k, scores, mask, labels, n_layers)
output[f"{prefix}top{k}"] = topkscores
scores = scores.masked_fill((labels == -100)[:, :, None, None], 0.0)
scores_wquery = scores.sum(dim=[1, 3])
scores_wquery_sepmask = scores.masked_fill(~(ids == 1)[:, None], 0).sum(dim=[1, 3])
output[f"{prefix}nosep"] = scores_wquery_sepmask / ntokens_sum
output[f"{prefix}first"] = scores[:, 0].sum(dim=[2]) / ntokens_first
output[f"{prefix}sum"] = scores_wquery / ntokens_sum
output[f"{prefix}avg"] = scores_wquery / ntokens_wquery
scores_woquery = None
# Compute scores based on scores without query
if mask_query is not None:
output[f"{prefix}woquery"] = self.get_woquery_score(scores, mask_query, mask, labels, n_layers)
return output
def get_topk_score(self, topk, scores, mask, labels, n_layers):
topkscores = torch.topk(scores, k=topk, dim=-1)[0].sum(dim=[3])
topkscores = topkscores.masked_fill((labels == -100)[:, :, None], 0.0)
ntokens_top = n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
topkscores = topkscores.sum(dim=1) / (topk * ntokens_top)
return topkscores
def get_woquery_score(self, scores, mask_query, mask, labels, n_layers):
if scores.size(-1) > mask_query.size(-1):
zero_padding = torch.zeros(
[mask_query.size(0), scores.size(-1) - mask_query.size(-1)], device=mask_query.device, dtype=torch.bool
)
mask_query = torch.cat([mask_query, zero_padding], dim=-1)
mask_query = mask * (~mask_query[:, None])
scores_woquery = scores.masked_fill(~mask_query[:, None], 0.0)
# ntokens_woquery = mask_query.sum(dim=[2]) * n_layers * (~(labels==-100)).sum(dim=[1])[:, None]
ntokens_woquery = 256 * n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
scores_woquery = scores_woquery.sum(dim=[1, 3])
return scores_woquery / ntokens_woquery
def overwrite_forward_crossattention(self):
"""
Replace cross-attention forward function, only used to save
cross-attention scores.
"""
for mod in self.decoder.block:
xattn = mod.layer[1].EncDecAttention
xattn.forward = types.MethodType(cross_attention_forward, xattn)
def create_crossattention_storage(self):
for mod in self.decoder.block:
xattn = mod.layer[1].EncDecAttention
xattn.score_storage = None
xattn.normalized_score_storage = None
xattn.prob_storage = None
def cross_attention_forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1) # .type_as(scores)
if hasattr(self, "score_storage"):
with torch.no_grad():
self.score_storage = scores.detach().mean(dim=1)
self.prob_storage = attn_weights.detach().mean(dim=1)
self.normalized_score_storage = (
(torch.norm(value_states.float(), dim=-1)[:, :, None] * attn_weights).detach().mean(dim=1)
)
attn_weights = nn.functional.dropout(attn_weights.type_as(scores), p=self.dropout, training=self.training)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
|
atlas-main
|
src/fid.py
|
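A self-contained illustration (not from the repository) of the reshaping that FiDStack.forward performs around the T5 encoder: the retrieved passages of each example are folded into the batch dimension before encoding and unfolded again afterwards so the decoder cross-attends over all passages at once. The shapes below are made up.
import torch

bsz, n_context, seq_len, d_model = 2, 3, 5, 8
input_ids = torch.randint(0, 100, (bsz, n_context * seq_len))

# Encoder side: (bsz, n_context * seq_len) -> (bsz * n_context, seq_len)
flat_ids = input_ids.view(input_ids.size(0) * n_context, -1)
print(flat_ids.shape)  # torch.Size([6, 5])

# Stand-in for the encoder output: one hidden state per token of every passage.
flat_hidden = torch.randn(bsz * n_context, seq_len, d_model)

# Output side: concatenate each example's passages back together,
# (bsz * n_context, seq_len, d_model) -> (bsz, n_context * seq_len, d_model).
fused_hidden = flat_hidden.view(bsz, -1, flat_hidden.size(-1))
print(fused_hidden.shape)  # torch.Size([2, 15, 8])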
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import math
import time
from functools import reduce
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from src import dist_utils
from src.retrievers import EMBEDDINGS_DIM
logger = logging.getLogger(__name__)
IGNORE_INDEX: int = -100
BERT_MAX_SEQ_LENGTH: int = 512
def encode_passages(batch, tokenizer, max_length):
bsz = len(batch)
n = max([len(example) for example in batch])
batch = [example + [""] * (n - len(example)) for example in batch]
batch = reduce(lambda a, b: a + b, batch)
tokens = tokenizer(
batch,
padding="max_length",
max_length=max_length,
return_tensors="pt",
truncation=True,
)
tokens = {k: v.view(bsz, n, -1) for k, v in tokens.items()}
return tokens
class Atlas(nn.Module):
def __init__(self, opt, reader, retriever, reader_tokenizer, retriever_tokenizer):
super(Atlas, self).__init__()
self.reader = reader
self.retriever = retriever
self.reader_tokenizer = reader_tokenizer
self.retriever_tokenizer = retriever_tokenizer
self.opt = opt
self.READER_ALL_TOKENS = list(self.reader_tokenizer.vocab.values())
def _get_fp16_retriever_copy(self):
if hasattr(self.retriever, "module"):
retriever_to_copy = self.retriever.module
else:
retriever_to_copy = self.retriever
return copy.deepcopy(retriever_to_copy).half().eval()
@torch.no_grad()
def build_index(self, index, passages, gpu_embedder_batch_size, logger=None):
n_batch = math.ceil(len(passages) / gpu_embedder_batch_size)
retrieverfp16 = self._get_fp16_retriever_copy()
total = 0
for i in range(n_batch):
batch = passages[i * gpu_embedder_batch_size : (i + 1) * gpu_embedder_batch_size]
batch = [self.opt.retriever_format.format(**example) for example in batch]
batch_enc = self.retriever_tokenizer(
batch,
padding="longest",
return_tensors="pt",
max_length=min(self.opt.text_maxlength, gpu_embedder_batch_size),
truncation=True,
)
embeddings = retrieverfp16(**_to_cuda(batch_enc), is_passages=True)
index.embeddings[:, total : total + len(embeddings)] = embeddings.T
total += len(embeddings)
if i % 500 == 0 and i > 0:
logger.info(f"Number of passages encoded: {total}")
dist_utils.barrier()
logger.info(f"{total} passages encoded on process: {dist_utils.get_rank()}")
if not index.is_index_trained():
logger.info(f"Building faiss indices")
index.train_index()
@torch.no_grad()
def _retrieve(
self,
index,
topk,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata=None,
filtering_fun=None,
iter_stats={},
):
self.retriever.eval()
if len(query) > 0:
query_emb = self.retriever(query_ids_retriever, query_mask_retriever, is_passages=False)
else:
query_emb = torch.empty((0, EMBEDDINGS_DIM)).cuda() # TODO: broken
if self.training:
self.retriever.train()
search_start = time.time()
if filtering_fun is not None:
passages, scores = index.search_knn(query_emb, topk * self.opt.filtering_overretrieve_ratio)
passages, scores = filtering_fun(batch_metadata, passages, scores, topk, training=self.training)
else:
passages, scores = index.search_knn(query_emb, topk)
iter_stats["runtime/search"] = (time.time() - search_start, 1)
return passages, scores, query_emb
@torch.no_grad()
def retrieve_with_rerank(
self,
index,
topk,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata=None,
filtering_fun=None,
iter_stats={},
):
bsz = len(query)
to_rerank = self.opt.n_to_rerank_with_retrieve_with_rerank
# first, do the retrieval
passages, _, query_emb = self._retrieve(
index,
to_rerank,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata,
filtering_fun,
iter_stats,
)
retrieverfp16 = self._get_fp16_retriever_copy()
fstr = self.opt.retriever_format
flat_passage_strings = [fstr.format(**p) for ps in passages for p in ps]
encoder_batch_size = min(len(flat_passage_strings), self.opt.per_gpu_embedder_batch_size)
passage_emb, output_passages, output_scores = (
query_emb.new_zeros(len(flat_passage_strings), query_emb.shape[-1]),
[],
[],
)
for b in range(0, len(flat_passage_strings), encoder_batch_size):
batch = flat_passage_strings[b : b + encoder_batch_size]
batch_enc = self.retriever_tokenizer(
batch,
padding="longest",
return_tensors="pt",
max_length=min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
truncation=True,
)
batch_emb = retrieverfp16(**_to_cuda(batch_enc), is_passages=True).to(query_emb)
passage_emb[b : b + encoder_batch_size] = batch_emb
passage_emb = passage_emb.view(bsz, to_rerank, -1)
retriever_scores = torch.einsum("id, ijd->ij", [query_emb, passage_emb])
top_retriever_scores, top_retriever_inds = torch.topk(retriever_scores, topk, dim=1)
for i in range(bsz):
output_passages.append([passages[i][j] for j in top_retriever_inds[i]])
output_scores.append(top_retriever_scores[i].tolist())
return output_passages, output_scores
@torch.no_grad()
def retrieve(self, *args, **kwargs):
retrieve_func = self.retrieve_with_rerank if self.opt.retrieve_with_rerank else self._retrieve
passages, scores = retrieve_func(*args, **kwargs)[:2]
return passages, scores
def append_query(self, query, passages):
return [self.opt.encoder_format.format(query=query, **p) for p in passages]
def retriever_tokenize(self, query):
if self.retriever_tokenizer:
query_enc = self.retriever_tokenizer(
query,
max_length=min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
padding="max_length",
truncation=True,
return_tensors="pt",
)
query_enc = _to_cuda(query_enc)
else:
query_enc = None
return query_enc
def reader_tokenize(self, query, target, target_tokens):
if target_tokens is None:
if self.opt.decoder_prompt_format is not None:
modified_query = [self.opt.decoder_prompt_format.format_map({"query": q}) for q in query]
target = [q + t for (q, t) in zip(modified_query, target)]
query_mask = self.reader_tokenizer(
modified_query,
max_length=self.opt.target_maxlength,
padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)["attention_mask"]
if self.opt.decoder_format is not None:
target = [self.opt.decoder_format.format(target=t) for t in target]
target = [t + "</s>" if not t.endswith("</s>") else t for t in target]
target_tokens = self.reader_tokenizer(
target,
max_length=self.opt.target_maxlength,
padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)
decoder_input_ids = self.reader._shift_right(target_tokens["input_ids"])
labels = target_tokens["input_ids"].masked_fill(~target_tokens["attention_mask"].bool(), IGNORE_INDEX)
# If decoder prompt is not None mask labels such that the model is not trained to predict the prompt
if self.opt.decoder_prompt_format is not None:
query_mask = self.reader_tokenizer(
modified_query,
max_length=self.opt.target_maxlength,
padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)["attention_mask"]
padding = torch.zeros((query_mask.size(0), target_tokens["input_ids"].size(-1) - query_mask.size(-1)))
query_mask = torch.cat([query_mask, padding], dim=1)
labels = labels.masked_fill(query_mask.bool(), IGNORE_INDEX)
return labels.cuda(), decoder_input_ids.cuda()
def tokenize(self, query, target, target_tokens):
if query is None and target is None:
return None, None, None
assert (
target_tokens is None or self.opt.decoder_prompt_format is None
), "decoder_prompt_format not compatible with target tokenized in iterator"
query_enc = self.retriever_tokenize(query) if not self.opt.use_file_passages else None
labels, decoder_input_ids = self.reader_tokenize(query, target, target_tokens)
return query_enc, labels, decoder_input_ids
def tokenize_passages(self, query, passages):
if len(query) == 0:
return None, None
query_passages = [self.append_query(q, p) for q, p in zip(query, passages)]
fstr = self.opt.retriever_format
retriever_passages = [[fstr.format(**p) for p in example] for example in passages]
if self.retriever_tokenizer:
retriever_tok = encode_passages(
retriever_passages,
self.retriever_tokenizer,
min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
)
retriever_tok = _to_cuda(retriever_tok)
else:
retriever_tok = None
reader_tok = encode_passages(query_passages, self.reader_tokenizer, self.opt.text_maxlength)
reader_tok = _to_cuda(reader_tok)
return reader_tok, retriever_tok
def perplexity_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz):
with torch.no_grad():
self.reader.eval()
total_context = reader_ids.size(1)
cfg.n_context = 1
cfg.bsz = bsz * total_context
reader_ids_score = reader_ids.view(bsz * total_context, -1)
reader_mask_score = reader_mask.view(bsz * total_context, -1)
repeated_decoder_input_ids = torch.repeat_interleave(decoder_input_ids, total_context, dim=0)
repeated_labels = torch.repeat_interleave(labels, total_context, dim=0)
reader_output = self.reader(
input_ids=reader_ids_score.cuda(),
attention_mask=reader_mask_score.cuda(),
decoder_input_ids=repeated_decoder_input_ids,
labels=repeated_labels,
use_cache=False,
)
token_loss = nn.functional.cross_entropy(
reader_output.logits.view(-1, reader_output.logits.size(-1)),
repeated_labels.flatten(),
reduction="none",
)
gold_score = token_loss.view(bsz, total_context, -1)
z = (repeated_labels.view(bsz, total_context, -1) > -1).sum(dim=-1)
gold_score = -gold_score.sum(dim=-1) / z
return gold_score
def eval_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz, mask_query):
self.reader.eval()
self.reader.reset_score_storage()
cfg.bsz = reader_ids.size(0)
cfg.n_context = reader_ids.size(1)
reader_ids_score = reader_ids.view(reader_ids.size(0), -1)
reader_mask_score = reader_mask.view(reader_mask.size(0), -1)
with torch.no_grad():
reader_output = self.reader(
input_ids=reader_ids_score,
attention_mask=reader_mask_score,
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
crossattention_scores = self.reader.get_crossattention_scores(
cfg.n_context,
reader_mask_score,
labels=labels,
ids=reader_ids,
mode=self.opt.gold_score_mode,
mask_query=mask_query,
)
gold_score = select_crossattention_scores(crossattention_scores, self.opt.gold_score_mode)
if self.training:
self.reader.train()
return gold_score
def loop_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz):
with torch.no_grad():
total_context = reader_ids.size(1)
doc_len = reader_ids.size(-1)
self.reader.eval()
cfg.bsz = bsz
cfg.n_context = total_context
reader_ids_score_eval = reader_ids.view(reader_ids.size(0), -1)
reader_mask_score_eval = reader_mask.view(reader_mask.size(0), -1)
# forward pass for calculating and caching the encoder states:
reader_output_eval = self.reader(
input_ids=reader_ids_score_eval,
attention_mask=reader_mask_score_eval,
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
eval_hidden_state = reader_output_eval.encoder_last_hidden_state
# run one forward pass per document to calculate perplexity when leaving that document out
gold_scores = []
for loo_index in range(total_context):
reader_mask_loo = reader_mask.clone()
reader_mask_loo[:, loo_index] = False # mask out this doc
loo_output_eval = self.reader(
encoder_outputs=[eval_hidden_state],
attention_mask=reader_mask_loo.view(bsz, (total_context) * doc_len),
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
token_loss = nn.functional.cross_entropy(
loo_output_eval.logits.view(-1, loo_output_eval.logits.size(-1)), labels.view(-1), reduction="none"
)
mean_loss = token_loss.view(bsz, labels.shape[-1]).sum(dim=-1) / (labels > -1).sum(-1)
gold_scores.append(mean_loss)
gold_score = torch.stack(gold_scores, dim=1)
return gold_score
@torch.no_grad()
def emdr_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz):
self.reader.eval()
cfg.n_context = 1
cfg.bsz = bsz * self.opt.retriever_n_context
reader_ids_score = reader_ids.view(bsz * self.opt.retriever_n_context, -1)
reader_mask_score = reader_mask.view(bsz * self.opt.retriever_n_context, -1)
repeated_decoder_input_ids = torch.repeat_interleave(decoder_input_ids, self.opt.retriever_n_context, dim=0)
repeated_labels = torch.repeat_interleave(labels, self.opt.retriever_n_context, dim=0)
reader_output = self.reader(
input_ids=reader_ids_score.cuda(),
attention_mask=reader_mask_score.cuda(),
labels=repeated_labels,
use_cache=False,
)
gold_score = reader_output.logits
return gold_score
def forward(
self,
index,
query,
target,
target_tokens=None,
passages=None,
batch_metadata=None,
filtering_fun=None,
use_cache=False,
train_retriever=False,
iter_stats={},
):
forward_start = time.time()
bsz = len(query)
query_mask_reader = (
self.reader_tokenizer.batch_encode_plus(
query,
max_length=self.opt.text_maxlength,
padding="longest",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)["attention_mask"]
.bool()
.cuda()
)
query_enc, labels, decoder_input_ids = self.tokenize(query, target, target_tokens)
if not self.opt.use_file_passages:
retrieve_start = time.time()
passages, _ = self.retrieve(
index,
self.opt.retriever_n_context,
query,
query_enc["input_ids"],
query_enc["attention_mask"],
batch_metadata=batch_metadata,
filtering_fun=filtering_fun,
iter_stats=iter_stats,
)
iter_stats["runtime/retrieve"] = (time.time() - retrieve_start, 1)
reader_tokens, retriever_tokens = self.tokenize_passages(query, passages)
reader_ids = reader_tokens["input_ids"] # FIXME
reader_mask = reader_tokens["attention_mask"].bool()
n_context_training = min(self.opt.n_context, reader_ids.size(1))
cfg = self.reader.encoder.config
retriever_loss = None
if train_retriever:
if self.opt.use_gradient_checkpoint_retriever:
self.retriever.gradient_checkpointing_enable()
query_emb = self.retriever(**query_enc, is_passages=False)
if "std" in self.opt.gold_score_mode:
retriever_tokens = {k: v[:, :n_context_training] for k, v in retriever_tokens.items()}
retriever_tokens = {k: v.reshape(-1, v.size(-1)) for k, v in retriever_tokens.items()}
passage_emb = self.retriever(**retriever_tokens, is_passages=True).to(query_emb)
passage_emb = passage_emb.view(bsz, -1, passage_emb.size(-1))
retriever_score = torch.einsum("id, ijd->ij", [query_emb, passage_emb])
if self.opt.use_gradient_checkpoint_retriever:
self.retriever.gradient_checkpointing_disable()
if "eval" in self.opt.gold_score_mode:
gold_score = self.eval_score(
reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz, query_mask_reader
)
elif "loop" in self.opt.gold_score_mode:
gold_score = self.loop_score(reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz)
elif "ppmean" in self.opt.gold_score_mode:
gold_score = self.perplexity_score(reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz)
elif "emdr" in self.opt.gold_score_mode:
gold_score = self.emdr_score(reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz)
self.reader.reset_score_storage()
if self.training:
self.reader.train()
cfg.bsz = reader_ids.size(0)
cfg.n_context = n_context_training
reader_ids_training = reader_ids[:, :n_context_training].contiguous()
reader_mask_training = reader_mask[:, :n_context_training].contiguous()
reader_ids_training = reader_ids_training.view(reader_ids.size(0), -1)
reader_mask_training = reader_mask_training.view(reader_mask.size(0), -1)
if self.opt.use_gradient_checkpoint_reader:
self.reader.gradient_checkpointing_enable()
reader_output = self.reader(
input_ids=reader_ids_training,
attention_mask=reader_mask_training,
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
reader_loss = reader_output[0]
if self.opt.use_gradient_checkpoint_reader:
self.reader.gradient_checkpointing_disable()
if train_retriever:
if self.opt.compute_crossattention_stats or "std" in self.opt.gold_score_mode:
crossattention_scores = self.reader.get_crossattention_scores(
n_context_training,
reader_mask_training.cuda(),
ids=reader_ids_training.cuda(),
mask_query=query_mask_reader.cuda(),
labels=labels,
mode="all",
)
if "std" in self.opt.gold_score_mode:
gold_score = select_crossattention_scores(
crossattention_scores, self.opt.gold_score_mode
).detach() # TODO: is detach really useful here?
retriever_score = retriever_score / np.sqrt(query_emb.size(-1))
if self.opt.compute_crossattention_stats:
with torch.no_grad():
for k, v in crossattention_scores.items():
corr = torch.corrcoef(torch.stack([gold_score.view(-1), v.view(-1)]))
corr = corr[0, 1].item()
if np.isnan(corr):
corr = 0.0
iter_stats[f"corr/{k}"] = (corr, len(query))
if gold_score is not None:
gold_score = gold_score.float()
retriever_score = retriever_score.float()
if self.opt.gold_score_mode == "emdr":
retriever_loss = self.logprob(retriever_score, gold_score, labels)
else:
retriever_loss = self.kldivloss(retriever_score, gold_score)
self.reader.reset_score_storage()
iter_stats["loss/reader_loss"] = (reader_loss.item(), len(query))
if retriever_loss is not None:
iter_stats["loss/retriever_loss"] = (retriever_loss.item(), len(query))
iter_stats["runtime/forward"] = (time.time() - forward_start, 1)
return reader_loss, retriever_loss
def kldivloss(self, score, gold_score):
gold_score = torch.softmax(gold_score / self.opt.temperature_gold, dim=-1)
score = torch.nn.functional.log_softmax(score / self.opt.temperature_score, dim=-1)
return torch.nn.KLDivLoss()(score, gold_score)
def logprob(self, score, gold_score, labels):
with torch.no_grad():
repeated_labels = torch.repeat_interleave(labels, self.opt.retriever_n_context, dim=0)
repeated_labels[repeated_labels == IGNORE_INDEX] = 0
mask_labels = labels >= 0
gold_log_prob = torch.nn.functional.log_softmax(gold_score / self.opt.temperature_gold, dim=-1)
gold_log_probs = torch.gather(gold_log_prob, dim=-1, index=repeated_labels[..., None]).view(
gold_log_prob.size(0), -1
)
gold_log_probs = gold_log_probs.view(score.size(0), score.size(1), -1)
log_score = torch.nn.functional.log_softmax(score / self.opt.temperature_score, dim=-1)
log_prob = gold_log_probs + log_score[..., None]
logsumprobs = torch.logsumexp(log_prob, dim=1)
loss = -1 * torch.sum(logsumprobs * mask_labels) / torch.sum(mask_labels)
return loss
@torch.no_grad()
def compute_reader_loss_and_logits(self, tokens, decoder_input_ids, labels):
cfg = self.reader.encoder.config
cfg.bsz = tokens["input_ids"].size(0)
cfg.n_context = min(self.opt.n_context, tokens["input_ids"].size(1))
reader_loss = self.reader(
input_ids=tokens["input_ids"].cuda().view(tokens["input_ids"].size(0), -1),
attention_mask=tokens["attention_mask"].cuda().view(tokens["attention_mask"].size(0), -1),
decoder_input_ids=decoder_input_ids.cuda(),
labels=labels.cuda(),
use_cache=False,
)
return reader_loss[0].cpu().item(), reader_loss[1]
@torch.no_grad()
def generate(self, tokens, query, choices=None):
cfg = self.reader.encoder.config
cfg.bsz = tokens["input_ids"].size(0)
cfg.n_context = min(self.opt.n_context, tokens["input_ids"].size(1))
tokens = {k: v.view(v.size(0), -1) for k, v in tokens.items()}
bos_token_id = None
prefix_allowed_tokens_fn = None
if self.opt.decoder_prompt_format is not None:
prefix_str = [self.opt.decoder_prompt_format.format_map({"query": q}) for q in query]
prefix_allowed_tokens_fn = self.get_prefix_allowed_tokens_fn(prefix_str)
outputs = self.reader.generate(
input_ids=tokens["input_ids"].cuda(),
attention_mask=tokens["attention_mask"].cuda(),
num_return_sequences=1,
max_length=self.opt.generation_max_length,
min_length=self.opt.generation_min_length,
num_beams=self.opt.generation_num_beams,
length_penalty=self.opt.generation_length_penalty,
forced_bos_token_id=bos_token_id,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
)
return outputs
def get_prefix_allowed_tokens_fn(self, prefix_str: Optional[str] = None):
if prefix_str:
prefix_tokens_ids = self.reader_tokenizer.batch_encode_plus(prefix_str, add_special_tokens=False)[
"input_ids"
]
def prefix_allowed_tokens_fn(batch_id: int, input_ids: torch.Tensor) -> List[int]:
if input_ids.shape[-1] > len(prefix_tokens_ids[batch_id]):
return self.READER_ALL_TOKENS
return prefix_tokens_ids[batch_id][input_ids.shape[-1] - 1]
else:
prefix_allowed_tokens_fn = None
return prefix_allowed_tokens_fn
def select_crossattention_scores(scores, mode):
if "eval" in mode:
return scores[mode[len("eval") :]]
elif "std" in mode:
return scores[mode[len("std") :]]
def _to_cuda(tok_dict):
return {k: v.cuda() for k, v in tok_dict.items()}
|
atlas-main
|
src/atlas.py
|
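A self-contained sketch (not from the repository) of the KL-divergence objective implemented by Atlas.kldivloss above, which distills the reader's per-passage "gold" scores into the retriever; the temperatures and score values are made up.
import torch

temperature_gold, temperature_score = 0.01, 0.01
retriever_score = torch.randn(2, 4, requires_grad=True)  # (batch, n_passages) query-passage dot products
gold_score = torch.randn(2, 4)                           # per-passage scores derived from the reader

target = torch.softmax(gold_score / temperature_gold, dim=-1)
log_pred = torch.log_softmax(retriever_score / temperature_score, dim=-1)
loss = torch.nn.KLDivLoss(reduction="batchmean")(log_pred, target)  # kldivloss above uses the default reduction
loss.backward()
print(loss.item())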
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import os
import subprocess
from logging import getLogger
import torch
logger = getLogger()
def init_distributed_mode_torchrun(params):
"""
Handle single and multi-GPU for single-node jobs launched with torchrun.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
For NCCL verbose mode, use:
os.environ["NCCL_DEBUG"] = "INFO"
"""
params.local_rank = int(os.environ["LOCAL_RANK"])
params.node_id = 0
params.n_nodes = 1
params.global_rank = int(os.environ["RANK"])
params.world_size = int(os.environ["WORLD_SIZE"])
# define whether this is the master process / if we are in distributed mode
params.is_main = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
params.is_distributed = True
# summary
PREFIX = "%i - " % params.global_rank
# set GPU device
if params.is_distributed:
torch.cuda.set_device(params.local_rank)
device = torch.device("cuda", params.local_rank)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
params.device = device
# initialize multi-GPU
if params.is_distributed:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
# print("Initializing PyTorch distributed ...")
# Fix for if gloo sockets are inconsistent
p1 = subprocess.Popen(["ip", "r"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "default"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
gloo_socket_ifname = subprocess.check_output(["awk", "{print $5}"], stdin=p2.stdout).decode("utf-8").strip()
p2.stdout.close()
os.environ["GLOO_SOCKET_IFNAME"] = gloo_socket_ifname
torch.distributed.init_process_group(
init_method="env://",
backend="nccl",
)
global GLOO_GROUP
GLOO_GROUP = torch.distributed.new_group(
list(range(params.world_size)),
backend="gloo",
timeout=datetime.timedelta(0, 600),
)
|
atlas-main
|
src/torchrun_utils.py
|
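Hedged usage note (not from the repository): init_distributed_mode_torchrun above only reads the environment variables that torchrun exports, so a single-node launch of a hypothetical entry script might look like the commented command below; inside each worker the variables can be inspected directly.
# torchrun --standalone --nproc_per_node=4 train.py ...   (train.py is a placeholder name)
import os

local_rank = int(os.environ.get("LOCAL_RANK", 0))
global_rank = int(os.environ.get("RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))
print(f"local_rank={local_rank} rank={global_rank} world_size={world_size}")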
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributed as dist
from src import slurm
class Gather(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def gather_wgrad(x: torch.Tensor, dim: int = 0):
if not dist.is_initialized():
return x
x_gather = Gather.apply(x)
x_gather = torch.cat(x_gather, dim=dim)
return x_gather
@torch.no_grad()
def all_gather(x: torch.Tensor, dim: int = 0):
if not dist.is_initialized():
return x
x_gather = [torch.ones_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(x_gather, x)
x_gather = torch.cat(x_gather, dim=dim)
return x_gather
@torch.no_grad()
def varsize_all_gather(x: torch.Tensor, dim: int = 0):
"""all_gather tensors of different sizes along the specified dimension with concatenation"""
if not dist.is_initialized():
return x
size = x.size(dim)
tensor_size = torch.tensor(size, device=x.device, dtype=torch.int64)
all_sizes = [torch.zeros_like(tensor_size) for _ in range(dist.get_world_size())]
dist.all_gather(all_sizes, tensor_size)
max_size = max([s.item() for s in all_sizes])
padding_tuple_size = [max_size - size if k == dim else x.size(k) for k in range(x.ndim)]
tensor_tuple_size = [max_size if k == dim else x.size(k) for k in range(x.ndim)]
if size != max_size:
padding = torch.empty(size=padding_tuple_size, dtype=x.dtype, device=x.device)
x = torch.cat((x, padding), dim=dim)
tensor_list = [torch.empty(tensor_tuple_size, device=x.device, dtype=x.dtype) for s in all_sizes]
dist.all_gather(tensor_list=tensor_list, tensor=x)
tensor_list = [torch.narrow(tensor, dim, start=0, length=all_sizes[k]) for k, tensor in enumerate(tensor_list)]
output = torch.cat(tensor_list, dim=dim)
return output
@torch.no_grad()
def varsize_gather(x: torch.Tensor, dst: int = 0, dim: int = 0):
"""gather tensors of different sizes along the specified dimension"""
if not dist.is_initialized():
return x
size = x.size(dim)
tensor_size = torch.tensor(size, device=x.device, dtype=torch.int64)
all_sizes = [torch.zeros_like(tensor_size) for _ in range(dist.get_world_size())]
dist.all_gather(all_sizes, tensor_size)
max_size = max([s.item() for s in all_sizes])
padding_tuple_size = [max_size - size if k == dim else x.size(k) for k in range(x.ndim)]
tensor_tuple_size = [max_size if k == dim else x.size(k) for k in range(x.ndim)]
if size != max_size:
padding = torch.empty(size=padding_tuple_size, dtype=x.dtype, device=x.device)
x = torch.cat((x, padding), dim=dim)
if get_rank() == dst:
tensor_list = [torch.empty(tensor_tuple_size, device=x.device, dtype=x.dtype) for s in all_sizes]
else:
tensor_list = None
dist.gather(x, gather_list=tensor_list, dst=dst)
if get_rank() == dst:
tensor_list = [torch.narrow(tensor, dim, start=0, length=all_sizes[k]) for k, tensor in enumerate(tensor_list)]
return tensor_list
@torch.no_grad()
def get_varsize(x: torch.Tensor, dim: int = 0):
"""gather tensors of different sizes along the first dimension"""
if not dist.is_initialized():
return torch.tensor([x.size(dim)])
# determine max size
size = torch.tensor([x.size(dim)], device=x.device, dtype=torch.int)
allsizes = [torch.zeros_like(size) for _ in range(dist.get_world_size())]
dist.all_gather(allsizes, size)
allsizes = torch.cat(allsizes)
return allsizes
@torch.no_grad()
def gather_number(x):
if not dist.is_initialized():
return [x]
output = [None for _ in range(get_world_size())]
dist.all_gather_object(output, x, group=slurm.get_gloo_group())
return output
def barrier():
if dist.is_initialized():
torch.distributed.barrier()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main():
return get_rank() == 0
def get_world_size():
if not dist.is_initialized():
return 1
else:
return dist.get_world_size()
def average_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
if is_main():
x = x / dist.get_world_size()
return x
def sum_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
return x
def weighted_average(x, count):
if not dist.is_initialized():
if isinstance(x, torch.Tensor):
x = x.item()
return x, count
t_loss = torch.tensor([x * count]).cuda()
t_total = torch.tensor([count]).cuda()
t_loss = sum_main(t_loss)
t_total = sum_main(t_total)
return (t_loss / t_total).item(), t_total.item()
|
atlas-main
|
src/dist_utils.py
|
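A self-contained illustration (not from the repository) of the pad-then-narrow trick that varsize_all_gather and varsize_gather above rely on: tensors are padded up to the largest size along `dim` so a fixed-size collective can run, then trimmed back to their true lengths. The per-rank sizes below are made up and no process group is needed here.
import torch

dim = 0
chunks = [torch.arange(3), torch.arange(5), torch.arange(2)]  # what each rank would contribute
sizes = [c.size(dim) for c in chunks]
max_size = max(sizes)

# Pad every chunk to the common maximum size, as done before dist.all_gather.
padded = [torch.cat([c, c.new_zeros(max_size - c.size(dim))], dim=dim) for c in chunks]

# After a real all_gather every rank would hold `padded`; trim the padding back off.
trimmed = [torch.narrow(t, dim, 0, n) for t, n in zip(padded, sizes)]
print(torch.cat(trimmed, dim=dim))  # tensor([0, 1, 2, 0, 1, 2, 3, 4, 0, 1])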
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import string
import torch
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from src.evaluation import exact_match_score
from src.options import Options
from src.tasks.base import BaseTask
def _get_permutation_orderings(N, permutations_type):
li = list(range(N))
if permutations_type == "cyclic":
orderings = [li[N - i :] + li[: N - i] for i in range(N)]
elif permutations_type == "all":
orderings = list(itertools.permutations(li))
else:
orderings = [li]
return orderings
class Task(BaseTask):
metrics = ["debiased_accuracy", "accuracy", "eval_loss"]
def __init__(self, opt: Options, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
super().__init__()
self.tokenizer = tokenizer
self.maximum_question_length = 356
self.choices = string.ascii_uppercase[: opt.multiple_choice_num_options]
self.choice2index = {o: self.tokenizer(o)["input_ids"][0] for o in self.choices}
@staticmethod
def get_multiple_choice_question_prompt(tokenizer, question, choices, maximum_length=356):
def _length_in_tokens(string):
return len(tokenizer(string)["input_ids"])
def _get_prompt(question, choices_wseparator):
preprocessed_question = f"question: {question.strip()} options: {choices_wseparator} answer: <extra_id_0>"
return preprocessed_question
choices_wseparator = " ".join([f"({L}) {T}" for L, T in choices.items()]).strip()
question_with_options = _get_prompt(question, choices_wseparator)
if _length_in_tokens(question_with_options) > maximum_length:
max_qlen = maximum_length - _length_in_tokens(_get_prompt("", choices_wseparator))
truncated_question = tokenizer.decode(
tokenizer(question)["input_ids"][-max_qlen:], skip_special_tokens=True
)
question_with_options = _get_prompt(truncated_question, choices_wseparator)
return question_with_options
def process(self, example, *args, **kwargs):
preprocessed_question = self.get_multiple_choice_question_prompt(
self.tokenizer, example["question"], example["options"], maximum_length=self.maximum_question_length
)
target = f'<extra_id_0> {example["answer"]}'
return {
"query": preprocessed_question,
"target": target,
"choices": self.choices,
"passages": [{"title": "", "text": ""}],
"answers": [example["answer"]],
"metadata": example,
}
@staticmethod
def get_permutations(example, permutations_type):
"""clones example according to permutations_type (either "none", 'cyclic' or 'full'"""
options, answer = example["options"], example["answer"]
uid = example["question"] + " ".join(options.values())
choice_keys = list(sorted(options.keys()))
choice_values = [options[l] for l in choice_keys]
orderings = _get_permutation_orderings(len(choice_keys), permutations_type)
permuted_examples = []
for ordering in orderings:
permuted_options = {l: choice_values[o] for l, o in zip(choice_keys, ordering)}
permuted_answer = [k for k, ans in permuted_options.items() if ans == options[answer]][0]
permed_example = copy.deepcopy(example)
permed_example["options"] = permuted_options
permed_example["answer"] = permuted_answer
permed_example["is_original"] = permuted_options == example["options"]
permed_example["uid"] = uid
permuted_examples.append(permed_example)
return permuted_examples
@staticmethod
def data_iterator(*args, **kwargs):
# wrap the base data iterator so that permuted copies of each example are yielded
super_iterator = super(Task, Task).data_iterator(*args, **kwargs)
perms_type = (
kwargs["opt"].multiple_choice_eval_permutations
if kwargs.get("is_eval", False)
else kwargs["opt"].multiple_choice_train_permutations
)
for example in super_iterator:
for permed_item in Task.get_permutations(example, perms_type):
yield permed_item
def evaluation(self, prediction, ground_truths):
sample_metrics = {"accuracy": exact_match_score(prediction, ground_truths)}
return sample_metrics
def get_choice_logits(self, logits):
prediction_logits = {
letter: logits[1, letter_index].cpu().item() for letter, letter_index in self.choice2index.items()
}
return prediction_logits
def _get_original_instance(self, permutations):
return [p for p in permutations if p["metadata"]["is_original"]][0]
def _marginalize_across_permutations(self, permutations):
original_instance = self._get_original_instance(permutations)
text_answer_2_letter = {v: k for k, v in original_instance["metadata"]["options"].items()}
aggregate_probs = {}
for perm in permutations:
logits = torch.tensor([perm["choice_logits"][c] for c in self.choices])
probs = torch.softmax(logits, dim=0).tolist()
perm_text_options = [perm["metadata"]["options"][c] for c in self.choices]
for t, p in zip(perm_text_options, probs):
aggregate_probs.setdefault(t, []).append(p)
marginalized = {text_answer_2_letter[t]: torch.tensor(v).mean().item() for t, v in aggregate_probs.items()}
return marginalized, aggregate_probs
def _reduce_permutations(self, dataset_wpred):
to_agg = {}
for output in dataset_wpred:
to_agg.setdefault(output["metadata"]["uid"], []).append(output)
output_dataset_wpred = []
for _, perms in to_agg.items():
original_instance = copy.deepcopy(self._get_original_instance(perms))
scores, all_scores = self._marginalize_across_permutations(perms)
del original_instance["choice_logits"]
original_instance["choice_probs"] = scores
original_instance["generation"] = max(scores.items(), key=lambda x: x[1])[0]
original_instance["choice_probs"] = scores
original_instance["all_probs"] = all_scores
original_instance["permutations"] = perms
output_dataset_wpred.append(original_instance)
return output_dataset_wpred
def evaluation_postprocessing(self, metrics, dataset_with_predictions):
dataset_with_predictions = self._reduce_permutations(dataset_with_predictions)
metrics["debiased_accuracy"] = [
float(d["generation"] == d["metadata"]["answer"]) for d in dataset_with_predictions
]
return metrics, dataset_with_predictions
|
atlas-main
|
src/tasks/multiple_choice.py
|
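A minimal sketch (not from the repository) of the cyclic orderings produced by _get_permutation_orderings above for multiple-choice debiasing: with four options, every option occupies every answer position exactly once across the four cloned examples.
def cyclic_orderings(n):
    li = list(range(n))
    return [li[n - i:] + li[:n - i] for i in range(n)]

print(cyclic_orderings(4))
# [[0, 1, 2, 3], [3, 0, 1, 2], [2, 3, 0, 1], [1, 2, 3, 0]]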
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from src.evaluation import exact_match_score
from src.tasks.base import BaseTask
class Task(BaseTask):
metrics = ["accuracy"]
def process(self, example, *args, **kwargs):
clean_input = example["claim"]
clean_target = ""
if "label" in example:
target = example["label"]
if target == "NOT ENOUGH INFO":
clean_target = "maybe"
elif target == "REFUTES":
clean_target = "false"
elif target == "SUPPORTS":
clean_target = "true"
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["metadata"] = example.get("metadata", {})
example["query"] = f"question: {clean_input} answer: <extra_id_0>"
if clean_target is not None:
example["target"] = f"<extra_id_0> {clean_target}"
example["passages"] = [{"title": "", "text": ""}]
example["metadata"]["clean_target"] = clean_target
example["answers"] = [clean_target]
return example
def evaluation(self, prediction, ground_truths):
sample_metrics = {"accuracy": exact_match_score(prediction, ground_truths)}
return sample_metrics
|
atlas-main
|
src/tasks/fever.py
|
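A hedged, self-contained example (not from the repository) of the query/target strings that the FEVER Task.process above produces; the claim text is made up.
example = {"claim": "The Eiffel Tower is in Berlin.", "label": "REFUTES"}
label_map = {"SUPPORTS": "true", "REFUTES": "false", "NOT ENOUGH INFO": "maybe"}
clean_target = label_map[example["label"]]

query = f"question: {example['claim']} answer: <extra_id_0>"
target = f"<extra_id_0> {clean_target}"
print(query)   # question: The Eiffel Tower is in Berlin. answer: <extra_id_0>
print(target)  # <extra_id_0> false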
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from src.evaluation import exact_match_score, f1_score, rouge_score
from src.options import Options
from src.tasks.base import BaseTask, filter_results_by_id
class Task(BaseTask):
metrics = ["eval_loss", "accuracy", "f1", "rouge_1", "rouge_2", "rouge_L"]
def __init__(self, opt: Options, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
self.tokenizer = tokenizer
self.min_words = opt.min_words_per_lm_instance
self.mlm_noise_density = opt.mlm_noise_density
self.mlm_mean_noise_span_length = opt.mlm_mean_noise_span_length
self.text_maxlength = opt.text_maxlength
def filter(self, *args, **kwargs):
"""Remove the passage we are trying to denoise from retrieved results"""
return filter_results_by_id(*args, **kwargs)
def process(self, example, *args, **kwargs):
"""Noises the target field using T5 MLM masking, saves the orginal target in metadata,"""
clean_target = example["text"]
if len(clean_target.strip()) == 0:
return None
if self.min_words is not None and len(clean_target.split()) < self.min_words:
return None
output_example = {}
inp, out = self.apply_mlm_noise(
self.tokenizer,
clean_target,
self.mlm_noise_density,
self.mlm_mean_noise_span_length,
self.text_maxlength,
)
if not "passages" in example:
output_example["passages"] = [{"title": "", "text": ""}]
output_example["query"] = inp
output_example["target"] = out
output_example["metadata"] = example
output_example["metadata"]["clean_target"] = clean_target
return output_example
def evaluation(self, prediction, ground_truths):
sample_metrics = {}
sample_metrics["accuracy"] = exact_match_score(prediction, ground_truths)
sample_metrics["f1"] = f1_score(prediction, ground_truths)
rouge_1, rouge_2, rouge_L = rouge_score(prediction, ground_truths)
sample_metrics["rouge_1"] = rouge_1
sample_metrics["rouge_2"] = rouge_2
sample_metrics["rouge_L"] = rouge_L
return sample_metrics
@staticmethod
def apply_mlm_noise(
tokenizer,
text,
mlm_noise_density,
mlm_mean_noise_span_length,
max_input_length,
):
tokens = tokenizer(text, add_special_tokens=False, max_length=max_input_length, truncation=True)["input_ids"]
length = len(tokens)
num_noise_tokens = max(round(length * mlm_noise_density), 1)
num_noise_spans = max(round(num_noise_tokens / mlm_mean_noise_span_length), 1)
num_nonnoise_tokens = length - num_noise_tokens
def _get_span_lengths(num_items, num_segments):
positions = [i < (num_segments - 1) for i in range(num_items - 1)]
random.shuffle(positions)
positions.append(True)
output, prev_span_start = [], -1
for i, n in enumerate(positions):
if n:
output.append(i - prev_span_start)
prev_span_start = i
return output
noise_span_lengths = _get_span_lengths(num_noise_tokens, num_noise_spans)
nonnoise_span_lengths = _get_span_lengths(num_nonnoise_tokens, num_noise_spans)
inputs, outputs, offset = [], [], 0
for i, (inp_length, out_length) in enumerate(zip(nonnoise_span_lengths, noise_span_lengths)):
sentinel_id = tokenizer.additional_special_tokens_ids[i]
inputs += tokens[offset : offset + inp_length] + [sentinel_id]
offset += inp_length
outputs += [sentinel_id] + tokens[offset : offset + out_length]
offset += out_length
return tokenizer.decode(inputs), tokenizer.decode(outputs)
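# --- Illustrative sketch (not part of the original Atlas task) --------------
# A dependency-free walk-through of the span-corruption format produced by
# apply_mlm_noise above: non-noise spans stay in the input, each noise span is
# replaced by a sentinel in the input and emitted after the same sentinel in
# the output. The single-character "tokens" and sentinel strings below are
# stand-ins for real T5 tokenizer ids.
if __name__ == "__main__":
    tokens = list("abcdefghij")     # pretend token ids
    nonnoise_span_lengths = [4, 3]  # e.g. what _get_span_lengths(7, 2) could return
    noise_span_lengths = [2, 1]     # e.g. what _get_span_lengths(3, 2) could return
    sentinels = ["<extra_id_0>", "<extra_id_1>"]
    inputs, outputs, offset = [], [], 0
    for i, (inp_length, out_length) in enumerate(zip(nonnoise_span_lengths, noise_span_lengths)):
        inputs += tokens[offset : offset + inp_length] + [sentinels[i]]
        offset += inp_length
        outputs += [sentinels[i]] + tokens[offset : offset + out_length]
        offset += out_length
    print(" ".join(inputs))   # a b c d <extra_id_0> g h i <extra_id_1>
    print(" ".join(outputs))  # <extra_id_0> e f <extra_id_1> j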
|
atlas-main
|
src/tasks/mlm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from . import base, fever, kilt, lm, mlm, multiple_choice, qa, section
AVAILABLE_TASKS = {m.__name__.split(".")[-1]: m for m in [base, mlm, lm, multiple_choice, kilt, section, fever, qa]}
def get_task(opt, tokenizer):
if opt.task not in AVAILABLE_TASKS:
raise ValueError(f"{opt.task} not recognised")
task_module = AVAILABLE_TASKS[opt.task]
return task_module.Task(opt, tokenizer)
|
atlas-main
|
src/tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from src.evaluation import exact_match_score, f1_score, rouge_score
from src.options import Options
from src.tasks.base import BaseTask, filter_results_by_id
logger = logging.getLogger(__name__)
class Task(BaseTask):
metrics = ["eval_loss", "accuracy", "f1", "rouge_1", "rouge_2", "rouge_L"]
def __init__(self, opt: Options, *args, **kwargs):
self.min_words = opt.min_words_per_lm_instance
def process(self, example, *args, **kwargs):
if not "section" in example or len(example["section"].strip()) == 0:
return
query = ", ".join([example["title"], example["section"]])
text = example["text"]
if len(text.strip()) == 0:
return
if self.min_words is not None and len(text.split()) < self.min_words:
return
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["query"] = query
example["target"] = text
example["metadata"] = {}
example["metadata"]["id"] = example["id"]
return example
def evaluation(self, prediction, ground_truths):
sample_metrics = {}
sample_metrics["accuracy"] = exact_match_score(prediction, ground_truths)
sample_metrics["f1"] = f1_score(prediction, ground_truths)
rouge_1, rouge_2, rouge_L = rouge_score(prediction, ground_truths)
sample_metrics["rouge_1"] = rouge_1
sample_metrics["rouge_2"] = rouge_2
sample_metrics["rouge_L"] = rouge_L
return sample_metrics
def filter(self, *args, **kwargs):
"""Remove the passage we are trying to generate from retrieved results"""
return filter_results_by_id(*args, **kwargs)
|
atlas-main
|
src/tasks/section.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List
from src.evaluation import exact_match_score, f1_score, normalize_answer
from src.tasks.base import BaseTask
class Task(BaseTask):
metrics = ["accuracy", "exact_match", "f1"]
def process(self, example, *args, **kwargs):
clean_input = example["input"]
answers = list(self.get_gold_answers(example))
if "filename" in example and "fever" in example["filename"]:
answers = ["true" if a == "SUPPORTS" else "false" for a in answers]
clean_target = random.choice(answers)
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["metadata"] = example.get("metadata", {})
example["query"] = f"question: {clean_input} answer: <extra_id_0>"
example["target"] = f"<extra_id_0> {clean_target}"
example["answers"] = answers
example["passages"] = [{"title": "", "text": ""}]
example["metadata"]["clean_target"] = clean_target
return example
def get_gold_answers(self, gold):
ground_truths = set()
for item in gold["output"]:
if "answer" in item and item["answer"] and len(item["answer"].strip()) > 0:
ground_truths.add(item["answer"].strip())
return ground_truths
def evaluation(self, prediction: str, ground_truths: List[str]):
sample_metrics = {
"accuracy": exact_match_score(prediction, ground_truths),
"exact_match": exact_match_score(prediction, ground_truths, normalize_answer),
"f1": f1_score(prediction, ground_truths, normalize_answer),
}
return sample_metrics
|
atlas-main
|
src/tasks/kilt.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
import re
from src.evaluation import exact_match_score, f1_score, rouge_score
from src.options import Options
from src.tasks.base import BaseTask, filter_results_by_id
logger = logging.getLogger(__name__)
class Task(BaseTask):
metrics = ["eval_loss", "accuracy", "f1", "rouge_1", "rouge_2", "rouge_L"]
def __init__(self, opt: Options, *args, **kwargs):
self.min_words = opt.min_words_per_lm_instance
self.min_context_ratio = opt.min_lm_context_ratio
self.max_context_ratio = opt.max_lm_context_ratio
def filter(self, *args, **kwargs):
"""Remove the passage we are trying to generate from retrieved results"""
return filter_results_by_id(*args, **kwargs)
def process(self, example, *args, **kwargs):
text = example["text"]
if len(text.strip()) == 0:
return
if self.min_words is not None and len(text.split()) < self.min_words:
return
inp, out = self.split(text, self.min_context_ratio, self.max_context_ratio)
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["query"] = inp
example["target"] = out
example["metadata"] = {}
example["metadata"]["id"] = example["id"]
return example
@staticmethod
def split(text, min_context_ratio, max_context_ratio):
"""Splits text into two segments for langauge modelling.
Left segment is conditioning context, right segment is for generating.
The left segment must be between min_context_ratio and max_context_ratio of right segement in terms of length.
"""
words = re.split(r"(\S+)", text)
min_length = int(max(2, len(words) * min_context_ratio))
max_length = int(max(min(len(words) - 2, len(words) * max_context_ratio), min_length + 1))
split_idx = random.randint(min_length, max_length)
inp = "".join(words[:split_idx])
out = "".join(words[split_idx:])
return inp, out
def evaluation(self, prediction, ground_truths):
sample_metrics = {}
sample_metrics["accuracy"] = exact_match_score(prediction, ground_truths)
sample_metrics["f1"] = f1_score(prediction, ground_truths)
rouge_1, rouge_2, rouge_L = rouge_score(prediction, ground_truths)
sample_metrics["rouge_1"] = rouge_1
sample_metrics["rouge_2"] = rouge_2
sample_metrics["rouge_L"] = rouge_L
return sample_metrics
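# --- Illustrative sketch (not part of the original Atlas task) --------------
# Demonstrates the prefix/continuation split performed by Task.split above.
# Because re.split uses a capturing group, the whitespace tokens are kept, so
# joining the two halves reproduces the original text exactly. The ratios 0.25
# and 0.75 stand in for opt.min_lm_context_ratio / opt.max_lm_context_ratio.
if __name__ == "__main__":
    text = "Language modelling conditions on a prefix and generates the rest."
    words = re.split(r"(\S+)", text)
    min_length = int(max(2, len(words) * 0.25))
    max_length = int(max(min(len(words) - 2, len(words) * 0.75), min_length + 1))
    split_idx = random.randint(min_length, max_length)
    prefix, continuation = "".join(words[:split_idx]), "".join(words[split_idx:])
    assert prefix + continuation == text
    print(repr(prefix), "->", repr(continuation))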
|
atlas-main
|
src/tasks/lm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import random
from collections import defaultdict
from src.evaluation import exact_match_score
logger = logging.getLogger(__name__)
class BaseTask(object):
metrics = ["accuracy", "eval_loss"]
def __init__(self, *args, **kwargs):
self.filter = None
@staticmethod
def data_iterator(filenames, world_rank=-1, world_size=-1, repeat_if_less_than_world_size=False, *args, **kwargs):
if isinstance(filenames, str):
filenames = [filenames]
def _iter():
# iterate over files
return (line for filename in filenames for line in open(filename, encoding="utf-8"))
def _stop():
# stop iterating over data when at least one example has been fed to each worker
return (total_yielded >= world_size) if repeat_if_less_than_world_size else (total_yielded > 0)
total_yielded = 0
while not _stop():
for line in _iter():
total_yielded += 1
if world_rank > -1 and total_yielded % world_size != world_rank:
continue
example = json.loads(line)
yield example
@staticmethod
def batch_iterator(data_iterator, batch_size, drop_last=False, shuffle=False):
if shuffle:
data_iterator = BaseTask.shuffle_iterator(data_iterator)
batch = defaultdict(lambda: [])
batch["__size__"] = 0
batch_counter = 0
for example in data_iterator:
for k, v in example.items():
batch[k].append(v)
batch["__size__"] += 1
if batch["__size__"] == batch_size:
batch_counter += 1
yield batch
batch = defaultdict(lambda: [])
batch["__size__"] = 0
if batch["__size__"] > 0 and not drop_last:
yield batch
def evaluation(self, prediction, ground_truths):
"""most basic evaluation: checks if prediction matches ground truth"""
sample_metrics = {"accuracy": exact_match_score(prediction, ground_truths)}
return sample_metrics
@staticmethod
def shuffle_iterator(dataset):
d = list(dataset)
random.shuffle(d)
for x in d:
yield x
def process(self, example, *args, **kwargs):
"""most basic example processing, should be overwritten in subclasses"""
assert "target" in example, "base task requires a `target` field string to be defined"
assert "query" in example, "base task requires a `query` field string to be defined"
assert type(example["target"]) == str, "base task requires a `target` field string to be defined"
assert type(example["query"]) == str, "base task requires a `query` field string to be defined"
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
return example
def evaluation_postprocessing(self, metrics, dataset_with_predictions):
"""do any necessary postprocessing of generated predictions or metrics after the evaluation loop"""
return metrics, dataset_with_predictions
def filter_results_by_id(batch_metadata, passages, scores, topk, training=False):
"""
Removes retrieved passages from retrieved set if their id is the same as the instance in the batch metadata.
Useful for MLM or LM where we dont want model to "cheat" by retrieving the passgage it is denoising/generating.
If, once violating passages are removed, there are < topk results, the violating passages will be added back,
in with a warning
"""
if batch_metadata is None:
logger.warning("Trying to filter a batch with no metadata - probably a padding instance - just return the topk")
return [ps[:topk] for ps in passages], [ss[:topk] for ss in scores]
def _same_passage_chunk(source_metadata, passage):
return passage["id"] == source_metadata["id"]
output_passages, output_scores = [], []
for metadata, passage_li, scores_li in zip(batch_metadata, passages, scores):
filtered_passages_and_scores, violating_passages_and_scores = [], []
for (p, s) in zip(passage_li, scores_li):
if not _same_passage_chunk(metadata, p):
filtered_passages_and_scores.append((p, s))
else:
violating_passages_and_scores.append((p, s))
if topk > len(filtered_passages_and_scores):
logger.warning(f"{len(filtered_passages_and_scores)} passages after filtering for topk = {topk}")
filtered_passages_and_scores += violating_passages_and_scores
filtered_passages, filtered_scores = zip(*filtered_passages_and_scores)
output_passages.append(filtered_passages)
output_scores.append(filtered_scores)
return [ps[:topk] for ps in output_passages], [ss[:topk] for ss in output_scores]
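# --- Illustrative sketch (not part of the original Atlas task) --------------
# Toy call to filter_results_by_id: the passage whose id equals the instance
# id in the batch metadata is dropped, and the next-best passage takes its
# place in the top-2 results.
if __name__ == "__main__":
    batch_metadata = [{"id": "2"}]
    passages = [[{"id": "1"}, {"id": "2"}, {"id": "3"}]]
    scores = [[0.9, 0.8, 0.7]]
    kept_passages, kept_scores = filter_results_by_id(batch_metadata, passages, scores, topk=2)
    print(kept_passages)  # [({'id': '1'}, {'id': '3'})]
    print(kept_scores)    # [(0.9, 0.7)]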
|
atlas-main
|
src/tasks/base.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from src.evaluation import exact_match_score, f1_score, normalize_answer
from src.options import Options
from src.tasks.base import BaseTask
class Task(BaseTask):
metrics = ["exact_match", "f1", "eval_loss"]
def __init__(self, opt: Options, *args, **kwargs):
super().__init__()
self.qa_prompt_format_str = opt.qa_prompt_format
def get_qa_prompt(self, question: str) -> str:
return self.qa_prompt_format_str.format(question=question)
def process(self, example, *args, **kwargs):
if "target" in example:
target = example["target"]
elif "answers" in example:
target = random.choice(example["answers"])
else:
target = None
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["metadata"] = example.get("metadata", {})
example["query"] = self.get_qa_prompt(example["question"])
if target is not None:
example["target"] = f"<extra_id_0> {target}"
return example
def evaluation(self, prediction, ground_truths):
sample_metrics = {
"exact_match": exact_match_score(prediction, ground_truths, normalize_answer),
"f1": f1_score(prediction, ground_truths, normalize_answer),
}
return sample_metrics
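# --- Illustrative sketch (not part of the original Atlas task) --------------
# Shows how get_qa_prompt above expands a question into a prompt and how
# process builds the target. The SimpleNamespace is only a stand-in for the
# real Options object, and the format string is an assumed example; the actual
# default comes from opt.qa_prompt_format in src/options.py.
if __name__ == "__main__":
    from types import SimpleNamespace
    fake_opt = SimpleNamespace(qa_prompt_format="question: {question} answer: <extra_id_0>")
    task = Task(fake_opt)
    example = task.process({"question": "Who wrote Hamlet?", "answers": ["William Shakespeare"]})
    print(example["query"])   # question: Who wrote Hamlet? answer: <extra_id_0>
    print(example["target"])  # <extra_id_0> William Shakespeare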
|
atlas-main
|
src/tasks/qa.py
|
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import os
import glob
import shutil
parser = argparse.ArgumentParser(
description='Generates XML RTL descriptor file for OpenCL compilation',
epilog='', formatter_class=argparse.RawTextHelpFormatter
)
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--input', '-i', metavar='<input file>', type=str,
nargs=1, required=True, help='input file')
requiredNamed.add_argument('--output_xml', '-x', metavar='<output file>', type=str,
nargs=1, required=True, help='output file')
requiredNamed.add_argument('--output_rtl', '-t', metavar='<output file>', type=str,
nargs=1, required=True, help='output file')
requiredNamed.add_argument('--rtl_root', '-r', metavar='<rtl root>', type=str,
nargs=1, required=True, help='rtl root location')
requiredNamed.add_argument('--output_stub', '-s', metavar='<output file>', type=str,
nargs=1, required=True, help='output file')
args = parser.parse_args()
input_file = open(args.input[0], 'r')
output_xml = open(args.output_xml[0], 'w')
output_rtl = open(args.output_rtl[0], 'w')
output_stub = open(args.output_stub[0], 'w')
type_width = None
acc_width = None
product_width = None
acc_divide_cycles = None
type_divide_cycles = None
rtl_files = []
stub_files = []
def include_sv_files(file_list, cur_dir=False):
for filename in file_list:
if (not cur_dir):
filename = os.path.join(args.rtl_root[0], filename)
rtl_files.append(filename)
def include_files(file_list, cur_dir=False):
for filename in file_list:
if (not cur_dir):
filename = os.path.join(args.rtl_root[0], filename)
shutil.copyfile(filename, os.path.basename(filename))
def include_stub_files(file_list):
for filename in file_list:
stub_files.append(filename)
def set_type_width(w):
    # Use globals() (rather than a plain local assignment) so the module-level value used for substitution below is updated.
globals()['type_width'] = w
def set_acc_width(w):
globals()['acc_width'] = w
def set_product_width(w):
globals()['product_width'] = w
def set_acc_divide_cycles(c):
globals()['acc_divide_cycles'] = c
def set_type_divide_cycles(c):
globals()['type_divide_cycles'] = c
lines = []
doing_python = 0
code_block = ''
comment_indent = ''
RE_PYTHON_BLOCK_BEGIN = re.compile(r"^(\s*)START_PY(\s*)$")
RE_PYTHON_BLOCK_END = re.compile(r'^(\s*)END_PY(\s*)$')
for line in input_file:
reg0 = re.search(RE_PYTHON_BLOCK_BEGIN, line)
reg1 = re.search(RE_PYTHON_BLOCK_END, line)
if doing_python == 0 and reg0:
doing_python = 1
code_block = ''
lines.append(reg0.group(1) + '\n<!-- python -->\n')
comment_indent = reg0.group(1)
elif doing_python == 1 and reg1:
doing_python = 0
try:
exec(code_block)
except Exception:
print("Error in code:\n" + code_block + "\n")
raise
lines.append(reg1.group(1) + '\n<!-- end python -->\n')
elif doing_python == 1:
dum = re.sub(r"^(" + comment_indent + r")", r'', line)
code_block += dum
else:
# Main XML block
line = re.sub('(TYPE_WIDTH)', '{}'.format(type_width), line)
line = re.sub('(ACC_WIDTH)', '{}'.format(acc_width), line)
line = re.sub('(PRODUCT_WIDTH)', '{}'.format(product_width), line)
line = re.sub('(ACC_DIVIDE_CYCLES)', '{}'.format(acc_divide_cycles), line)
line = re.sub('(TYPE_DIVIDE_CYCLES)', '{}'.format(type_divide_cycles), line)
lines.append(line)
for line in lines:
output_xml.write(line)
input_file.close()
output_xml.close()
# write the single RTL file
for filename in rtl_files:
f = open(filename, 'r')
output_rtl.write("// ***\n// *** RTL from source file {}\n// ***\n\n".format(filename))
for line in f:
output_rtl.write(line)
f.close()
output_rtl.write("\n\n");
output_rtl.close()
# write the single stub OpenCL file
for filename in stub_files:
f = open(filename, 'r')
output_stub.write("// ***\n// *** OpenCL from source file {}\n// ***\n\n".format(filename))
for line in f:
output_stub.write(line)
f.close()
output_stub.write("\n\n");
output_stub.close()
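# --- Illustrative sketch (not part of the build flow) -----------------------
# A minimal, standalone illustration of the templating scheme implemented
# above: Python between START_PY / END_PY markers is exec'd (here it just sets
# a width), and placeholder names in the remaining lines are substituted.
# It is not called by this script; invoke _demo_templating() manually to try it.
def _demo_templating():
    template = ('START_PY\n'
                'width = 8\n'
                'END_PY\n'
                '<kernel type_width="TYPE_WIDTH"/>\n')
    namespace, rendered, in_python, block = {}, [], False, ''
    for tline in template.splitlines(keepends=True):
        if tline.strip() == 'START_PY':
            in_python, block = True, ''
        elif tline.strip() == 'END_PY':
            in_python = False
            exec(block, namespace)
        elif in_python:
            block += tline
        else:
            rendered.append(tline.replace('TYPE_WIDTH', str(namespace.get('width'))))
    print(''.join(rendered))  # <kernel type_width="8"/>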
|
deepfloat-main
|
bitstream/build_xml.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import fpga
import fpga_resnet
import torch
from torch.utils.cpp_extension import CppExtension, BuildExtension
import torchvision.models as models
import validate
aocx_file = 'loglib'
ext, dev = fpga.init_fpga(aocx_file)
class FpgaNN():
def __init__(self, model, mul_factor=1.0):
self.model = model
self.output_p = None
self.mul_factor = mul_factor
def forward(self, input):
input_p = ext.to_posit(*dev, input)
self.output_p = self.model.forward(*dev, input_p)
# FIXME: attempt to fix d2h copy assert
dev[2].blockingWait()
return ext.to_float(*dev, self.output_p).mul_(self.mul_factor)
def forward_p(self, input):
input_p = ext.to_posit(*dev, input)
self.output_p = self.model.forward(*dev, input_p)
def forward_f(self):
return ext.to_float(*dev, self.output_p).mul_(self.mul_factor)
def get_fpga_mods(model):
def append_mod(mods, m, name):
mods.append([name, m])
mods = []
for m, name in zip([model.conv1, model.maxpool],
['conv1', 'maxpool']):
append_mod(mods, m, name)
for layer, layer_name in zip([model.layer1, model.layer2, model.layer3, model.layer4],
['layer1', 'layer2', 'layer3', 'layer4']):
for idx, seq in enumerate(layer):
for m, name in zip([seq.conv1, seq.conv2],
['conv1', 'conv2']):
append_mod(mods, m,
'{}.{}.{}'.format(layer_name, idx, name))
if (hasattr(seq, 'conv3')):
append_mod(mods, seq.conv3,
'{}.{}.{}'.format(layer_name, idx, 'conv3'))
if (seq.downsample):
append_mod(mods, seq.downsample,
'{}.{}.{}.0'.format(layer_name, idx, 'downsample'))
append_mod(mods, seq.add,
'{}.{}.{}'.format(layer_name, idx, 'add'))
for m, name in zip([model.avgpool, model.fc], ['avgpool', 'fc']):
append_mod(mods, m, name)
return mods
cpu_model = models.resnet50(True)
cpu_model.eval()
fc_n_scale = -4
fpga_model = fpga_resnet.resnet50(ext, *dev)
fpga_model.fc.setOutputScale(fc_n_scale)
fpga_resnet.fuse_resnet_params(ext, dev, cpu_model, fpga_model, fc_mul=1.0)
loader = validate.make_loader(batch_size=16, random=False)
scale = 2.0 ** fc_n_scale
mod = FpgaNN(fpga_model, 1.0 / scale)
print('ResNet-50 {}:'.format(aocx_file))
validate.validate(loader,
limit=None,
fpga_h=mod,
# reference_model=cpu_model)
reference_model=None)
|
deepfloat-main
|
py/run_fpga_resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import fpga
import validate
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.models.resnet as resnet
# def fuse_bn(conv, bn):
# conv_w = conv.weight.clone()
# conv_b = None
# if (conv.bias):
# conv_b = conv.bias.clone()
# else:
# conv_b = torch.FloatTensor(conv_w.size(0)).zero_()
# for c in range(conv_w.size(0)):
# bn_mean = bn.running_mean[c]
# bn_var = bn.running_var[c]
# bn_weight = bn.weight[c]
# bn_bias = bn.bias[c]
# inv_var = 1.0 / math.sqrt(bn_var + 1e-5)
# conv_w[c].mul_(bn_weight * inv_var)
# conv_b[c].add_(-bn_mean * inv_var * bn_weight + bn_bias)
# return conv_w, conv_b
# def fuse_resnet_params(m):
# convs = []
# convs.append([m.conv1, m.bn1])
# for seq in [m.layer1, m.layer2, m.layer3, m.layer4]:
# for bb in seq:
# convs.append([bb.conv1, bb.bn1])
# convs.append([bb.conv2, bb.bn2])
# if (bb.conv3):
# convs.append([bb.conv3, bb.bn3])
# if (bb.downsample):
# convs.append([bb.downsample[0], bb.downsample[1]])
# params = []
# for c in convs:
# w, b = fuse_bn(c[0], c[1])
# params.append(['conv', [w, b]])
# params.append(['fc', [m.fc.weight, m.fc.bias]])
# return params
# def orig_resnet_params(m):
# modules = []
# modules.extend([['conv', m.conv1], ['bn', m.bn1]])
# for seq in [m.layer1, m.layer2, m.layer3, m.layer4]:
# for bb in seq:
# modules.extend([['conv', bb.conv1], ['bn', bb.bn1]])
# modules.extend([['conv', bb.conv2], ['bn', bb.bn2]])
# if (bb.conv3):
# modules.extend([['conv', bb.conv3], ['bn', bb.bn3]])
# if (bb.downsample):
# modules.extend([['conv', bb.downsample[0]], ['bn', bb.downsample[1]]])
# modules.append(['fc', m.fc])
# params = []
# for m in modules:
# if (m[0] == 'conv'):
# if (m[1].bias != None):
# params.append([m[0], [m[1].weight,
# m[1].bias]])
# else:
# params.append([m[0], [m[1].weight]])
# elif (m[0] == 'bn'):
# params.append([m[0], [m[1].running_mean,
# m[1].running_var,
# m[1].weight,
# m[1].bias]])
# elif (m[0] == 'fc'):
# params.append([m[0], [m[1].weight,
# m[1].bias]])
# return params
# destructively updates conv: folds the batch-norm parameters into the conv weight and bias in place
def fuse_bn(conv, bn):
conv_w = conv.weight
conv_b = None
    if conv.bias is not None:
conv_b = conv.bias
else:
conv_b = torch.FloatTensor(conv_w.size(0)).zero_()
conv.bias = torch.nn.Parameter(conv_b)
for c in range(conv_w.size(0)):
bn_mean = bn.running_mean[c]
bn_var = bn.running_var[c]
bn_weight = bn.weight[c]
bn_bias = bn.bias[c]
inv_var = 1.0 / math.sqrt(bn_var + 1e-5)
conv_w[c].mul_(bn_weight * inv_var)
conv_b[c].add_(-bn_mean * inv_var * bn_weight + bn_bias)
# param_stats = []
# act_stats = []
# def get_stats(t):
# t_abs = t.abs()
# t_sort = t_abs.view(t_abs.nelement()).sort()[0]
# num = t_sort.nelement()
# return [t_sort[int(0.5 * num)].item(),
# t_sort[int(0.9 * num)].item(),
# t_sort[int(0.95 * num)].item(),
# t_sort[int(0.99 * num)].item(),
# t_sort[int(0.995 * num)].item(),
# t_sort[int(0.999 * num)].item(),
# t_sort[-1].item()]
# def print_act(name, t):
# act_stats.append([name, get_stats(t)])
# def print_params(name, m):
# w = get_stats(m.weight)
# b = None
# if m.bias is not None:
# b = get_stats(m.bias)
# param_stats.append([name, w, b])
def new_forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
if (hasattr(self, 'conv3')):
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample[0](x)
out += residual
out = self.relu(out)
return out
def new_resnet_forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def fuse_resnet_params(m):
resnet.Bottleneck.forward = new_forward
resnet.ResNet.forward = new_resnet_forward
m.fused = True
fuse_bn(m.conv1, m.bn1)
del m.bn1
for seq in [m.layer1, m.layer2, m.layer3, m.layer4]:
seq.fused = True
for bb in seq:
bb.fused = True
fuse_bn(bb.conv1, bb.bn1)
del bb.bn1
fuse_bn(bb.conv2, bb.bn2)
del bb.bn2
if (hasattr(bb, 'conv3')):
fuse_bn(bb.conv3, bb.bn3)
del bb.bn3
if (bb.downsample):
fuse_bn(bb.downsample[0], bb.downsample[1])
del bb.downsample[1]
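# --- Illustrative check (not part of the original script) -------------------
# Numerically verifies the batch-norm folding performed by fuse_bn above for a
# bias-free conv: w' = w * gamma / sqrt(var + eps) and
# b' = beta - mean * gamma / sqrt(var + eps), so after fusing, the conv alone
# should reproduce conv -> bn (in eval mode). Not called automatically; run
# _check_fuse_bn() to try it (requires torch).
def _check_fuse_bn():
    torch.manual_seed(0)
    conv = nn.Conv2d(3, 4, kernel_size=3, padding=1, bias=False)
    bn = nn.BatchNorm2d(4)
    bn.eval()
    # give the batch-norm non-trivial statistics and affine parameters
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.data.uniform_(0.5, 1.5)
    bn.bias.data.uniform_(-0.5, 0.5)
    x = torch.randn(1, 3, 8, 8)
    with torch.no_grad():
        expected = bn(conv(x))
        fuse_bn(conv, bn)  # destructively folds bn into conv
        fused = conv(x)
    print('max abs difference:', (expected - fused).abs().max().item())  # ~1e-6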
|
deepfloat-main
|
py/examine_resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import torch
from torch.utils.cpp_extension import CppExtension, BuildExtension
def init_fpga(aocx_file, dir='../bitstream'):
files = []
files.extend(glob.glob('../cpp/utils/*.cpp'))
files.extend(glob.glob('../cpp/ops/*.cpp'))
files.extend(glob.glob('../cpp/layers/*.cpp'))
files.append('../cpp/PythonInterface.cpp')
aocl_compile_conf = subprocess.check_output(
['aocl', 'compile-config']).decode('utf-8').strip()
aocl_link_conf = subprocess.check_output(
['aocl', 'link-config']).decode('utf-8').strip()
ext = torch.utils.cpp_extension.load(
name='fpga_extension',
sources=files,
extra_cflags=[aocl_compile_conf, '-g'],
extra_ldflags=[aocl_link_conf],
extra_include_paths=['../cpp/'],
verbose=False)
dev = ext.fpga_init(dir, aocx_file)
return ext, dev
|
deepfloat-main
|
py/fpga.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
from torch.utils.cpp_extension import CppExtension, BuildExtension
def inspect(name, ext, context, program, queue, x):
return
# f = ext.to_float(context, program, queue, x).abs_()
# print('{}: mean {} max {}'.format(name, f.mean(), f.max()))
class Sequential():
def __init__(self, *args):
self.modules = [*args]
def __len__(self):
return len(self.modules)
def __getitem__(self, idx):
return self.modules[idx]
def add(self, *args):
        for a in args:
self.modules.append(a)
def forward(self, context, program, queue, x):
for m in self.modules:
x = m.forward(context, program, queue, x)
return x
class BasicBlock():
expansion = 1
def __init__(self, ext, context, program, queue,
inplanes, planes, stride=1, downsample=None):
self.ext = ext
self.conv1 = ext.Conv2d(context, program, queue,
inplanes, planes,
3, stride,
1, 1,
False, 0, 0)
self.relu1 = ext.ReLU(context, program, queue)
self.conv2 = ext.Conv2d(context, program, queue,
planes, planes,
3, 1,
1, 1,
False, 0, 0)
self.relu2 = ext.ReLU(context, program, queue)
self.downsample = downsample
self.stride = stride
self.add = ext.Add(context, program, queue, 0, 0, 0)
def forward(self, context, program, queue, x):
residual = x
ext = self.ext
out = self.conv1.forward(context, program, queue, x)
inspect("conv1", ext, context, program, queue, out)
out = self.relu1.forward(context, program, queue, out)
inspect("relu1", ext, context, program, queue, out)
out = self.conv2.forward(context, program, queue, out)
inspect("conv2", ext, context, program, queue, out)
if self.downsample is not None:
residual = self.downsample.forward(context, program, queue, x)
inspect("residual downsample", ext, context, program, queue, residual)
self.add.setAdd(residual)
# inspect("residual", ext, context, program, queue, residual)
out = self.add.forward(context, program, queue, out)
# inspect("add", ext, context, program, queue, out)
out = self.relu2.forward(context, program, queue, out)
inspect("relu2", ext, context, program, queue, out)
return out
class Bottleneck():
expansion = 4
def __init__(self, ext, context, program, queue,
inplanes, planes, stride=1, downsample=None):
self.ext = ext
self.conv1 = ext.Conv2d(context, program, queue,
inplanes, planes,
1, 1,
0, 0,
False, 0, 0)
self.relu1 = ext.ReLU(context, program, queue)
self.conv2 = ext.Conv2d(context, program, queue,
planes, planes,
3, stride,
1, 1,
False, 0, 0)
self.relu2 = ext.ReLU(context, program, queue)
self.conv3 = ext.Conv2d(context, program, queue,
planes, planes * self.expansion,
1, 1,
0, 0,
False, 0, 0)
self.relu3 = ext.ReLU(context, program, queue)
self.downsample = downsample
self.stride = stride
self.add = ext.Add(context, program, queue, 0, 0, 0)
def forward(self, context, program, queue, x):
residual = x
ext = self.ext
out = self.conv1.forward(context, program, queue, x)
inspect("bottleneck conv1", ext, context, program, queue, out)
out = self.relu1.forward(context, program, queue, out)
inspect("bottleneck relu1", ext, context, program, queue, out)
out = self.conv2.forward(context, program, queue, out)
inspect("bottleneck conv2", ext, context, program, queue, out)
out = self.relu2.forward(context, program, queue, out)
inspect("bottleneck relu2", ext, context, program, queue, out)
out = self.conv3.forward(context, program, queue, out)
inspect("bottleneck conv3", ext, context, program, queue, out)
if self.downsample is not None:
residual = self.downsample.forward(context, program, queue, x)
inspect("residual downsample", ext, context, program, queue, residual)
self.add.setAdd(residual)
out = self.add.forward(context, program, queue, out)
inspect("bottleneck add", ext, context, program, queue, out)
out = self.relu3.forward(context, program, queue, out)
inspect("bottleneck relu3", ext, context, program, queue, out)
return out
class ResNet():
def __init__(self, ext, context, program, queue,
block, layers, num_classes=1000):
self.inplanes = 64
self.ext = ext
self.conv1 = ext.Conv2d(context, program, queue,
3, 64,
7, 2,
3, 3, False, 0, 0)
self.relu = ext.ReLU(context, program, queue)
self.maxpool = ext.Pool2d(context, program, queue,
3, 2, 1, 1, ext.PoolOp.Max, 0, 0)
self.layer1 = self._make_layer(ext, context, program, queue,
block, 64, layers[0])
self.layer2 = self._make_layer(ext, context, program, queue,
block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(ext, context, program, queue,
block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(ext, context, program, queue,
block, 512, layers[3], stride=2)
self.avgpool = ext.Pool2d(context, program, queue,
7, 1, 0, 0, ext.PoolOp.Avg, 0, 0)
self.view = ext.View(context, program, queue,
[[0], [1, 2, 3]])
self.fc = ext.Linear(context, program, queue,
512 * block.expansion, num_classes, True, 0, 0)
def _make_layer(self, ext, context, program, queue,
block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = ext.Conv2d(context, program, queue,
self.inplanes, planes * block.expansion,
1, stride, 0, 0,
False, 0, 0)
layers = []
layers.append(block(ext, context, program, queue,
self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(ext, context, program, queue,
self.inplanes, planes))
return Sequential(*layers)
def forward(self, context, program, queue, x):
ext = self.ext
inspect("input", ext, context, program, queue, x)
x = self.conv1.forward(context, program, queue, x)
inspect("conv1", ext, context, program, queue, x)
x = self.relu.forward(context, program, queue, x)
inspect("relu1", ext, context, program, queue, x)
x = self.maxpool.forward(context, program, queue, x)
inspect("maxpool", ext, context, program, queue, x)
x = self.layer1.forward(context, program, queue, x)
inspect("layer1 out", ext, context, program, queue, x)
x = self.layer2.forward(context, program, queue, x)
inspect("layer2 out", ext, context, program, queue, x)
x = self.layer3.forward(context, program, queue, x)
inspect("layer3 out", ext, context, program, queue, x)
x = self.layer4.forward(context, program, queue, x)
inspect("layer4 out", ext, context, program, queue, x)
x = self.avgpool.forward(context, program, queue, x)
inspect("avgpool out", ext, context, program, queue, x)
x = self.view.forward(context, program, queue, x)
inspect("view out", ext, context, program, queue, x)
x = self.fc.forward(context, program, queue, x)
inspect("fc out", ext, context, program, queue, x)
return x
def resnet18(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def fuse_bn(conv, bn):
conv_w = conv.weight.clone()
conv_b = None
    if conv.bias is not None:
conv_b = conv.bias.clone()
else:
conv_b = torch.FloatTensor(conv_w.size(0)).zero_()
for c in range(conv_w.size(0)):
bn_mean = bn.running_mean[c]
bn_var = bn.running_var[c]
bn_weight = bn.weight[c]
bn_bias = bn.bias[c]
inv_var = 1.0 / math.sqrt(bn_var + 1e-5)
conv_w[c].mul_(bn_weight * inv_var)
conv_b[c].add_(-bn_mean * inv_var * bn_weight + bn_bias)
return conv_w, conv_b
def apply_params(ext, dev, w, b, m):
w_p = ext.to_posit(*dev, w)
b_p = ext.to_posit(*dev, b)
m.setWeight(*dev, w_p)
m.setBias(*dev, b_p)
def fuse_apply_params(ext, dev, conv, bn, out_conv, w_scale=1.0, b_scale=1.0):
w, b = fuse_bn(conv, bn)
# w.mul_(w_scale)
# b.mul_(b_scale)
apply_params(ext, dev, w, b, out_conv)
def fuse_resnet_params(ext, dev, m_in, m_out, fc_mul=1.0):
fuse_apply_params(ext, dev, m_in.conv1, m_in.bn1, m_out.conv1)
for seq_in, seq_out in zip([m_in.layer1, m_in.layer2, m_in.layer3, m_in.layer4],
[m_out.layer1, m_out.layer2, m_out.layer3, m_out.layer4]):
for bb_in, bb_out in zip(seq_in, seq_out):
fuse_apply_params(ext, dev, bb_in.conv1, bb_in.bn1, bb_out.conv1)
fuse_apply_params(ext, dev, bb_in.conv2, bb_in.bn2, bb_out.conv2)
if (hasattr(bb_in, 'conv3')):
fuse_apply_params(ext, dev, bb_in.conv3, bb_in.bn3, bb_out.conv3)
if (bb_in.downsample):
fuse_apply_params(ext, dev, bb_in.downsample[0],
bb_in.downsample[1],
bb_out.downsample)
apply_params(ext, dev,
m_in.fc.weight.mul(fc_mul),
m_in.fc.bias.mul(fc_mul), m_out.fc)
def gather_act(ext, dev, model):
def append_act(ext, dev, acts, m):
acts.append(m.getInput())
acts = []
for m in [model.conv1, model.relu, model.maxpool]:
append_act(ext, dev, acts, m)
for l in [model.layer1, model.layer2, model.layer3, model.layer4]:
for s in l:
for m in [s.conv1, s.relu1, s.conv2, s.relu2]:
append_act(ext, dev, acts, m)
if (hasattr(s, 'conv3')):
                append_act(ext, dev, acts, s.conv3)
            if (s.downsample):
                append_act(ext, dev, acts, s.downsample)
            append_act(ext, dev, acts, s.add)
for m in [model.avgpool, model.fc]:
append_act(ext, dev, acts, m)
return acts
|
deepfloat-main
|
py/fpga_resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def validate(val_loader, limit, fpga_h=None, reference_model=None):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
ref_batch_time = AverageMeter()
ref_losses = AverageMeter()
ref_top1 = AverageMeter()
ref_top5 = AverageMeter()
ref_end = time.time()
limit = limit or -1
criterion = nn.CrossEntropyLoss()
count = 0
for i, (input, target) in enumerate(val_loader):
count = count + 1
if (count > limit and not (limit == -1)):
break
if (fpga_h):
end = time.time()
# fpga_h.forward_p(input)
if (reference_model):
ref_end = time.time()
ref_output = reference_model.forward(input)
# ref_target_var = torch.autograd.Variable(target, volatile=True)
ref_target_var = torch.autograd.Variable(target)
ref_loss = criterion(ref_output, ref_target_var)
prec1, prec5 = accuracy(ref_output, target, topk=(1, 5))
ref_losses.update(ref_loss.item(), input.size(0))
ref_top1.update(prec1[0], input.size(0))
ref_top5.update(prec5[0], input.size(0))
# measure elapsed time
ref_batch_time.update(time.time() - ref_end)
print('CPU float32: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
(i + 1) * val_loader.batch_size,
len(val_loader) * val_loader.batch_size,
batch_time=ref_batch_time, loss=ref_losses,
top1=ref_top1, top5=ref_top5))
sys.stdout.flush()
if (fpga_h):
# output = fpga_h.forward_f()
output = fpga_h.forward(input)
# target_var = torch.autograd.Variable(target, volatile=True)
target_var = torch.autograd.Variable(target)
loss = criterion(output, target_var)
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
print('FPGA: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
(i + 1) * val_loader.batch_size,
len(val_loader) * val_loader.batch_size,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
# return top1.avg.item(), top5.avg.item()
def make_loader(batch_size, random=False, seed=1):
valdir = '/home/jhj/imagenet/data/local/packages/ai-group.imagenet-full-size/prod/imagenet_full_size/val'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
sampler = None
if random:
sampler = torch.utils.data.RandomSampler(dataset)
torch.manual_seed(seed)
return torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=batch_size,
shuffle=False,
num_workers=0)
def sample_loader(loader):
for (input, target) in loader:
return input, target
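# --- Illustrative sketch (not part of the original script) ------------------
# Small self-contained call to accuracy() above. In the second sample the
# correct class is only the second-best logit, so precision@1 is 50% while
# precision@2 is 100%. Assumes torch is installed.
if __name__ == "__main__":
    logits = torch.tensor([[0.1, 0.7, 0.2],
                           [0.6, 0.3, 0.1]])
    labels = torch.tensor([1, 1])
    prec1, prec2 = accuracy(logits, labels, topk=(1, 2))
    print(prec1.item(), prec2.item())  # 50.0 100.0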
|
deepfloat-main
|
py/validate.py
|
#!/usr/bin/python3
# Simple Python Fixed-Point Module (SPFPM)
# (C)Copyright 2006-2018, RW Penney
# This file is (C)Copyright 2006-2018, RW Penney
# and is released under the Python-2.4.2 license
# (see http://www.python.org/psf/license),
# it therefore comes with NO WARRANTY, and NO CLAIMS OF FITNESS FOR ANY PURPOSE.
# However, the author welcomes *constructive* feedback
# and bug-fixes via: rwpenney 'AT' users 'DOT' sourceforge 'DOT' net
"""
The Simple Python Fixed-Point Module (SPFPM) provides objects of types
FXnum and FXfamily which implement basic mathematical operations
on fixed-point binary numbers (i.e. having a fixed number of
fractional binary digits, with the number of integer digits being either
arbitrary or subject to a user-defined limit).
FXnum objects exist within a user-controllable collection of families
managed by the FXfamily class, which sets the number of fractional
& integer digits for each family. This can be used, for example,
to ensure that a set of 8-bit quantities can be manipulated consistently
and kept separate from a set of 200-bit quantities in the same program.
Conversion between FXnum objects in different families is supported,
but solely through an explicit cast.
>>> x = FXnum(2.1) # default FXfamily, with 64-bits
>>> print(x)
2.10000000000000008881
>>> x = FXnum(21) / 10 # fractional error ~1/2^64 or ~5e-20
>>> print(x)
2.09999999999999999996
>>> rx = x.sqrt() # rx created in same family as x
>>> print(rx)
1.44913767461894385735
>>> v = x + 2 * rx
>>> print(v)
4.99827534923788771467
>>> y = FXnum(3.2, FXfamily(12)) # lower-precision 12-bit number
>>> ly = y.log() # ly created in same family as y
>>> print(ly) # fractional error ~1/2^12 or ~2e-4
1.1628
>>> print(ly.exp())
3.1987
>>> fy = float(y)
>>> print(fy)
3.199951171875
>>> # a = x + y # throws exception - different families
>>> a = x + FXnum(y, _defaultFamily)
>>> print(a)
5.30007324218749999996
>>> b = rx + x # ok - same families
>>> # c = rx + ly # throws exception - different families
>>> d = ly + y # ok - same families
>>> a = FXnum(1.4, FXfamily(12, 4)) # limit magnitude to 2^(4-1)
>>> print(a)
1.3999
>>> print(a * 5, a * -5)
6.9995 -6.9995
>>> #print(a * 6, a * -6) # throws exception indicating overflow
>>> fam = FXfamily(200)
>>> print(fam.pi)
3.1415926535897932384626433832795028841971693993751058209749444
>>> # Accurate to 60 decimal places ^- first error
Note:
Be careful not to assume that a large number of fractional bits within
a number will necessarily mean large accuracy. For example, computations
involving exponentiation and logarithms are intrinsically vulnerable to
magnifying mere rounding errors in their inputs into significant errors
in their outputs. This is a fact of life with any approximation to
real arithmetic using finite-precision quantities.
SPFPM is provided as-is, with no warranty of any form.
"""
SPFPM_VERSION = '1.4.4'
class FXfamily(object):
"""Descriptor of the accuracy of a set of fixed-point numbers.
This class defines the fixed-point resolution of a set of FXnum objects.
All arithmetic operations between FXnum objects that are
not explicitly cast into a different FXfamily
must share the same FXfamily.
Multiple FXfamily objects can exist within the same application so that,
for example, sets of 12-bit, 32-bit & 200-bit quantities
can be manipulated concurrently.
"""
def __init__(self, n_bits=64, n_intbits=None):
self.fraction_bits = n_bits # Bits to right of binary point
self.integer_bits = n_intbits # Bits to left of binary point (including sign)
self.scale = 1 << n_bits
self._roundup = 1 << (n_bits - 1)
try:
thresh = 1 << (n_bits + n_intbits - 1)
def validate(scaledval):
if scaledval >= thresh or scaledval < -thresh:
raise FXoverflowError
        except TypeError:
def validate(scaledval): return
self.validate = validate
# Cached values of various mathematical constants:
self._exp1, self._log2, self._pi, self._sqrt2 = (None,) * 4
@property
def resolution(self):
"""The number of fractional binary digits"""
return self.fraction_bits
@property
def exp1(self):
"""Inverse natural logarithm of unity."""
if self._exp1 is None:
# Brute-force calculation of exp(1) using augmented accuracy:
augfamily = self.augment()
            arg = 1 / FXnum(4, augfamily)
            q0 = arg._rawexp()
            q1 = q0 * q0
            augexp = q1 * q1
self._exp1 = FXnum(augexp, self)
return self._exp1
@property
def log2(self):
"""Natural logarithm of two."""
if self._log2 is None:
# Brute-force calculation of log(2) using augmented accuracy
# via log(2) = 5log(3^12 / 2^19) - 12log(3^5 / 2^8)
augfamily = self.augment()
q0 = FXnum((3 ** 12) - (1 << 19), augfamily) >> 19
q1 = FXnum((3 ** 5) - (1 << 8), augfamily) >> 8
auglog2 = (5 * q0._rawlog(isDelta=True)
- 12 * q1._rawlog(isDelta=True))
self._log2 = FXnum(auglog2, self)
return self._log2
@property
def pi(self):
"""Ratio of circle's perimeter to its diameter."""
if self._pi is None:
# Use Bailey–Borwein–Plouffe representation of Pi,
# involving powers of 1/16 and simple rational terms:
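            #   pi = sum_{k>=0} 16**-k * (4/(8k+1) - 2/(8k+4) - 1/(8k+5) - 1/(8k+6))
            # (the ">> k4" below multiplies by 16**-k, since k4 == 4*k)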
augfamily = self.augment()
augpi = augfamily(0)
k4 = 0
while True:
k8 = k4 * 2
term = (4 / augfamily(k8 + 1)
- 2 / augfamily(k8 + 4)
- 1 / augfamily(k8 + 5)
- 1 / augfamily(k8 + 6)) >> k4
if term.scaledval == 0: break
augpi += term
k4 += 4
self._pi = FXnum(augpi, self)
return self._pi
@property
def sqrt2(self):
"""Square-root of two."""
if self._sqrt2 is None:
augfamily = self.augment()
x = FXnum(3, augfamily) >> 1
while True:
# Apply Newton-Raphson iteration to f(x)=2/(x*x)-1:
delta = (x * (2 - x * x)) >> 2
x += delta
if abs(delta.scaledval) <= 1:
break
self._sqrt2 = FXnum(x, self)
return self._sqrt2
@property
def unity(self):
"""The multiplicative identity."""
return FXnum(1, self)
@property
def zero(self):
"""The additive identity."""
return FXnum(0, self)
def __hash__(self):
return hash(self.fraction_bits)
def __repr__(self):
return 'FXfamily(n_bits={}, n_intbits={})'.format(self.fraction_bits,
self.integer_bits)
def __eq__(self, other):
try:
return (self.fraction_bits == other.fraction_bits
and self.integer_bits == other.integer_bits)
except AttributeError:
            return False
def __ne__(self, other):
try:
return (self.fraction_bits != other.fraction_bits
or self.integer_bits != other.integer_bits)
except AttributeError:
            return True
def __call__(self, val):
"""Create a fixed-point number within this family."""
return FXnum(val, family=self)
def convert(self, other, other_val):
"""Convert number from different number of fraction-bits"""
bit_inc = self.fraction_bits - other.fraction_bits
if bit_inc == 0:
return other_val
elif bit_inc > 0:
new_val = other_val << bit_inc
if other_val > 0:
new_val |= 1 << (bit_inc - 1)
else:
new_val |= ((1 << (bit_inc -1)) - 1)
return new_val
else:
# Safest approach is to truncate bits, rather than rounding:
return (other_val >> -bit_inc)
def augment(self, opcount=None):
"""Construct new FXfamily with enhanced resolution.
The returned FXfamily will have an increased number of fractional bits,
sufficient to accommodate the worst-case accumulation of 1-LSB errors
over the specified number of operations. If the supplied
operation-count is None, then this defaults to
the existing number of fractional digits.
"""
nb = opcount if opcount is not None else self.fraction_bits
augbits = 4
while nb > 0:
augbits += 1
nb >>= 1
return FXfamily(self.fraction_bits + augbits)
# ^^^ class FXfamily ^^^
_defaultFamily = FXfamily()
####
# Exceptions
#
class FXexception(ArithmeticError):
"""Base-class of exceptions generated by SPFPM operations"""
class FXdomainError(FXexception):
"""Signal that input argument of mathematical function is unsuitable"""
class FXoverflowError(FXexception):
"""Signal that value has overflowed its most-significant bit"""
class FXfamilyError(FXexception, TypeError):
"""Signal that family-types of FXnums in binary operation are mismatched"""
class FXbrokenError(FXexception):
"""Signal some form of internal error, e.g. broken logic"""
class FXnum(object):
"""Representation of a binary fixed-point real number."""
__slots__ = ('family', 'scaledval')
def __init__(self, val=0, family=_defaultFamily, **kwargs):
self.family = family
converter = family.convert
try:
# Assume that val is similar to FXnum:
self.scaledval = converter(val.family, val.scaledval)
except AttributeError:
self.scaledval = kwargs.get('scaled_value',
int(val * family.scale))
self.family.validate(self.scaledval)
@classmethod
def _rawbuild(cls, fam, sv):
"""Shortcut for creating new FXnum instance, for internal use only."""
num = object.__new__(cls)
fam.validate(sv)
num.family = fam
num.scaledval = sv
return num
def __hash__(self):
return hash(self.scaledval) ^ hash(self.family)
def __repr__(self):
"""Create unambiguous string representation of self"""
return 'FXnum(family={}, scaled_value={})'.format(self.family,
self.scaledval)
# Conversion operations:
def __int__(self):
"""Cast to integer"""
if self.scaledval >= 0:
return int(self.scaledval // self.family.scale)
else:
return int((self.scaledval + self.family.scale - 1) // self.family.scale)
def __float__(self):
"""Cast to floating-point"""
return float(self.scaledval) / float(self.family.scale)
def _CastOrFail_(self, other):
"""Turn number into FXnum or check that it is in same family"""
try:
# Binary operations must involve members of same family
if self.family != other.family:
raise FXfamilyError(1)
except AttributeError:
# Automatic casting from types other than FXnum is allowed:
other = FXnum(other, self.family)
return other
# Unary arithmetic operations:
def __abs__(self):
"""Modulus"""
if self.scaledval < 0:
return -self
else:
return self
def __neg__(self):
"""Change sign"""
return FXnum._rawbuild(self.family, -self.scaledval)
def __pos__(self):
"""Identity operation"""
return self
# Arithmetic comparison tests:
def __eq__(self, other):
"""Equality test"""
other = self._CastOrFail_(other)
return self.scaledval == other.scaledval and self.family == other.family
def __ne__(self, other):
"""Inequality test"""
other = self._CastOrFail_(other)
return self.scaledval != other.scaledval
def __ge__(self, other):
"""Greater-or-equal test"""
other = self._CastOrFail_(other)
return self.scaledval >= other.scaledval
def __gt__(self, other):
"""Greater-than test"""
other = self._CastOrFail_(other)
return self.scaledval > other.scaledval
def __le__(self, other):
"""Less-or-equal test"""
other = self._CastOrFail_(other)
return self.scaledval <= other.scaledval
def __lt__(self, other):
"""Greater-than test"""
other = self._CastOrFail_(other)
return self.scaledval < other.scaledval
def __bool__(self):
"""Test for truth/falsehood"""
return (self.scaledval != 0)
def __nonzero__(self):
"""Test for non-zero"""
return (self.scaledval != 0)
# Arithmetic combinations:
def __add__(self, other):
"""Add another number"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
(self.scaledval + other.scaledval))
def __radd__(self, other):
return FXnum(other, self.family) + self
def __sub__(self, other):
"""Subtract another number"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
(self.scaledval - other.scaledval))
def __rsub__(self, other):
return FXnum(other, self.family) - self
def __mul__(self, other):
"""Multiply by another number"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
((self.scaledval * other.scaledval
+ self.family._roundup)
// self.family.scale))
def __rmul__(self, other):
return FXnum(other, self.family) * self
def __lshift__(self, shift):
return FXnum._rawbuild(self.family,
(self.scaledval << shift))
def __rshift__(self, shift):
return FXnum._rawbuild(self.family,
(self.scaledval >> shift))
def __truediv__(self, other):
"""Divide by another number (without truncation)"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
((self.scaledval * self.family.scale
+ self.family._roundup)
// other.scaledval))
__div__ = __truediv__
def __rtruediv__(self, other):
return FXnum(other, self.family) / self
__rdiv__ = __rtruediv__
    # Printing/conversion routines:
def __str__(self):
"""Convert number (as decimal) into string"""
return self.toDecimalString()
def toDecimalString(self, precision=None, round10=False):
"""Convert number (as decimal) into string
precision - The maximum number of digits after the decimal point.
round10 - Round last decimal digit of fractional part,
by adding 0.5/10^precision.
"""
# Despite rebinding costs, list+join idiom appears slower here
# than string concatenation building 'rep' from successive digits
famScale = self.family.scale
if precision is None or not isinstance(precision, int):
precision = int((3 + self.family.fraction_bits) / 3.32)
# Each fractional bit adds about log_10(2) decimal digits
val = self.scaledval
rep = ''
if self.scaledval < 0:
rep = '-'
val *= -1
if round10:
# Round (decimal) fractional part by adding half of last-digit:
decimalScale = 10 ** precision
val = (val * decimalScale + famScale // 2) // decimalScale
whole = val // famScale
frac = val - whole * famScale
rep += str(whole)
if frac != 0 and precision > 0:
rep += '.'
idx = 0
while idx < precision and frac != 0:
frac *= 10
q = frac // famScale
rep += str(q)
frac -= q * famScale
idx += 1
return rep
def toBinaryString(self, logBase=1, twosComp=True):
"""Convert number into string in base 2/4/8/16
logBase - log_2 of the number base for printing.
(e.g. 1 for binary, 3 for octal, 4 for hexadecimal).
This must be no greater than 4.
twosComp - Whether to convert negative numbers into
twos-complement form. If this is False,
then negative numbers are simply prefixed
by a minus sign.
Note that when negative numbers are converted to twos-complement form,
this may involve estimating how many bits are needed
to contain the integer part if this is not specified by the FXfamily.
"""
if not isinstance(logBase, int) or logBase > 4 or logBase < 1:
raise ValueError('Cannot convert to base greater than 16')
sign, prefix = 1, ''
if self.scaledval < 0 and not twosComp:
sign, prefix = -1, '-'
(bits, intDigits, fracDigits) = \
(sign * self)._toTwosComplement(logBase)
digits = []
mask = (1 << logBase) - 1
for dig in range(intDigits+fracDigits):
digits.append('{:1x}'.format(bits & mask))
bits >>= logBase
digits = ''.join(reversed(digits))
return prefix + digits[:-fracDigits] + '.' + digits[-fracDigits:]
def _toTwosComplement(self, logBase=1):
"""Convert binary representation to twos-complement for printing.
This will convert negative numbers into their twos-complement form,
and automatically guess the number of digits required to represent
the integer part of the invoking number. The returned bit-pattern
is aligned so that it has a whole number of digits (in base 1<<logBase)
both before and after the binary/octal/hexadecimal-point.
"""
fracDigits = (self.family.resolution + logBase - 1) // logBase
bitPattern = self.scaledval
if self.family.integer_bits is not None:
intDigits = (self.family.integer_bits + logBase - 1) // logBase
else:
intDigits = 1
intPart = self.scaledval >> self.family.resolution
if intPart >= 0:
while intPart >= (1 << (intDigits * logBase)):
intDigits += 1
else:
while (1 << (intDigits * logBase - 1)) + intPart < 0:
intDigits += 1
if bitPattern < 0:
bitPattern += 1 << (intDigits * logBase + self.family.resolution)
bitPattern <<= (fracDigits * logBase - self.family.resolution)
return (bitPattern, intDigits, fracDigits)
# Mathematical functions:
def __pow__(self, other, modulus=None):
"""Evaluate self ^ other"""
assert modulus is None
if self == 0:
return self.family.unity
ipwr = int(other)
        rmdr = (other - ipwr)
if rmdr == 0:
frac = self.family.unity
else:
frac = (rmdr * self.log()).exp()
return self.intpower(ipwr) * frac
def __rpow__(self, other):
return FXnum(other, self.family) ** self
def intpower(self, pwr):
"""Compute integer power by repeated squaring"""
assert isinstance(pwr, int)
invert = False
if pwr < 0:
pwr *= -1
invert = True
result = self.family.unity
term = self
while True:
if pwr & 1:
result *= term
pwr >>= 1
if not pwr:
break
term *= term
if invert:
result = FXnum(1, self.family) / result
return result
def sqrt(self):
"""Compute square-root of given number."""
if self.scaledval < 0:
raise FXdomainError
elif self.scaledval == 0:
return self
# Calculate crude initial approximation:
rt = FXnum(family=self.family,
scaled_value=(1 << (self.family.fraction_bits // 2)))
val = self.scaledval
while val > 0:
val >>= 2
rt.scaledval <<= 1
# Refine approximation by Newton iteration:
while True:
delta = (rt - self / rt) >> 1
rt -= delta
if delta.scaledval == 0: break
return rt
def exp(self):
"""Compute exponential of given number"""
pwr = int(self)
return (self - pwr)._rawexp() * (self.family.exp1 ** pwr)
def _rawexp(self):
"""Brute-force exponential of given number (assumed smallish)"""
ex = self.family.unity
term = self.family.unity
idx = 1
while True:
term *= self / idx
ex += term
idx += 1
if term.scaledval == 0: break
return ex
def log(self):
"""Compute (natural) logarithm of given number"""
if self.scaledval <= 0:
raise FXdomainError
elif self == 1:
return FXnum(0, self.family)
uprthresh = FXnum(1.6, self.family)
lwrthresh = uprthresh / 2
count = 0
val = self
while val > uprthresh:
val /= 2
count += 1
while val < lwrthresh:
val *= 2
count -= 1
return val._rawlog() + count * self.family.log2
def _rawlog(self, isDelta=False):
"""Compute (natural) logarithm of given number (assumed close to 1)"""
lg = self.family.zero
if isDelta:
z = self / (self + 2)
else:
z = (self - 1) / (self + 1)
z2 = z * z
term = 2 * z
idx = 1
while True:
lg += term / idx
term *= z2
idx += 2
if term.scaledval == 0: break
return lg
def sin(self):
"""Compute sine of given number (as angle in radians)"""
(ang, idx, reflect) = self._angnorm()
idx = idx % 4
if idx == 0: sn = ang._rawQsine(False)
elif idx == 1: sn = ang._rawQsine(True)
elif idx == 2: sn = -ang._rawQsine(False)
elif idx == 3: sn = -ang._rawQsine(True)
else: raise FXbrokenError
if reflect: sn *= -1
return sn
def asin(self):
"""Compute inverse sine of given number"""
arg = self
reflect = False
if self < 0:
arg *= -1
reflect = True
if arg <= 0.5:
asn = arg._rawarcsin()
else:
# apply 1-cos2t transformation:
cs2 = (1 - arg) / 2
if cs2 < 0: raise FXdomainError
asn = self.family.pi / 2 - 2 * cs2.sqrt()._rawarcsin()
if reflect: asn *= -1
return asn
def _rawarcsin(self):
"""Brute-force inverse-sine of given number.
This requires roughly as many integer bits as fractional bits,
in order to accommodate (2n!)/(n!n!).
"""
asn = FXnum(1, self.family)
x2 = self * self
x2n = x2
half = self.family.unity / 2
nCn = 2 # (2n)! / ((n!)^2)
idx = 1
while True:
delta = x2n * ((FXnum(nCn, self.family) >> (2 * idx))
/ (2 * idx + 1))
asn += delta
if delta.scaledval == 0: break
idx += 1
x2n *= x2
nCn = (nCn * 2 * (2 * idx - 1)) // idx
return self * asn
def cos(self):
"""Compute cosine of given number (as angle in radians)"""
(ang, idx, reflect) = self._angnorm()
idx = idx % 4
if idx == 0: cs = ang._rawQsine(True)
elif idx == 1: cs = -ang._rawQsine(False)
elif idx == 2: cs = -ang._rawQsine(True)
elif idx == 3: cs = ang._rawQsine(False)
else: raise FXbrokenError
return cs
def acos(self):
"""Compute inverse cosine of given number"""
arg = self
reflect = False
if self < 0:
arg *= -1
reflect = True
if arg <= 0.5:
acs = self.family.pi / 2 - arg._rawarcsin()
else:
# apply 1-cos2t transformation:
sn2 = (1 - arg) / 2
if sn2 < 0: raise FXdomainError
acs = 2 * (sn2.sqrt())._rawarcsin()
if reflect: acs = self.family.pi - acs
return acs
def sincos(self):
"""Compute sine & cosine of given number (as angle in radians)"""
(ang, idx, reflect) = self._angnorm()
osn = ang._rawQsine(False)
ocs = ang._rawQsine(True)
# transform according to sin(ang+offset), cos(ang+offset):
idx = idx % 4
if idx == 0: (sn, cs) = (osn, ocs)
elif idx == 1: (sn, cs) = (ocs, -osn)
elif idx == 2: (sn, cs) = (-osn, -ocs)
elif idx == 3: (sn, cs) = (-ocs, osn)
else: raise FXbrokenError
if reflect: sn *= -1
return (sn, cs)
def _angnorm(self):
"""Helper function for reducing angle modulo 2.Pi"""
reflect = False
ang = self
if ang < 0:
ang *= -1
reflect = True
# Find nearest multiple of pi/2:
halfpi = self.family.pi / 2
idx = int(ang / halfpi + 0.5)
ang -= idx * halfpi
return (ang, idx, reflect)
def _rawQsine(self, doCos=False, doHyp=False):
"""Helper function for brute-force calculation of sine & cosine"""
sn = self.family.zero
if doHyp:
x2 = self * self
else:
x2 = -self * self
term = self.family.unity
if doCos: idx = 1
else: idx = 2
while True:
sn += term
term *= x2 / (idx * (idx + 1))
idx += 2
if term.scaledval == 0: break
if doCos: return sn
else: return self * sn
def tan(self):
"""Compute tangent of given number (as angle in radians)"""
(sn, cs) = self.sincos()
return sn / cs
def atan(self):
"""Compute inverse-tangent of given number (as angle in radians)"""
reflect = False
recip = False
double = False
tan = self
if tan < 0:
tan *= -1
reflect = True
if tan > 1:
tan = 1 / tan
recip = True
if tan > 0.414:
tan = ((1 + tan * tan).sqrt() - 1) / tan
double = True
ang = tan._rawarctan()
if double:
ang *= 2
if recip:
ang = self.family.pi / 2 - ang
if reflect:
ang *= -1
return ang
def _rawarctan(self):
"""Brute-force inverse-tangent of given number (for |self|<1)."""
atn = 1
x2 = self * self
omx2 = 1 - x2
opx2 = 1 + x2
x4 = x2 * x2
term = x2
idx = 1
while True:
# Combine pair of successive terms with opposite signs:
delta = term * (4 * idx * omx2 + opx2) / (16 * idx * idx - 1)
atn -= delta
term *= x4
idx += 1
if delta.scaledval == 0: break
return self * atn
# ^^^ class FXnum ^^^
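# Illustrative usage sketch (added; names taken from the code above, values
# approximate, FXfamily(32) assumed to mean 32 fraction bits):
#   fam = FXfamily(32)
#   x = FXnum(2, fam)
#   x.sqrt()                 # ~1.414213562
#   FXnum(1, fam).exp()      # ~2.718281828
#   x.log() / fam.log2       # ~1.0, i.e. log2(2)
#   (fam.pi / 4).tan()       # ~1.0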
if __name__ == "__main__":
import doctest
try:
doctest.testmod()
except TypeError:
print("*** Problems running doctest module ***")
# vim: set ts=4 sw=4 et:
|
deepfloat-main
|
rtl/log/luts/FixedPoint.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import FixedPoint
import math
import argparse
import io
parser = argparse.ArgumentParser(
description='Generates pow2 and log2 tables for log-linear conversions',
epilog='', formatter_class=argparse.RawTextHelpFormatter
)
group = parser.add_argument_group('arguments')
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser.add_argument("--mem", type=str2bool, nargs='?',
const=True, default=False,
help="generate memory tables")
group.add_argument('--bits_in', '-bi', metavar='<bits in>', type=int,
nargs=1, required=True,
help='bits for input')
group.add_argument('--bits_out', '-bo', metavar='<bits out>', type=int,
nargs=1, required=True,
help='bits for output')
parser.add_argument('--log', type=str2bool, nargs='?',
const=True, default=False,
help="generate log2 table only")
parser.add_argument('--pow', type=str2bool, nargs='?',
const=True, default=False,
help="generate pow2 table only")
parser.add_argument('--pow_delta', type=str2bool, nargs='?',
const=True, default=False,
help="generate pow2 delta table only")
parser.add_argument('--log_delta', type=str2bool, nargs='?',
const=True, default=False,
help="generate log2 delta table only")
parser.add_argument('--str', type=str2bool, nargs='?',
const=True, default=False,
help="print to stdout only")
def get_r2ne(x, bits):
s = x.toBinaryString()  # avoid shadowing the built-in str
assert s[1] == '.'
keep_bit = s[2+bits-1] == '1'
guard_bit = s[2+bits] == '1'
round_bit = s[2+bits+1] == '1'
sticky_bits = s[2+bits+2:].find('1') != -1
round_down = (not guard_bit) or ((not keep_bit) and guard_bit and (not round_bit) and (not sticky_bits))
return not round_down
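# Note (added): get_r2ne() implements round-to-nearest-even on the binary
# fraction of x truncated to `bits` bits: round up when the guard bit is 1 and
# either the round/sticky bits are set or the kept LSB is already 1 (tie-to-even).
# E.g. with bits=3, fraction bits '1011000' round up to '110', '1001000' is a
# tie kept at '100' (even), and '1001010' rounds up to '101'.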
def get_fraction(x, bits=-1):
s = x.toBinaryString()  # avoid shadowing the built-in str
# Find the fixed point
idx = s.find('.')
if bits == -1:
return s[idx+1:]
else:
return s[idx+1:idx+1+bits]
args = parser.parse_args()
overlaps = {}
#
# Non-delta
#
def get_pow2_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_x = x
orig_str = x.toBinaryString()[2:2+in_bits]
x = pow(2, x)
pow2_str = x.toBinaryString()
keep_bit = pow2_str[2+out_bits-1] == '1'
guard_bit = pow2_str[2+out_bits] == '1'
round_bit = pow2_str[2+out_bits+1] == '1'
sticky_bits = pow2_str[2+out_bits+2:].find('1') != -1
round_down = (not guard_bit) or ((not keep_bit) and guard_bit and (not round_bit) and (not sticky_bits))
if (not round_down and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
x = x + add
before_round = pow2_str[2:2+out_bits]
after_round = x.toBinaryString()[2:2+out_bits]
is_overlap = False
if after_round in overlaps:
is_overlap = True
else:
overlaps[after_round] = True
# can also formulate as what to subtract, excepting 0
# print(orig_str, (x - (1 + orig_x)).toBinaryString()[2+2:4 + out_bits - 2])
return orig_str, before_round, after_round, not round_down, is_overlap
def get_log2_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_str = x.toBinaryString()[2:2+in_bits]
x = (x + 1).log() / math.log(2)
pow2_str = x.toBinaryString()
keep_bit = pow2_str[2+out_bits-1] == '1'
guard_bit = pow2_str[2+out_bits] == '1'
round_bit = pow2_str[2+out_bits+1] == '1'
sticky_bits = pow2_str[2+out_bits+2:].find('1') != -1
round_down = (not guard_bit) or ((not keep_bit) and guard_bit and (not round_bit) and (not sticky_bits))
if (not round_down and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
x = x + add
before_round = pow2_str[2:2+out_bits]
after_round = x.toBinaryString()[2:2+out_bits]
is_overlap = False
if after_round in overlaps:
is_overlap = True
else:
overlaps[after_round] = True
return orig_str, before_round, after_round, not round_down, is_overlap
#
# delta
#
def get_pow2_delta_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_x = x
orig_str = x.toBinaryString()[2:2+in_bits]
pow2_x = pow(2, x)
pow2_str = x.toBinaryString()
round_up = get_r2ne(pow2_x, out_bits)
pow2_round_x = pow2_x
if (round_up and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
pow2_round_x = pow2_x + add
# As an out_bits-sized fixed point number
fam_out = FixedPoint.FXfamily(out_bits)
y = FixedPoint.FXnum(pow2_round_x, fam_out)
cur = FixedPoint.FXnum(i, fam_out) / (2 ** in_bits)
# This is what we are encoding, all values except for 0 are negative
delta_y = y - cur
delta_y = delta_y << 3
delta_y_truncated = FixedPoint.FXnum(delta_y, FixedPoint.FXfamily(out_bits-3))
delta_y_truncated = delta_y_truncated - 7
# print(y.toBinaryString(), cur.toBinaryString(), (y - cur).toBinaryString(), get_fraction(delta_y_truncated))
# Now, see if we can recover y from delta_y_truncated
recover_y = FixedPoint.FXnum(delta_y_truncated, fam_out)
recover_y = recover_y + 7
recover_y = recover_y >> 3
recover_val = cur + recover_y
assert recover_val == y
before_round = get_fraction(pow2_x, out_bits)
after_round = get_fraction(pow2_round_x, out_bits)
return orig_str, after_round, get_fraction(delta_y_truncated)
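# Note (added): the delta encoding above stores roughly (y - cur) * 8 - 7,
# truncated to an (out_bits-3)-bit family, where y is the rounded pow2 fraction
# and cur is the input value.  A consumer can undo it as
#   y == cur + ((delta + 7) >> 3)
# which is exactly what the `recover_val == y` assertion checks.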
def get_log2_delta_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_x = x
orig_str = x.toBinaryString()[2:2+in_bits]
log2_x = (x + 1).log() / math.log(2)
log2_str = x.toBinaryString()
round_up = get_r2ne(log2_x, out_bits)
log2_round_x = log2_x
if (round_up and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
log2_round_x = log2_x + add
# As an out_bits-sized fixed point number
fam_out = FixedPoint.FXfamily(out_bits)
y = FixedPoint.FXnum(log2_round_x, fam_out)
cur = FixedPoint.FXnum(i, fam_out) / (2 ** in_bits)
# This is what we are encoding, all values except for 0 are negative
delta_y = y - cur
delta_y = delta_y << 3
# print('cur {} round {} delta {}'.format(cur.toBinaryString(), y.toBinaryString(), delta_y.toBinaryString()))
delta_y_truncated = FixedPoint.FXnum(delta_y, FixedPoint.FXfamily(out_bits-3))
delta_y_truncated = delta_y_truncated - 7
# Now, see if we can recover y from delta_y_truncated
recover_y = FixedPoint.FXnum(delta_y_truncated, fam_out)
recover_y = recover_y + 7
recover_y = recover_y >> 3
recover_val = cur + recover_y
# print('here', recover_val.toBinaryString())
assert recover_val == y
before_round = get_fraction(log2_x, out_bits)
after_round = get_fraction(log2_round_x, out_bits)
return orig_str, after_round, get_fraction(delta_y_truncated)
#
# module generation
#
def gen_pow2(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Pow2LUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits-1)
file.write(header)
had_overlap = False
for i in range(2 ** in_bits):
in_fixed, before_fixed, out_fixed, r, is_overlap = get_pow2_expansion(i, in_bits, out_bits)
if (gen_mem):
file.write(out_fixed)
file.write('\n')
else:
overlap_str = ''
if (is_overlap and r):
had_overlap = True
overlap_str = ' // overlap + round'
elif (is_overlap):
had_overlap = True
overlap_str = ' // overlap'
elif (r):
had_overlap = True
overlap_str = ' // round'
file.write(' {}\'b{}: out = {}\'b{};{}\n'.format(
in_bits,
in_fixed,
out_bits,
out_fixed,
overlap_str))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits, 'x' * out_bits))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
def gen_log2(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Log2LUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits)
file.write(header)
had_overlap = False
for i in range(2 ** in_bits):
in_fixed, before_fixed, out_fixed, r, is_overlap = get_log2_expansion(i, in_bits, out_bits, True)
if (i < 2 ** (in_bits - 1) or out_fixed != ('0' * out_bits)):
r = False
if (gen_mem):
file.write('{}{}\n'.format(int(r), out_fixed))
else:
overlap_str = ''
if (is_overlap and r):
had_overlap = True
overlap_str = ' // overlap + round'
elif (is_overlap):
had_overlap = True
overlap_str = ' // overlap'
elif (r):
had_overlap = True
overlap_str = ' // round'
file.write(' {}\'b{}: out = {}\'b{}{};{}\n'.format(
in_bits,
in_fixed,
out_bits + 1,
int(r),
out_fixed,
overlap_str))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits + 1, 'x' * (out_bits + 1)))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
def gen_pow2_delta(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Pow2DeltaLUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits-4)
file.write(header)
for i in range(2 ** in_bits):
in_fixed, out_fixed, delta = get_pow2_delta_expansion(i, in_bits, out_bits)
if (gen_mem):
file.write(delta)
file.write('\n')
else:
file.write(' {}\'b{}: out = {}\'b{};\n'.format(
in_bits,
in_fixed,
out_bits-3,
delta))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits-3, 'x' * (out_bits-3)))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
def gen_log2_delta(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Log2DeltaLUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits-4)
file.write(header)
for i in range(2 ** in_bits):
in_fixed, out_fixed, delta = get_log2_delta_expansion(i, in_bits, out_bits)
if (gen_mem):
file.write(delta)
file.write('\n')
else:
file.write(' {}\'b{}: out = {}\'b{};\n'.format(
in_bits,
in_fixed,
out_bits-3,
delta))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits-3, 'x' * (out_bits-3)))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
# def gen_pow2_mem(file, in_bits, out_bits):
# header = """
# module Pow2Mem_{}x{}
# (input [{}:0] in,
# output logic [{}:0] out);
# logic [{}:0] mem[0:(2**{})-1];
# initial begin
# $readmemb("pow2_{}x{}.hex", mem);
# end
# always_comb begin
# out = mem[in];
# end
# endmodule
# """.format(in_bits, out_bits, in_bits-1, out_bits-1, out_bits-1, in_bits, in_bits, out_bits)
# file.write(header)
# def gen_log2_mem(file, in_bits, out_bits):
# header = """
# module Log2Mem_{}x{}
# (input [{}:0] in,
# output logic [{}:0] out);
# logic [{}:0] mem[0:(2**{})-1];
# initial begin
# $readmemb("log2_{}x{}.hex", mem);
# end
# always_comb begin
# out = mem[in];
# end
# endmodule
# """.format(in_bits, out_bits, in_bits-1, out_bits, out_bits, in_bits, in_bits, out_bits)
# file.write(header)
in_bits = args.bits_in[0]
out_bits = args.bits_out[0]
def make_file(name):
if (args.str):
return io.StringIO()
return open(name, 'w')
def close_file(f):
if (args.str):
print(f.getvalue())
else:
f.close()
if (args.pow):
f = make_file('Pow2LUT_{}x{}.sv'.format(in_bits, out_bits))
gen_pow2(f, False, in_bits, out_bits)
close_file(f)
# if (args.mem):
# f = make_file('Pow2Mem_{}x{}.sv'.format(in_bits, out_bits))
# gen_pow2_mem(f, in_bits, out_bits)
# close_file(f)
# f = make_file('pow2_{}x{}.hex'.format(in_bits, out_bits))
# gen_pow2(f, True, in_bits, out_bits)
# close_file(f)
if (args.pow_delta):
f = make_file('Pow2DeltaLUT_{}x{}.sv'.format(in_bits, out_bits))
gen_pow2_delta(f, False, in_bits, out_bits)
close_file(f)
if (args.log):
f = make_file('Log2LUT_{}x{}.sv'.format(in_bits, out_bits))
gen_log2(f, False, in_bits, out_bits)
close_file(f)
if (args.log_delta):
f = make_file('Log2DeltaLUT_{}x{}.sv'.format(in_bits, out_bits))
gen_log2_delta(f, False, in_bits, out_bits)
close_file(f)
# if (args.mem):
# f = make_file('Log2Mem_{}x{}.sv'.format(in_bits, out_bits))
# gen_log2_mem(f, in_bits, out_bits)
# close_file(f)
# f = make_file('log2_{}x{}.hex'.format(in_bits, out_bits))
# gen_log2(f, True, out_bits, in_bits)
# close_file(f)
|
deepfloat-main
|
rtl/log/luts/gen_tables.py
|
AutoCTR-main
|
utils/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import torch
from graphviz import Digraph
from torch.autograd import Variable
logger = logging.getLogger(__name__)
def size_to_str(size):
return "(" + (", ").join(["%d" % v for v in size]) + ")"
def visualize(model):
feats = create_fake_feats(model.feature_config)
pred = model(feats)
return net_visual(pred, params=dict(model.named_parameters()))
# default batch size = 2 so that BN layers can work
def create_fake_feats(feature_config, batch_size=2):
num_dense_feat = len(feature_config.dense.features)
feats = {"dense": torch.FloatTensor(np.random.rand(batch_size, num_dense_feat))}
feats.update(
{
feat.name: {
"data": torch.LongTensor([]),
"offsets": torch.LongTensor([0] * batch_size),
}
for feat in feature_config.sparse.features
}
)
return feats
def net_visual(var, params=None):
""" Produces Graphviz representation of PyTorch autograd graph.
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to nodes that
require grad (TODO: make optional)
"""
if params is not None:
assert all(isinstance(p, Variable) for p in params.values())
param_map = {id(v): k for k, v in params.items()}
node_attr = {
"style": "filled",
"shape": "box",
"align": "left",
"fontsize": "12",
"ranksep": "0.1",
"height": "0.2",
}
graph_attr = {"size": "12,12"}
dot = Digraph(node_attr=node_attr, graph_attr=graph_attr)
seen = set()
output_nodes = (
(var.grad_fn,) if not isinstance(var, tuple) else tuple(v.grad_fn for v in var)
)
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
# note: this used to show .saved_tensors in pytorch0.2, but stopped
# working as it was moved to ATen and Variable-Tensor merged
dot.node(str(id(var)), size_to_str(var.size()), fillcolor="orange")
elif hasattr(var, "variable"):
u = var.variable
name = param_map[id(u)] if params is not None else ""
node_name = "%s\n %s" % (name, size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor="lightblue")
elif var in output_nodes:
dot.node(
str(id(var)), str(type(var).__name__), fillcolor="darkolivegreen1"
)
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, "next_functions"):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, "saved_tensors"):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
# handle multiple outputs
if isinstance(var, tuple):
for v in var:
add_nodes(v.grad_fn)
else:
add_nodes(var.grad_fn)
_resize_graph(dot)
return dot
def _resize_graph(dot, size_per_element=0.15, min_size=12):
"""Resize the graph according to how much content it contains.
Modify the graph in place.
"""
# Get the approximate number of nodes and edges
num_rows = len(dot.body)
content_size = num_rows * size_per_element
size = max(min_size, content_size)
size_str = str(size) + "," + str(size)
dot.graph_attr.update(size=size_str)
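# Illustrative usage (added, not part of the original file):
#   dot = visualize(model)            # model exposing a .feature_config attribute
#   dot.render("arch", format="pdf")  # standard graphviz.Digraph API
# visualize() builds fake inputs from model.feature_config, runs a forward
# pass, and draws the autograd graph of the resulting output.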
|
AutoCTR-main
|
utils/viz_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
sys.path.append('gen-py')
import argparse
import json
from block_config import ttypes as b_config
from config import ttypes as config
def get_args():
parser = argparse.ArgumentParser(
description="Neural Recommendation Model Searching Script for Kaggle Dataset"
)
# configs for final fit only
parser.add_argument("--model-file", type=str, default="",
help="a json file contain the model structure for final fit")
parser.add_argument("--save-model", action="store_true", default=False, help="save model or not during the final fit process")
# configs for search and final fit
parser.add_argument("--data-file", type=str, default="", help="data for search or final fit")
parser.add_argument("--data-set-name", type=str, default="", help="dataset name", choices=["criteo", "avazu", "kdd2012"])
parser.add_argument("--log-freq", type=int, default=10, help="log freqency of model training (# of epochs)")
parser.add_argument("--splits", type=str, default="0.8:0.1",
help="split of train,val,test, e.g., 0.8:0.1 means 80% train, 10% val, 10% test")
parser.add_argument("--batch-size", type=int, default=100, help="batch size for training each model")
parser.add_argument("--hash-size", type=int, default=10000, help="hash size for the features")
parser.add_argument("--learning-rate", type=float, default=0.001, help="learning rate of each model")
parser.add_argument("--nepochs", type=int, default=50, help="maximum epoch for training a model")
parser.add_argument("--num-workers", type=int, default=4,
help="number of workers (cpus) to preprocess data")
parser.add_argument("--num-trainers", type=int, default=1,
help="number of training for cpu training, currently this is abandoned and to be removed, we only support gpu training now")
parser.add_argument("--repeat-checker-off", action="store_true", default=False, help="check and avoid repeating searching same architectures")
parser.add_argument(
"--save-model-path", type=str, default="", help="the file path to save the models during the search process"
)
parser.add_argument("--search-nepochs", type=int, default=3, help="number of search iterations")
parser.add_argument(
"--reward-type",
default="logloss",
type=str,
choices=["logloss", "auc"],
help="measurement for the search model to compare models"
)
parser.add_argument(
"--searcher-type",
default="random",
type=str,
choices=["random", "evo"],
help="search algorithm"
)
parser.add_argument("--max-num-block", type=int, default=5, help="maximum number of blocks in each model in the search space")
parser.add_argument(
"--feature-processing-type", default="", type=str, choices=["idasp"], help="if we want to treat dense feature as sparse features"
)
# hyperparameters for proposed evo algorithm
parser.add_argument("--population-size", type=int, default=3,
help="size of the population, it also decides how many random initialization architectures we will do")
parser.add_argument("--candidate-size", type=float, default=2,
help="number of candidates to be picked from the population, the best one will be used to generate offsprings")
parser.add_argument("--sampler-type", type=int, default=10, help="number of neigbors for each candidate")
parser.add_argument("--historical-sample-path", type=str, default="", help="path for historical architectures to warm start the evo searcher")
parser.add_argument("--historical-sample-num", type=int, default=0, help="number of historical architectures to warm start the evo searcher")
parser.add_argument(
"--survival-type", default="comb", type=str, choices=["age", "fit", "mix", "comb"],
help="survival type, comb is multi-objective survival function, mix is a two-step survival function"
)
# search space config
parser.add_argument(
"--macro-space-type", type=int, default=config.MacroSearchSpaceType.INPUT_GROUP,
help="search space for features, either group sparse/dense features or not, please check out the /if/config.thrift for more detail"
)
parser.add_argument(
"--micro-space-types",
default="close",
type=str,
choices=[
"close",
"micro_mlp",
],
help="micro search space for blocks, currently only mlp have a micro space hyperparameter (units in each mlp layer), close means do not search mlp units",
)
# general search config
parser.add_argument("--num-machines", type=int, default=1, help="number of GPUs to be used")
parser.add_argument("--waiting-time", type=float, default=30,
help="waiting time for checking if the current running models are complete, default: check every 30 seconds")
parser.add_argument("--resume-file", type=str, default="", help="the file path to resume the search process")
parser.add_argument("--fbl-kill-time", type=float, default=1800,
help="time to kill a model during search, this is used to avoid some model crush and stuck during training")
parser.add_argument("--numpy-seed", type=int, default=123, help="numpy seed")
parser.add_argument("--torch-seed", type=int, default=4321, help="torch seed")
parser.add_argument("--warm-start-emb", action="store_true", default=False,
help="if we have a `.ckp` model weight to warm start the embeddings of the sparse features in each model")
# gpu config
parser.add_argument("--use-gpu", action="store_true", default=False, help="use gpu or not")
parser.add_argument("--maxLoad", type=float, default=0.5,
help="only load a model when the current used load of this gpu is lower than maxLoad")
parser.add_argument("--maxMemory", type=float, default=0.5,
help="only load a model when the current used memory of this gpu is lower than maxMemory")
parser.add_argument("--save-batches", action="store_true", default=False,
help="if we want to save the training data batches in the gpu memory, this will accelerate the speed")
parser.add_argument("--save-val-batches", action="store_true", default=False,
help="if we want to save the validation data batches in the gpu memory, this will accelerate the speed")
parser.add_argument("--total-gpus", type=int, default=1, help="total number of gpus on the machine")
parser.add_argument("--excludeID", type=str, default="", help="")
args = parser.parse_args()
if not args.save_model_path:
args.save_model_path = os.path.join(os.getcwd(), "results")
return args
def get_micro_space_types(args):
micro_space_types = args.micro_space_types.replace(" ", "")
micro_space_types = micro_space_types.split(",")
micro_space_types = list(set(micro_space_types))
micro_space_configs = []
if "close" in micro_space_types:
return [config.MicroSearchSpaceType(close=config.MicroClose())]
elif "micro_mlp" in micro_space_types:
micro_space_configs.append(
config.MicroSearchSpaceType(
micro_mlp=config.MicroMLPConfig(arc=[32, 64, 128, 256, 512, 1024])
)
)
elif "micro_cin" in micro_space_types:
micro_space_configs.append(
config.MicroSearchSpaceType(
micro_cin=config.MicroCINConfig(
arc=[64, 128, 256], num_of_layers=[1, 2, 3]
)
)
)
elif "micro_attention" in micro_space_types:
micro_space_configs.append(
config.MicroSearchSpaceType(
micro_attention=config.MicroAttentionConfig(
num_of_layers=[1, 2, 3],
num_of_heads=[1, 2, 3],
att_embed_dim=[],
dropout_prob=[],
)
)
)
else:
raise ValueError("Error micro space type.")
return micro_space_configs
def get_feature_processing_type(args):
feature_processing_type = args.feature_processing_type.replace(" ", "")
feature_processing_type = feature_processing_type.split(",")
feature_processing_type = list(set(feature_processing_type))
feature_processing_configs = []
if feature_processing_type != [""]:
if "idasp" in feature_processing_type:
feature_processing_configs.append(
config.FeatureProcessingType(idasp=config.InputDenseAsSparse())
)
else:
raise ValueError("Error micro space type.")
return feature_processing_configs
def get_searcher_config(args):
block_types = [
b_config.ExtendedBlockType.MLP_DENSE,
# b_config.ExtendedBlockType.MLP_EMB,
# b_config.ExtendedBlockType.CROSSNET,
# b_config.ExtendedBlockType.FM_DENSE,
b_config.ExtendedBlockType.FM_EMB,
# b_config.ExtendedBlockType.DOTPROCESSOR_DENSE,
b_config.ExtendedBlockType.DOTPROCESSOR_EMB,
# b_config.ExtendedBlockType.CAT_DENSE,
# b_config.ExtendedBlockType.CAT_EMB,
# b_config.ExtendedBlockType.CIN,
# b_config.ExtendedBlockType.ATTENTION,
]
if args.searcher_type == "random":
searcher_config = config.SearcherConfig(
random_searcher=config.RandomSearcherConfig(
max_num_block=args.max_num_block,
block_types=block_types,
macro_space_type=args.macro_space_type,
micro_space_types=get_micro_space_types(args),
feature_processing_type=get_feature_processing_type(args),
)
)
elif args.searcher_type == "evo":
searcher_config = config.SearcherConfig(
evolutionary_searcher=config.EvolutionarySearcherConfig(
max_num_block=args.max_num_block,
block_types=block_types,
population_size=args.population_size,
candidate_size=max(1, int(args.candidate_size)),
macro_space_type=args.macro_space_type,
micro_space_types=get_micro_space_types(args),
feature_processing_type=get_feature_processing_type(args),
)
)
return searcher_config
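# Illustrative flow (added): these helpers are typically combined as
#   args = get_args()
#   searcher_config = get_searcher_config(args)
#   input_summary, args = get_trainer_config(args)
# where the returned configs are thrift structs (see the config and
# block_config ttypes imports above).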
def get_trainer_config(args):
fp = os.getcwd()
if args.data_set_name == "criteo":
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_search.json"))
elif args.data_set_name == "avazu":
input_summary = json.load(open(fp + "/utils/fblearner_template/avazu_search.json"))
elif args.data_set_name == "kdd2012":
input_summary = json.load(open(fp + "/utils/fblearner_template/kdd2012_search.json"))
else:
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_search.json"))
return input_summary, args
def get_final_fit_trainer_config(args):
fp = os.getcwd()
if args.data_set_name == "criteo":
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_transfer.json"))
elif args.data_set_name == "avazu":
input_summary = json.load(open(fp + "/utils/fblearner_template/avazu_transfer.json"))
elif args.data_set_name == "kdd2012":
input_summary = json.load(open(fp + "/utils/fblearner_template/kdd2012_transfer.json"))
else:
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_transfer.json"))
return input_summary, args
def get_phenotype(args):
filenames = [args.model_file]
model_config_dicts = []
for filename in filenames:
with open(filename) as fp:
model_config_dict = json.load(fp)
model_config_dicts.append(model_config_dict)
return filenames, model_config_dicts
|
AutoCTR-main
|
utils/search_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import logging
from collections import namedtuple
from copy import deepcopy
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from config import ttypes as config
ReaderOption = namedtuple("ReaderOption", ["type", "options"])
logger = logging.getLogger(__name__)
kEpsilon = 1e-10
class DenseDataset(Dataset):
"""Dense dataset."""
def __init__(self, X, y, sample_weights=None):
self.X = torch.FloatTensor(X)
self.y = torch.FloatTensor(y)
if sample_weights is not None:
self.sample_weights = torch.FloatTensor(sample_weights)
else:
self.sample_weights = None
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
sample = {}
sample["label"] = self.y[idx]
sample["dense"] = self.X[idx]
if self.sample_weights is not None:
sample["weight"] = self.sample_weights[idx]
return sample
def share_memory_(self):
self.X.share_memory_()
self.y.share_memory_()
if self.sample_weights is not None:
self.sample_weights.share_memory_()
############################################################
# criteo data utils
############################################################
class CriteoDataset(Dataset):
"""Criteo dataset."""
def __init__(self, X_cat, X_int, y, dense_transform=None):
self.X_cat, self.X_int, self.y, self.dense_transform = (
torch.LongTensor(X_cat),
torch.FloatTensor(X_int),
torch.FloatTensor(y),
dense_transform,
)
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
# Criteo data only has categorical features as its sparse features
sample = {
"sparse_{}".format(i): torch.tensor([v + 1])
for i, v in enumerate(self.X_cat[idx])
}
sample["label"] = self.y[idx]
sample["dense"] = (
self.X_int[idx]
if self.dense_transform is None
else self.dense_transform(self.X_int[idx])
)
return sample
def share_memory_(self):
self.X_cat.share_memory_()
self.X_int.share_memory_()
self.y.share_memory_()
############################################################
# synthetic data utils
############################################################
def _set_random_seed(seed=0):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class SyntheticDataset(Dataset):
"""Synthetic dataset."""
def __init__(
self,
num_dense,
num_sparse,
max_sparse_id,
num_samples,
batch_size,
id_list_configs,
):
_set_random_seed()
# We generate 10k examples and then reuse these examples during
# data reading
data_num_samples = 10000
self.num_batches = data_num_samples // batch_size
self.num_batch_samples = num_samples // batch_size
# Limit the number of examples as we are only doing benchmarking for
# synthetic data set.
dense = torch.randn((self.num_batches, batch_size, num_dense))
label = torch.randint(2, size=(self.num_batches, batch_size))
weight = None # torch.ones((self.num_batches, batch_size))
assert (
len(id_list_configs) == num_sparse or len(id_list_configs) == 1
), "len(id_list_configs) != num_sparse: {0} vs {1}".format(
len(id_list_configs), num_sparse
)
if len(id_list_configs) == 1:
id_list_configs = [deepcopy(id_list_configs[0]) for _ in range(num_sparse)]
sparse_id_list_len = [
[
[
min(max(0, int(x)), config.truncation)
for x in np.random.normal(
config.mean, config.std, size=(batch_size)
)
]
for config in id_list_configs
]
for _ in range(self.num_batches)
]
sparse = []
for k in range(self.num_batches):
sparse_batch = []
for i in range(num_sparse):
sparse_batch.append({})
ids = []
offsets = [0]
for j in range(batch_size):
id_list_len = sparse_id_list_len[k][i][j]
ids.extend(np.random.randint(max_sparse_id, size=id_list_len))
offsets.append(offsets[-1] + id_list_len)
sparse_batch[i]["data"] = torch.tensor(ids)
sparse_batch[i]["offsets"] = torch.tensor(offsets[:-1])
sparse.append(sparse_batch)
self.data = []
for i in range(self.num_batches):
batch = {}
batch["dense"] = dense[i]
batch["label"] = label[i]
batch["weight"] = weight[i] if weight is not None else None
batch["sparse"] = [sparse[i][j] for j in range(num_sparse)]
self.data.append(batch)
def __len__(self):
return self.num_batch_samples
def __getitem__(self, idx):
return self.data[idx % self.num_batches]
def synthetic_data_generator(
num_dense, num_sparse, max_sparse_id, num_samples, batch_size, id_list_configs
):
_set_random_seed()
# Limit the number of examples as we are only doing benchmarking for
# synthetic data set.
data_num_batches = min(1000, min(100000, num_samples) // batch_size)
dense = torch.randn((data_num_batches, batch_size, num_dense))
label = torch.randint(2, size=(data_num_batches, batch_size))
# weight = torch.ones((data_num_batches, batch_size))
assert (
len(id_list_configs) == num_sparse or len(id_list_configs) == 1
), "len(id_list_configs) != num_sparse: {0} vs {1}".format(
len(id_list_configs), num_sparse
)
if len(id_list_configs) == 1:
id_list_configs = [deepcopy(id_list_configs[0]) for _ in range(num_sparse)]
sparse_id_list_len = [
[
[
min(max(0, int(x)), config.truncation)
for x in np.random.normal(config.mean, config.std, size=(batch_size))
]
for config in id_list_configs
]
for _ in range(data_num_batches)
]
sparse = []
for k in range(data_num_batches):
sparse_batch = []
for i in range(num_sparse):
sparse_batch.append({})
ids = []
offsets = [0]
for j in range(batch_size):
id_list_len = sparse_id_list_len[k][i][j]
ids.extend(np.random.randint(max_sparse_id, size=id_list_len))
offsets.append(offsets[-1] + id_list_len)
sparse_batch[i]["data"] = torch.tensor(ids)
sparse_batch[i]["offsets"] = torch.tensor(offsets[:-1])
sparse.append(sparse_batch)
data = []
for i in range(data_num_batches):
batch = {}
batch["dense"] = dense[i]
batch["label"] = label[i]
batch["weight"] = None # weight[i]
batch["sparse"] = [sparse[i][j] for j in range(num_sparse)]
data.append(batch)
return data
def get_split_indices(splits, num_samples):
if np.sum(splits) >= 1.0:
raise ValueError("sum of splits should be smaller than 1.0")
bins = list(np.cumsum([0.0] + list(splits)))
bins.append(1.0)
indices = [
range(int(bins[i] * num_samples), int(bins[i + 1] * num_samples))
for i in range(len(splits) + 1)
]
if any(len(indice) <= 0 for indice in indices):
raise ValueError(
"Split {} is causing empty partitions: {}".format(
splits, [len(indice) for indice in indices]
)
)
return indices
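# Example (added): get_split_indices([0.8, 0.1], 100) returns
#   [range(0, 80), range(80, 90), range(90, 100)]
# i.e. an 80/10/10 train/val/eval split; a ValueError is raised when
# sum(splits) >= 1.0 or when any partition would be empty.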
def split_dense_dataset(data, splits, sample_weights=None):
"""
data: dict with "X" and "y" arrays (and optional sample_weights)
splits: array of split ratios of length L; creates L+1 datasets
according to the ratios, with the last partition being 1.0-sum(splits)
example:
splits=[0.8, 0.1] for an 80%, 10%, 10% split
between train, validation, and eval
"""
num_samples = len(data["y"])
indices = get_split_indices(splits=splits, num_samples=num_samples)
logger.info(
"Split data into partitions with size: {}".format(
[len(indice) for indice in indices]
)
)
datasets = []
for indice in indices:
dataset = DenseDataset(
data["X"][indice],
data["y"][indice],
None if sample_weights is None else sample_weights[indice],
)
datasets.append(dataset)
return datasets
def load_and_split_dataset(npz_file, splits=None):
"""
npz_file: path to an npz file with X_cat, X_int and y arrays
splits: array of split ratios of length L; creates L+1 datasets
according to the ratios, with the last partition being 1.0-sum(splits);
if None, return the entire dataset as a single CriteoDataset
example:
splits=[0.8, 0.1] for an 80%, 10%, 10% split
between train, validation, and eval
"""
data = np.load(npz_file)
if splits is None:
return CriteoDataset(X_cat=data["X_cat"], X_int=data["X_int"], y=data["y"])
num_samples = len(data["y"])
indices = get_split_indices(splits=splits, num_samples=num_samples)
logger.info(
"Split data into partitions with size: {}".format(
[len(indice) for indice in indices]
)
)
return [
CriteoDataset(
X_cat=data["X_cat"][indice],
X_int=data["X_int"][indice],
y=data["y"][indice],
)
for indice in indices
]
############################################################
# batch processors
############################################################
def _save_transforms(dense_transform, filename):
torch.save({"dense_transform": dense_transform}, filename)
def _load_transforms(filename):
state = torch.load(filename)
return state["dense_transform"]
# the __call__ method for a BatchProcessor should return label, feats, weight:
# label: a (batch_size,) FloatTensor for labels
# weight: optional, None or (batch_size,) FloatTensor for per sample weights
# feats: dict for features
# feats['dense']: (batch_size, num_dense) FloatTensor for dense features
# feats['<sparse_feature_name>']: for each sparse feature name (consistent
# with feature_config), it is a dict with two keys:
# 'data' and 'offsets'. See EmbeddingBag doc for the supported types.
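# Example feats dict (added for illustration), matching the contract above for
# a batch of 2 samples with one sparse feature named "sparse_0":
#   feats = {
#       "dense": torch.rand(2, num_dense),
#       "sparse_0": {"data": torch.LongTensor([3, 7, 7]),
#                    "offsets": torch.LongTensor([0, 1])},
#   }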
class BatchProcessor(object):
def __init__(
self,
feature_config=None,
dense_transform=None,
device=None,
dense_feature_clamp=-1.0,
):
self.feature_config = deepcopy(feature_config)
self.dense_transform = dense_transform
self.device = torch.device("cpu") if device is None else device
self.dense_feature_clamp = dense_feature_clamp
def save_transforms(self, filename):
_save_transforms(self.dense_transform, filename)
def load_transforms(self, filename):
self.dense_transform = _load_transforms(filename)
def share_memory(self):
if self.dense_transform is not None:
self.dense_transform.share_memory_()
def __call__(self):
raise NotImplementedError
class DenseBatchProcessor(BatchProcessor):
def __call__(self, mini_batch):
for k, v in mini_batch.items():
if k == "dense":
v = v if self.dense_transform is None else self.dense_transform(v)
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
elif k in ["label", "weight"]:
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
else:
raise ValueError("invalid mini_batch key")
label = mini_batch.pop("label", None)
weight = mini_batch.pop("weight", None)
return label, mini_batch, weight
class CriteoBatchProcessor(BatchProcessor):
def __call__(self, mini_batch, transform=True, reverse=0):
if reverse == 1:
for k, v in mini_batch.items():
if k in ["dense", "label"]:
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
else:
mini_batch[k] = {
"data": v["data"].to(device=self.device, dtype=torch.long),
"offsets": None,
}
elif reverse == 2:
for k, v in mini_batch.items():
if k in ["dense", "label"]:
mini_batch[k] = v.to(device=torch.device("cpu"), dtype=torch.float32)
else:
mini_batch[k] = {
"data": v["data"].to(device=torch.device("cpu"), dtype=torch.long),
"offsets": None,
}
else:
if transform:
for k, v in mini_batch.items():
if k == "dense":
v = v if self.dense_transform is None else self.dense_transform(v)
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
elif k == "label":
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
else:
mini_batch[k] = {
"data": v.to(device=self.device, dtype=torch.long),
"offsets": None,
}
# else:
# for k, v in mini_batch.items():
# mini_batch[k] = v
# label = mini_batch.pop("label", None)
label = mini_batch["label"]
# Criteo does not have sample weights
weight = None
return label, mini_batch, weight
def loadDataset(file):
"""
Loads dataset from NumPy format.
Inputs:
file (str): path to the npz file of dataset (Kaggle or Terabyte)
Outputs:
X_cat (np.ndarray): categorical features
X_int (np.ndarray): continuous features
y (np.ndarray): labels
counts (list): number of categories for each categorical feature
"""
# load and preprocess data
with np.load(file) as data:
X_int = data["X_int"]
X_cat = data["X_cat"]
y = data["y"]
counts = data["counts"]
return X_cat, X_int, y, counts
############################################################
# dense transform
############################################################
class DenseTransform(object):
def __init__(self, mean, std):
self.mean = mean.cpu()
self.std = std.cpu()
def __call__(self, dense):
return (dense - self.mean) / self.std
def share_memory_(self):
self.mean.share_memory_()
self.std.share_memory_()
def create_dense_transform(train_dataloader, batch_processor, num_batches):
mean = 0.0
num_samples = 0
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch >= num_batches:
break
_, feats, _ = batch_processor(mini_batch=sample_batched)
dense = feats["dense"]
num_samples += dense.shape[0]
mean += torch.sum(dense.to(dtype=torch.float), dim=0)
mean /= num_samples
var = 0.0
num_samples = 0
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch >= num_batches:
break
_, feats, _ = batch_processor(mini_batch=sample_batched)
dense = feats["dense"]
num_samples += dense.shape[0]
var += torch.sum((dense.to(dtype=torch.float) - mean) ** 2, dim=0)
std = torch.sqrt((var + kEpsilon) / num_samples)
return DenseTransform(mean=mean, std=std)
def create_dense_transform_from_synthetic():
# Because the synthetic dense features are sampled from a standard normal
# distribution, we simply use mean=0 and std=1 here.
# This is included only for benchmarking purposes.
return DenseTransform(mean=torch.tensor(0), std=torch.tensor(1))
def prepare_data(data_options, performance_options, CUDA="cuda:0", pin_memory=False):
if data_options.getType() == config.DataConfig.FROM_FILE:
data_option = data_options.get_from_file()
(
datasets,
batch_processor,
train_dataloader,
val_dataloader,
eval_dataloader,
) = prepare_criteo_data(data_option, performance_options, CUDA, pin_memory)
else:
raise ValueError("Unknown data option type.")
dense_transform = create_dense_transform(
train_dataloader,
batch_processor,
num_batches=int(data_option.num_samples_meta / data_option.batch_size),
)
batch_processor.dense_transform = dense_transform
return datasets, batch_processor, train_dataloader, val_dataloader, eval_dataloader
def prepare_criteo_data(data_options, performance_options, CUDA, pin_memory=False):
logger.info("Loading data from {}".format(data_options.data_file))
datasets = load_and_split_dataset(
npz_file=data_options.data_file, splits=data_options.splits
)
logger.info("Data loaded")
# pin_memory=True,
train_dataloader, val_dataloader, eval_dataloader = (
DataLoader(dataset,
batch_size=data_options.batch_size,
pin_memory=pin_memory,
num_workers=performance_options.num_readers) for dataset in datasets
)
batch_processor = CriteoBatchProcessor(
device=(
torch.device(CUDA)
if performance_options.use_gpu
else torch.device("cpu")
)
)
return datasets, batch_processor, train_dataloader, val_dataloader, eval_dataloader
|
AutoCTR-main
|
utils/data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import torch.nn as nn
from block_config import ttypes as b_config
from nasrec.blocks import set_block_from_config
from .base_net import BaseNet
from .utils import (
Optimizers,
apply_emb,
create_emb_dict,
create_optimizers_for_dense,
create_optimizers_for_embed,
)
logger = logging.getLogger(__name__)
class NASRecNet(BaseNet):
def __init__(self, model_config, feature_config):
super(NASRecNet, self).__init__(model_config, feature_config)
self.nasrec_net_option = self.model_config.get_nasrec_net()
self.num_block = len(self.nasrec_net_option.block_configs)
self._init_model_params()
self._build_arc()
def _init_model_params(self):
self.sparse_hash_size = {
item.name: int(item.hash_size)
for item in self.sparse_feature_options.features
}
self.feat_dim = {
"dense": {0: [self.num_dense_feat]},
"sparse": {
0: [self.sparse_feature_options.embed_dim] * self.num_sparse_feat
},
}
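# Shape note (added, illustrative numbers): feat_dim maps block_id -> feature
# dimensions per type.  For example, with 13 dense features, 26 sparse
# features and embed_dim=16, block 0 starts as
#   {"dense": {0: [13]}, "sparse": {0: [16] * 26}}
# and each block later registers its own output dims via block.dim_config().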
def _build_arc(self):
self.emb_dict = create_emb_dict(self.sparse_feature_options)
self.blocks = nn.ModuleList()
for block_config in self.nasrec_net_option.block_configs:
block = set_block_from_config(block_config, self.feat_dim)
self.feat_dim = block.dim_config(self.feat_dim)
self.blocks.append(block)
# build up final block
self.blocks.append(self._build_final_block())
def _build_final_block(self):
"""Construct the final block
"""
dense = deepcopy(self.feat_dim["dense"])
sparse = deepcopy(self.feat_dim["sparse"])
# make dicts of all feature ids (including intermediate features)
for block_id in dense:
if len(dense[block_id]) > 0:
dense[block_id] = list(range(dense[block_id][0]))
else:
dense[block_id] = []
for block_id in sparse:
sparse[block_id] = list(range(len(sparse[block_id])))
# remove the features that have already been used as intermediate input
for block_id in range(0, self.num_block):
dense_feat = self.blocks[block_id].feat_dense_id
sparse_feat = self.blocks[block_id].feat_sparse_id
for former_block_id in dense_feat:
tmp_ids = dense_feat[former_block_id]
dense[former_block_id] = (
(
[]
if tmp_ids == [-1]
else list(set(dense[former_block_id]) - set(tmp_ids))
)
if former_block_id in dense
else []
)
for former_block_id in sparse_feat:
tmp_ids = sparse_feat[former_block_id]
sparse[former_block_id] = (
(
[]
if tmp_ids == [-1]
else list(set(sparse[former_block_id]) - set(tmp_ids))
)
if former_block_id in sparse
else []
)
# convert feature dicts (dense & sparse) to feature configs
feat_configs = []
for block_id, feat_list in dense.items():
if block_id in sparse:
feat_config = b_config.FeatSelectionConfig(
block_id=block_id, dense=feat_list, sparse=sparse[block_id]
)
else:
feat_config = b_config.FeatSelectionConfig(
block_id=block_id, dense=feat_list, sparse=[]
)
feat_configs.append(feat_config)
for block_id, feat_list in sparse.items():
if block_id in dense:
continue
else:
feat_config = b_config.FeatSelectionConfig(
block_id=block_id, dense=[], sparse=feat_list
)
feat_configs.append(feat_config)
# construct the MLP block config
block_config = b_config.BlockConfig(
mlp_block=b_config.MLPBlockConfig(
name="MLPBlock",
block_id=self.num_block + 1,
arc=[1],
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=feat_configs,
ly_act=False,
)
)
return set_block_from_config(block_config, self.feat_dim)
def get_optimizers(self):
optimizers = Optimizers()
# add dense optimizers
create_optimizers_for_dense(
optimizers,
named_parameters=self.named_parameters(),
dense_optim_config=self.dense_feature_options.optim,
)
# add sparse optimizers
create_optimizers_for_embed(
optimizers,
emb_dict=self.emb_dict,
sparse_feature_options=self.sparse_feature_options,
)
return optimizers
def forward(self, feats):
# process sparse features (using embeddings), resulting in a list of row vectors
feat_dict = {"dense": {0: feats["dense"]}} # if self.num_dense_feat > 0 else []
ly = apply_emb(feats, self.emb_dict, self.sparse_hash_size)
feat_dict["sparse"] = {
0: {feat_id: ly[feat_id] for feat_id in range(self.num_sparse_feat)}
}
# blocks
for block in self.blocks:
feat_dict = block(feat_dict)
p = feat_dict["dense"][self.blocks[-1].block_id]
return p.view(-1)
|
AutoCTR-main
|
models/nas_modules.py
|
AutoCTR-main
|
models/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import logging
import torch
from config import ttypes as config
from .nas_modules import NASRecNet
logger = logging.getLogger(__name__)
def build_model(model_config, feature_config):
if model_config.getType() == config.ModelConfig.NASREC_NET:
return build_nasrec_net(model_config, feature_config)
else:
raise ValueError("Unknown model type.")
def build_nasrec_net(model_config, feature_config):
return NASRecNet(model_config=model_config, feature_config=feature_config)
def save_model(filename, model):
logger.warning("Saving model to {}".format(filename))
state = {
"state_dict": model.state_dict(),
"model_config": model.model_config,
"feature_config": model.feature_config,
}
torch.save(state, filename)
def load_model(filename):
logger.warning("Loading model from {}".format(filename))
state = torch.load(filename, map_location='cpu')
model_config = state["model_config"]
feature_config = state["feature_config"]
model = build_model(model_config=model_config, feature_config=feature_config)
model.load_state_dict(state["state_dict"])
return model
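# Round-trip sketch (added): save_model/load_model persist the weights together
# with the configs needed to rebuild the net, e.g.
#   save_model("best.ckp", model)
#   model2 = load_model("best.ckp")   # rebuilds via build_model(), then load_state_dict
# ("best.ckp" is only an illustrative filename.)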
|
AutoCTR-main
|
models/builder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import torch
import torch.nn as nn
from config import ttypes as config
logger = logging.getLogger(__name__)
def apply_emb(feats, emb_dict, sparse_hash_size):
ly = []
for name, E in emb_dict.items():
if name not in feats:
raise ValueError("feature {} missing from input! ".format(name))
val = feats[name]
hash_size = sparse_hash_size[name]
V = E(input=torch.remainder(val["data"], hash_size), offsets=val["offsets"])
ly.append(V)
return ly
def create_mlp(ln, ly_act=False):
ln = list(ln)
layers = nn.ModuleList()
for i in range(1, len(ln) - 1):
layers.append(nn.Linear(int(ln[i - 1]), int(ln[i]), bias=True))
layers.append(nn.ReLU())
layers.append(nn.Linear(int(ln[-2]), int(ln[-1]), bias=True))
if ly_act:
layers.append(nn.ReLU())
return torch.nn.Sequential(*layers)
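# Example (added): create_mlp([16, 8, 1]) builds
#   Linear(16, 8) -> ReLU -> Linear(8, 1)
# and create_mlp([16, 8, 1], ly_act=True) appends a trailing ReLU as well.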
def create_emb(sparse_feature, comm_embed_dim):
embed_dim = (
sparse_feature.embed_dim if sparse_feature.embed_dim > 0 else comm_embed_dim
)
hash_size = sparse_feature.hash_size
if sparse_feature.pooling.getType() == config.PoolingConfig.SUM:
mode = "sum"
elif sparse_feature.pooling.getType() == config.PoolingConfig.AVG:
mode = "mean"
else:
raise ValueError(
"Unknown pooling option: {}".format(sparse_feature.pooling.getType())
)
# return nn.EmbeddingBag(hash_size, embed_dim, sparse=True, mode=mode)
a = nn.EmbeddingBag(hash_size, embed_dim, sparse=True, mode=mode)
nn.init.normal_(a.weight, 0, 0.01)
return a
def create_emb_dict(sparse_feature_options):
comm_embed_dim = sparse_feature_options.embed_dim
return nn.ModuleDict(
{
item.name: create_emb(sparse_feature=item, comm_embed_dim=comm_embed_dim)
for item in sparse_feature_options.features
}
)
def create_optim(params, optim_config):
if optim_config.getType() == config.OptimConfig.SGD:
opt_config = optim_config.get_sgd()
return torch.optim.SGD(
params,
lr=opt_config.lr,
momentum=opt_config.momentum,
dampening=opt_config.dampening,
weight_decay=opt_config.weight_decay,
nesterov=opt_config.nesterov,
)
elif optim_config.getType() == config.OptimConfig.ADAGRAD:
opt_config = optim_config.get_adagrad()
return torch.optim.Adagrad(
params,
lr=opt_config.lr,
lr_decay=opt_config.lr_decay,
weight_decay=opt_config.weight_decay,
initial_accumulator_value=opt_config.initial_accumulator_value,
)
elif optim_config.getType() == config.OptimConfig.SPARSE_ADAM:
opt_config = optim_config.get_sparse_adam()
return torch.optim.SparseAdam(
params,
lr=opt_config.lr,
betas=(opt_config.betas0, opt_config.betas1),
eps=opt_config.eps,
)
elif optim_config.getType() == config.OptimConfig.ADAM:
opt_config = optim_config.get_adam()
return torch.optim.Adam(
params,
lr=opt_config.lr,
weight_decay=opt_config.weight_decay,
amsgrad=opt_config.amsgrad,
betas=(opt_config.betas0, opt_config.betas1),
eps=opt_config.eps,
)
elif optim_config.getType() == config.OptimConfig.RMSPROP:
opt_config = optim_config.get_rmsprop()
return torch.optim.RMSprop(
params,
lr=opt_config.lr,
weight_decay=opt_config.weight_decay,
alpha=opt_config.alpha,
momentum=opt_config.momentum,
centered=opt_config.centered,
eps=opt_config.eps,
)
else:
raise ValueError("unknown optimizer type: {}".format(optim_config))
class Optimizers(object):
def __init__(self, optimizers=None, named_optimizers=None):
self.optimizers = [] if optimizers is None else optimizers
self.named_optimizers = {} if named_optimizers is None else named_optimizers
def add(self, optimizer, name=None):
if name is None:
self.optimizers.append(optimizer)
else:
assert (
name not in self.named_optimizers
), "optimizer for {} already exist!".format(name)
self.named_optimizers[name] = optimizer
def zero_grad(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
for _, optimizer in self.named_optimizers.items():
optimizer.zero_grad()
def step(self):
for optimizer in self.optimizers:
optimizer.step()
for _, optimizer in self.named_optimizers.items():
optimizer.step()
# Assumes that embedding params have [sparse_name_key] (default "emb_dict")
# in their name. It is true for embeddings created via
# self.emb_dict = create_emb_dict(self.sparse_feature_options)
def create_optimizers_for_dense(
optimizers, named_parameters, dense_optim_config, sparse_name_key="emb_dict"
):
# Materialize named_parameters (usually a generator) so it can be iterated
# more than once below.
named_params = [(name, param) for name, param in named_parameters if sparse_name_key not in name]
params = [param for _, param in named_params]
logger.info(
"Creating optim for non-embedding params with config: "
"{}.".format(dense_optim_config)
)
logger.info(
"Creating optim for non-embedding params list: "
+ ", ".join([name for name, _ in named_params])
)
optimizers.add(
create_optim(params=params, optim_config=dense_optim_config), name="dense"
)
def create_optimizers_for_embed(optimizers, emb_dict, sparse_feature_options):
sparse_optim_config = sparse_feature_options.optim
for item in sparse_feature_options.features:
name = item.name
item_optim_config = sparse_optim_config if item.optim is None else item.optim
logger.info(
"Creating optim for {} with config: {}".format(name, item_optim_config)
)
optimizers.add(
create_optim(
params=emb_dict[name].parameters(), optim_config=item_optim_config
),
name=name,
)
|
AutoCTR-main
|
models/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import torch.nn as nn
logger = logging.getLogger(__name__)
class BaseNet(nn.Module):
def __init__(self, model_config, feature_config):
super(BaseNet, self).__init__()
        # for serialization purposes
self.model_config = deepcopy(model_config)
self.feature_config = deepcopy(feature_config)
self.dense_feature_options = self.feature_config.dense
self.sparse_feature_options = self.feature_config.sparse
self.num_dense_feat = len(self.dense_feature_options.features)
self.num_sparse_feat = len(self.sparse_feature_options.features)
def _build_arc(self):
raise NotImplementedError
def get_optimizers(self):
raise NotImplementedError
def forward(self, fs):
raise NotImplementedError
|
AutoCTR-main
|
models/base_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import numpy as np
import lightgbm as lgb
import scipy.stats as ss
from config import ttypes as config
from models.nas_modules import NASRecNet
from .base_searcher import BaseSearcher
logger = logging.getLogger(__name__)
def nCr(n, r):
    # binomial coefficient C(n, r)
    f = math.factorial
    return f(n) / f(r) / f(n - r)
def prob_comb(population_size, candidate_size):
prob = []
for rank in range(population_size, 0, -1):
        prob.append(
            nCr(rank + candidate_size - 1, candidate_size)
            / nCr(population_size + candidate_size, candidate_size + 1)
        )
return prob
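# Note (added): by the hockey-stick identity,
#     sum_{rank=1..P} C(rank + C - 1, C) = C(P + C, C + 1),
# the probabilities returned by prob_comb sum to 1, with prob[0] the largest
# entry; combined with _selection_candidate(type=1), architectures ranked best
# (lowest validation value) are selected most often. A quick sanity check:
#     assert abs(sum(prob_comb(10, 3)) - 1.0) < 1e-9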
class EvolutionaryController(BaseSearcher):
"""Aging evolution: https://arxiv.org/abs/1802.01548
"""
def __init__(self, searcher_config, feature_config):
super(EvolutionaryController, self).__init__(searcher_config, feature_config)
self.controller_option = searcher_config.get_evolutionary_searcher()
self._init_base_searcher_params()
self.population_size = self.controller_option.population_size
self.candidate_size = self.controller_option.candidate_size
self.all_arc_vecs = None
self.all_rewards = None
self.all_params = None
self.all_flops = None
self.sampler_type = 1
self.eval_at = 3
self._build_arc()
self.sample_prob = prob_comb(self.population_size, self.candidate_size)
def _build_arc(self):
self.population_arc_queue = []
self.population_val_queue = []
def _selection_candidate(self, type=0):
if type == 0:
candidate_indices = np.sort(
np.random.choice(
self.population_size, self.candidate_size, replace=False
)
)
candidate_arcs = list(
map(self.population_arc_queue.__getitem__, candidate_indices)
)
candidate_vals = list(
map(self.population_val_queue.__getitem__, candidate_indices)
)
best_arc_idx = np.argmin(candidate_vals)
best_arc = candidate_arcs[best_arc_idx]
elif type == 1:
rank = ss.rankdata(np.array(self.population_val_queue), method='ordinal')
tmp_prob = [self.sample_prob[i-1] for i in rank]
best_arc_idx = np.random.choice(list(range(self.population_size)), p=tmp_prob)
best_arc = self.population_arc_queue[best_arc_idx]
return best_arc_idx, best_arc
def sample(self, batch_size=1, return_config=False, is_initial=True):
"""sample a batch_size number of NasRecNets from the controller, where
each node is made up of a set of blocks with number self.num_blocks.
If is_initial=True, random sample a batch size of arcs into population,
else sample a candidate size arch from population queue, get the best one,
mutate the best one to a new arch, repeat this a batch_size of time.
"""
if batch_size < 1:
raise ValueError("Wrong batch_size.")
nasrec_nets, all_vec_configs, nasrec_arc_vecs = [], [], []
for _ in range(batch_size):
if is_initial:
vecs, vec_configs = self.random_sample()
else:
best_arc_idx, best_arc = self._selection_candidate(type=1)
# mutate to get child
if self.sampler_type > 1:
vecs, vec_configs = self.ML_sampler(parent=best_arc)
else:
vecs, vec_configs = self.mutate_arc(parent=best_arc)
arc_vec = np.concatenate(vecs)
nasrec_arc_vecs.append(arc_vec)
all_vec_configs.append(vec_configs)
block_configs = self.vecs_to_model_config(vec_configs)
model_config = config.ModelConfig(
nasrec_net=config.NASRecNetConfig(block_configs=block_configs)
)
if return_config:
nasrec_nets.append(model_config)
else:
nasrec_nets.append(NASRecNet(model_config, self.feature_config))
return nasrec_nets, [], all_vec_configs, nasrec_arc_vecs
def update(self, actions, rewards, survival_type="age"):
"""add k new archs into the population queue and
kick out the k oldest archs"""
# add child to right of population
self.population_arc_queue += actions
self.population_val_queue += rewards
if survival_type == "age":
self.population_arc_queue = self.population_arc_queue[-self.population_size:]
self.population_val_queue = self.population_val_queue[-self.population_size:]
elif survival_type == "comb":
self.comb()
else:
if survival_type == "fit":
idx = sorted(
range(len(self.population_val_queue)),
key=lambda i: self.population_val_queue[i], reverse=True
)[-self.population_size:]
elif survival_type == "mix":
division = int(0.5 * self.population_size)
tmp_rewards = self.population_val_queue[:-division]
idx = sorted(range(len(tmp_rewards)), key=lambda i: tmp_rewards[i], reverse=True)[-division:]
age_arcs = self.population_arc_queue[-division:]
age_vals = self.population_val_queue[-division:]
self.population_arc_queue = np.array(self.population_arc_queue)[idx].tolist()
self.population_val_queue = np.array(self.population_val_queue)[idx].tolist()
if survival_type == "mix":
self.population_arc_queue += age_arcs
self.population_val_queue += age_vals
# if keep_largest:
# idx = sorted(
# range(len(self.population_val_queue)),
# key=lambda i: self.population_val_queue[i], reverse=True
# )[-self.population_size:]
# self.population_arc_queue = np.array(self.population_arc_queue)[idx].tolist()
# self.population_val_queue = np.array(self.population_val_queue)[idx].tolist()
# else:
# # remove dead from left of population if exceed population_size
# self.population_arc_queue = self.population_arc_queue[-self.population_size :]
# self.population_val_queue = self.population_val_queue[-self.population_size :]
if self.sampler_type > 1:
# QQ TODO: build GBDT_rank:
self.update_GBDT()
def comb(self, trade_off=[0.1, 1, 0.1, 1]):
if len(self.all_rewards) <= self.population_size:
self.population_arc_queue = self.all_actions[-self.population_size:]
self.population_val_queue = self.all_rewards[-self.population_size:]
else:
if trade_off[3] == 0:
rank_weight = ss.rankdata(np.array(self.all_rewards)) / len(self.all_rewards)
age_weight = np.array(range(len(self.all_rewards), 0, -1)) / len(self.all_rewards)
age_weight[:self.population_size] = age_weight[self.population_size - 1]
flops_weight = ss.rankdata(np.array(self.all_flops)) / len(self.all_flops)
all_weight = trade_off[0] * rank_weight + trade_off[1] * age_weight + trade_off[2] * flops_weight
                idx = np.array(
                    sorted(range(len(all_weight)), key=lambda i: all_weight[i])
                )[:self.population_size]
self.population_arc_queue = np.array(self.all_actions)[idx].tolist()
self.population_val_queue = np.array(self.all_rewards)[idx].tolist()
elif trade_off[3] == 1:
age_weight = np.array(range(len(self.all_rewards), 0, -1)) / len(self.all_rewards)
age_weight[:self.population_size] = age_weight[self.population_size - 1]
# filter with age weight
idx1 = np.array(
sorted(range(len(age_weight)), key=lambda i: age_weight[i]))[:2*self.population_size]
age_rewards = np.array(self.all_rewards)[idx1].tolist()
age_actions = np.array(self.all_actions)[idx1].tolist()
age_flops = np.array(self.all_flops)[idx1].tolist()
rank_weight = ss.rankdata(np.array(age_rewards)) / len(age_rewards)
age_weight = np.array(age_weight)[idx1]
flops_weight = ss.rankdata(np.array(age_flops)) / len(age_flops)
all_weight = trade_off[0] * rank_weight + trade_off[1] * age_weight + trade_off[2] * flops_weight
                idx2 = np.array(
                    sorted(range(len(all_weight)), key=lambda i: all_weight[i])
                )[:self.population_size]
self.population_arc_queue = np.array(age_actions)[idx2].tolist()
self.population_val_queue = np.array(age_rewards)[idx2].tolist()
def update_GBDT(self):
k = len(self.all_arc_vecs)
r = 0.8
# create dataset for lightgbm
        split = int(k * r)
        X_train = np.array(self.all_arc_vecs[:split])
        X_test = np.array(self.all_arc_vecs[split:])
        y_train1 = np.array(self.all_rewards[:split])
        y_test1 = np.array(self.all_rewards[split:])
        logger.warning('Train Shape {} {} {} {}'.format(
            X_train.shape, X_test.shape, y_train1.shape, y_test1.shape))
y_train = ss.rankdata(-y_train1) - 1
y_test = ss.rankdata(-y_test1) - 1
y_train = y_train.astype(int)
y_test = y_test.astype(int)
lgb_train = lgb.Dataset(X_train, y_train, group=np.array([len(y_train)])) # free_raw_data=False
lgb_eval = lgb.Dataset(X_test, y_test, group=np.array([len(y_test)]),
reference=lgb_train) # ,free_raw_data=False
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'lambdarank', # 'regression', #
'metric': "ndcg", # "auc", #"ndcg", # {'l2', 'l1'},
'label_gain': np.array(list(range(len(y_train)))) * 2, #
'max_depth': 3, # 'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'eval_at': self.eval_at,
'bagging_freq': 5,
'verbose': 0,
'num_threads': 5,
}
logger.warning('Starting training...')
# train
self.gbm = lgb.train(params,
lgb_train,
num_boost_round=1500,
valid_sets=lgb_eval,
early_stopping_rounds=150)
        logger.warning('Finished training.')
def ML_sampler(self, parent):
vecs_list, arc_vec_list, vec_configs_list = [], [], []
i = 0
while i < self.sampler_type:
vecs, vec_configs = self.mutate_arc(parent=parent)
arc_vec = np.concatenate(vecs)
# check current
repeat_idx = (
[]
if not arc_vec_list
else np.where(
np.sum(abs(np.array(arc_vec_list) - arc_vec), 1) == 0
)[0]
)
if len(repeat_idx) != 0:
logger.warning("The architecture is same with: {}.".format(repeat_idx))
continue
# check all
repeat_idx = (
[]
if not self.all_arc_vecs
else np.where(
np.sum(abs(np.array(self.all_arc_vecs) - arc_vec), 1) == 0
)[0]
)
if len(repeat_idx) != 0:
logger.warning("The architecture is same all_arc_vectors with: {}.".format(repeat_idx))
continue
vecs_list.append(vecs)
arc_vec_list.append(arc_vec)
vec_configs_list.append(vec_configs)
i += 1
logger.warning('Test Shape {}'.format(np.array(arc_vec_list).shape))
y_pred = self.gbm.predict(np.array(arc_vec_list), num_iteration=self.gbm.best_iteration)
idx = np.where(y_pred == np.max(y_pred))[0][0]
return vecs_list[idx], vec_configs_list[idx]
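# Illustrative search-loop sketch (added; not part of the original file). It
# assumes an already-constructed EvolutionaryController `searcher` and a
# hypothetical `evaluate(net)` callable returning a validation loss (lower is
# better); both are placeholders.
def _aging_evolution_sketch(searcher, evaluate, num_rounds=100, batch_size=4):
    # warm-up: fill the population with randomly sampled architectures
    nets, _, vec_configs, _ = searcher.sample(
        batch_size=searcher.population_size, is_initial=True
    )
    searcher.update(actions=vec_configs, rewards=[evaluate(n) for n in nets])
    # evolution: select + mutate children, evaluate them, then age the population
    for _ in range(num_rounds):
        nets, _, vec_configs, _ = searcher.sample(
            batch_size=batch_size, is_initial=False
        )
        rewards = [evaluate(n) for n in nets]
        searcher.update(actions=vec_configs, rewards=rewards, survival_type="age")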
|
AutoCTR-main
|
nasrec/evolutionary_controller.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from block_config import ttypes as b_config
from config import ttypes as config
logger = logging.getLogger(__name__)
class BaseSearcher(nn.Module):
def __init__(self, searcher_config, feature_config):
super(BaseSearcher, self).__init__()
        # for serialization purposes
self.searcher_config = deepcopy(searcher_config)
self.feature_config = deepcopy(feature_config)
self.dense_feature_options = self.feature_config.dense
self.sparse_feature_options = self.feature_config.sparse
self.num_dense_feat = len(self.dense_feature_options.features)
self.num_sparse_feat = len(self.sparse_feature_options.features)
def _set_micro_space_from_config(self):
# get micro space type list
self.micro_space_types = [
space_type.getType()
for space_type in self.controller_option.micro_space_types
]
        # get feature processing type list
self.feature_processing_type = [
processing_type.getType()
for processing_type in self.controller_option.feature_processing_type
]
# set up corresponding micro space
for space_type in self.controller_option.micro_space_types:
if space_type.getType() == config.MicroSearchSpaceType.MICRO_MLP:
self.micro_mlp_option = space_type.get_micro_mlp()
elif space_type.getType() == config.MicroSearchSpaceType.MICRO_CIN:
self.micro_cin_option = space_type.get_micro_cin()
if len(self.micro_cin_option.arc) == 0:
self.micro_cin_option.arc = [128]
if len(self.micro_cin_option.num_of_layers) == 0:
self.micro_cin_option.num_of_layers = [1]
elif space_type.getType() == config.MicroSearchSpaceType.MICRO_ATTENTION:
self.micro_attention_option = space_type.get_micro_attention()
if len(self.micro_attention_option.num_of_layers) == 0:
self.micro_attention_option.num_of_layers = [1]
if len(self.micro_attention_option.num_of_heads) == 0:
self.micro_attention_option.num_of_heads = [2]
if len(self.micro_attention_option.att_embed_dim) == 0:
self.micro_attention_option.att_embed_dim = [10]
if len(self.micro_attention_option.dropout_prob) == 0:
self.micro_attention_option.dropout_prob = [0.0]
def _init_base_searcher_params(self):
# get micro search space configurations
self._set_micro_space_from_config()
# constraint search space
if (
self.controller_option.macro_space_type
== config.MacroSearchSpaceType.INPUT_GROUP
):
self.num_dense_feat = 1
self.num_sparse_feat = 1
# length of the DAG to be searched (exclude the final clf layer)
self.num_blocks = self.controller_option.max_num_block
# block_types to be searched
self.block_types = list(set(self.controller_option.block_types))
self.num_block_type = len(self.block_types)
if self.num_block_type == 0:
raise ValueError("Should provide at least one block type to be searched.")
# construct dictionaries to map between int and block types
self.type_int_dict = {
self.block_types[i]: i for i in range(self.num_block_type)
}
self.int_type_dict = {
i: self.block_types[i] for i in range(self.num_block_type)
}
# all tokens to be searched
self.num_tokens = {
"block_type": self.num_block_type,
"dense_feat": self.num_dense_feat,
"sparse_feat": self.num_sparse_feat,
"skip_connect": self.num_blocks,
}
self.token_names = ["block_type", "dense_feat", "sparse_feat", "skip_connect"]
if (
self.controller_option.macro_space_type
== config.MacroSearchSpaceType.INPUT_ELASTIC_PRIOR
):
# constraint search space with smooth learnable priors
self.num_tokens["elastic_prior"] = 2
self.token_names.append("elastic_prior")
self.num_total_tokens = sum(v for _, v in self.num_tokens.items())
if config.MicroSearchSpaceType.MICRO_MLP in self.micro_space_types:
if (
b_config.ExtendedBlockType.MLP_DENSE
in self.controller_option.block_types
):
self.num_tokens["mlp_dense"] = len(self.micro_mlp_option.arc)
self.token_names.append("mlp_dense")
self.num_total_tokens += 1
if b_config.ExtendedBlockType.MLP_EMB in self.controller_option.block_types:
self.num_tokens["mlp_emb"] = len(self.micro_mlp_option.arc)
self.token_names.append("mlp_emb")
self.num_total_tokens += 1
if config.MicroSearchSpaceType.MICRO_CIN in self.micro_space_types:
if b_config.ExtendedBlockType.CIN in self.controller_option.block_types:
self.num_tokens["cin"] = len(self.micro_cin_option.arc) + len(
self.micro_cin_option.num_of_layers
)
self.token_names.append("cin")
self.num_total_tokens += 1 if len(self.micro_cin_option.arc) > 0 else 0
self.num_total_tokens += (
1 if len(self.micro_cin_option.num_of_layers) > 0 else 0
)
if config.MicroSearchSpaceType.MICRO_ATTENTION in self.micro_space_types:
if (
b_config.ExtendedBlockType.ATTENTION
in self.controller_option.block_types
):
self.att_num_tokens = {
"head": len(self.micro_attention_option.num_of_heads),
"layer": len(self.micro_attention_option.num_of_layers),
"emb": len(self.micro_attention_option.att_embed_dim),
"drop": len(self.micro_attention_option.dropout_prob),
}
self.num_tokens["attention"] = sum(
v for _, v in self.att_num_tokens.items()
)
self.token_names.append("attention")
for _, v in self.att_num_tokens.items():
self.num_total_tokens += 1 if v != 0 else 0
def _build_arc(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def random_sample(self):
vec_configs, vecs = [], []
for b_id in range(self.num_blocks):
# macro random search
block_type_vec = np.random.multinomial(
1, [1.0 / self.num_block_type] * self.num_block_type
)
block_type_id = np.argmax(block_type_vec)
dense_feat_vec = np.random.binomial(1, 0.5, self.num_dense_feat)
sparse_feat_vec = np.random.binomial(1, 0.5, self.num_sparse_feat)
skip_connection_vec = np.random.binomial(1, 0.5, self.num_blocks)
skip_connection_vec[b_id:] = 0 # cannot connect with later block
vec_config = {
"block_type": block_type_id,
"dense_feat": dense_feat_vec,
"sparse_feat": sparse_feat_vec,
"skip_connect": skip_connection_vec,
}
# micro random search
mlp_dense_vec, mlp_emb_vec, cin_vec, att_vec = (
np.array([]),
np.array([]),
np.array([]),
np.array([]),
)
if config.MicroSearchSpaceType.MICRO_MLP in self.micro_space_types:
if (
b_config.ExtendedBlockType.MLP_DENSE
in self.controller_option.block_types
):
mlp_dense_vec = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_mlp_option.arc)]
* len(self.micro_mlp_option.arc),
)
)
vec_config["mlp_dense"] = mlp_dense_vec
mlp_dense_vec = np.array([mlp_dense_vec])
if (
b_config.ExtendedBlockType.MLP_EMB
in self.controller_option.block_types
):
mlp_emb_vec = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_mlp_option.arc)]
* len(self.micro_mlp_option.arc),
)
)
vec_config["mlp_emb"] = mlp_emb_vec
mlp_emb_vec = np.array([mlp_emb_vec])
if config.MicroSearchSpaceType.MICRO_CIN in self.micro_space_types:
if b_config.ExtendedBlockType.CIN in self.controller_option.block_types:
cin_width = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.arc)]
* len(self.micro_cin_option.arc),
)
)
cin_depth = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.num_of_layers)]
* len(self.micro_cin_option.num_of_layers),
)
)
cin_vec = np.array([cin_width, cin_depth])
vec_config["cin"] = {"width": cin_width, "depth": cin_depth}
if config.MicroSearchSpaceType.MICRO_ATTENTION in self.micro_space_types:
if (
b_config.ExtendedBlockType.ATTENTION
in self.controller_option.block_types
):
att_head = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["head"]]
* self.att_num_tokens["head"],
)
)
att_layer = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["layer"]]
* self.att_num_tokens["layer"],
)
)
att_emb_dim = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["emb"]]
* self.att_num_tokens["emb"],
)
)
att_dropout_prob = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["drop"]]
* self.att_num_tokens["drop"],
)
)
att_vec = np.array(
[att_head, att_layer, att_emb_dim, att_dropout_prob]
)
vec_config["attention"] = {
"head": att_head,
"layer": att_layer,
"emb": att_emb_dim,
"drop": att_dropout_prob,
}
block_vec = np.concatenate(
[
block_type_vec,
dense_feat_vec,
sparse_feat_vec,
skip_connection_vec,
mlp_dense_vec,
mlp_emb_vec,
cin_vec,
att_vec,
]
)
vecs.append(block_vec)
vec_configs.append(vec_config)
# cat the config of a architecture to one vector
return vecs, vec_configs
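    # Illustrative note (added): each entry of vec_configs returned above is a
    # per-block dict whose exact keys depend on the configured search space, e.g.
    #   {"block_type": 2,
    #    "dense_feat": np.array([1, 0, ...]),       # mask over dense features
    #    "sparse_feat": np.array([0, 1, ...]),      # mask over sparse features
    #    "skip_connect": np.array([0, 1, 0, ...]),  # mask over earlier blocks
    #    "cin": {"width": 0, "depth": 1}}           # micro-space choice, if enabled
    # vecs holds the same information flattened into one numeric vector per block.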
def block_type_to_int(self, block_config):
if block_config.getType() == b_config.BlockConfig.MLP_BLOCK:
block_option = block_config.get_mlp_block()
key = (
b_config.ExtendedBlockType.MLP_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.MLP_EMB
)
elif block_config.getType() == b_config.BlockConfig.CROSSNET_BLOCK:
block_option = block_config.get_crossnet_block()
key = b_config.ExtendedBlockType.CROSSNET
elif block_config.getType() == b_config.BlockConfig.FM_BLOCK:
block_option = block_config.get_fm_block()
key = (
b_config.ExtendedBlockType.FM_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.FM_EMB
)
elif block_config.getType() == b_config.BlockConfig.DOTPROCESSOR_BLOCK:
block_option = block_config.get_dotprocessor_block()
key = (
b_config.ExtendedBlockType.DOTPROCESSOR_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.DOTPROCESSOR_EMB
)
elif block_config.getType() == b_config.BlockConfig.CAT_BLOCK:
block_option = block_config.get_cat_block()
key = (
b_config.ExtendedBlockType.CAT_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.CAT_EMB
)
elif block_config.getType() == b_config.BlockConfig.CIN:
block_option = block_config.get_cin_block()
key = b_config.ExtendedBlockType.CIN
elif block_config.getType() == b_config.BlockConfig.ATTENTION:
block_option = block_config.get_attention_block()
key = b_config.ExtendedBlockType.ATTENTION
return self.type_int_dict[key], block_option
def vecs_to_model_config(self, vecs):
block_configs = []
for block_id, vec in enumerate(vecs):
block_configs.append(self.vec_to_block_config(vec, block_id + 1))
return block_configs
def vec_to_block_config(self, vec, block_id):
"""convert a controller vector to block_config
"""
# split a vector and convert the corresponding part to the id format
block_type_id = (
vec["block_type"].numpy()[0]
if type(vec["block_type"]) is torch.Tensor
else vec["block_type"]
)
input_dense = vec["dense_feat"]
input_sparse = vec["sparse_feat"]
skip_connection = vec["skip_connect"]
if (
self.controller_option.macro_space_type
== config.MacroSearchSpaceType.INPUT_GROUP
):
input_dense_id = [-1] if input_dense == 1 else []
input_sparse_id = [-1] if input_sparse == 1 else []
else:
input_dense_id = [i for i, e in enumerate(input_dense) if e == 1]
input_sparse_id = [i for i, e in enumerate(input_sparse) if e == 1]
skip_connection_id = [
i + 1 for i, e in enumerate(skip_connection) if e == 1 and i + 1 < block_id
]
dense_as_sparse = (
True
if config.FeatureProcessingType.IDASP in self.feature_processing_type
else False
)
# construct input config
        # original input features
input_feat_config = [
b_config.FeatSelectionConfig(
block_id=0, dense=input_dense_id, sparse=input_sparse_id
)
]
# input from other blocks' outputs
input_feat_config += [
b_config.FeatSelectionConfig(block_id=id, dense=[-1], sparse=[-1])
for id in skip_connection_id
]
comm_embed_dim = self.sparse_feature_options.embed_dim
block_type = self.int_type_dict[block_type_id]
if block_type == b_config.ExtendedBlockType.CROSSNET:
block_config = b_config.BlockConfig(
crossnet_block=b_config.CrossNetBlockConfig(
name="CrossNetBlocks",
block_id=block_id,
num_of_layers=1,
input_feat_config=input_feat_config,
cross_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.ATTENTION:
head, layer, emb, drop = (
(
self.micro_attention_option.num_of_heads[vec["attention"]["head"]],
self.micro_attention_option.num_of_layers[
vec["attention"]["layer"]
],
self.micro_attention_option.att_embed_dim[vec["attention"]["emb"]],
self.micro_attention_option.dropout_prob[vec["attention"]["drop"]],
)
if "attention" in vec
else (2, 1, 10, 0.0)
)
block_config = b_config.BlockConfig(
attention_block=b_config.AttentionBlockConfig(
name="AttentionBlock",
block_id=block_id,
input_feat_config=input_feat_config,
emb_config=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim, dense_as_sparse=dense_as_sparse
),
att_embed_dim=emb,
num_of_heads=head,
num_of_layers=layer,
dropout_prob=drop,
use_res=True,
batchnorm=False,
)
)
elif block_type == b_config.ExtendedBlockType.CIN:
arc = (
[self.micro_cin_option.arc[vec["cin"]["width"]]]
* self.micro_cin_option.num_of_layers[vec["cin"]["depth"]]
if "cin" in vec
else [128]
)
block_config = b_config.BlockConfig(
cin_block=b_config.CINBlockConfig(
name="CINBlock",
block_id=block_id,
emb_config=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim, dense_as_sparse=dense_as_sparse
),
arc=arc,
split_half=True,
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.MLP_DENSE:
arc = (
self.micro_mlp_option.arc[vec["mlp_dense"]]
if "mlp_dense" in vec
else 128
)
block_config = b_config.BlockConfig(
mlp_block=b_config.MLPBlockConfig(
name="MLPBlock",
block_id=block_id,
arc=[arc],
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.MLP_EMB:
arc = self.micro_mlp_option.arc[vec["mlp_emb"]] if "mlp_emb" in vec else 128
block_config = b_config.BlockConfig(
mlp_block=b_config.MLPBlockConfig(
name="MLPBlock",
block_id=block_id,
arc=[arc],
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.FM_DENSE:
block_config = b_config.BlockConfig(
fm_block=b_config.FMBlockConfig(
name="FMBlock",
block_id=block_id,
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.FM_EMB:
block_config = b_config.BlockConfig(
fm_block=b_config.FMBlockConfig(
name="FMBlock",
block_id=block_id,
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.DOTPROCESSOR_DENSE:
block_config = b_config.BlockConfig(
dotprocessor_block=b_config.DotProcessorBlockConfig(
name="DotProcessorBlock",
block_id=block_id,
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.DOTPROCESSOR_EMB:
block_config = b_config.BlockConfig(
dotprocessor_block=b_config.DotProcessorBlockConfig(
name="DotProcessorBlock",
block_id=block_id,
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.CAT_DENSE:
block_config = b_config.BlockConfig(
cat_block=b_config.CatBlockConfig(
name="CatBlock",
block_id=block_id,
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.CAT_EMB:
block_config = b_config.BlockConfig(
cat_block=b_config.CatBlockConfig(
name="CatBlock",
block_id=block_id,
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
return block_config
def dicts_to_vecs(self, dicts):
vecs = []
for block in dicts:
for token_name in self.num_tokens:
if token_name in ["block_type"]:
tmp_vec = np.zeros([self.num_tokens[token_name]])
tmp_vec[block[token_name]] = 1.0
vecs.append(tmp_vec)
elif token_name in ["mlp_dense", "mlp_emb"]:
tmp_vec = np.array([block[token_name]])
vecs.append(tmp_vec)
elif token_name == "cin":
tmp_vec = np.array([block["cin"]["width"], block["cin"]["depth"]])
vecs.append(tmp_vec)
elif token_name == "attention":
tmp_vec = np.array(
[
block["attention"]["head"],
block["attention"]["layer"],
block["attention"]["emb"],
block["attention"]["drop"],
]
)
vecs.append(tmp_vec)
else:
vecs.append(block[token_name])
return vecs
def _action_equal(self, action1, action2):
return (
action1 == action2
if type(action1) == dict
else np.array_equal(action1, action2)
)
def mutate_arc(self, parent):
child = deepcopy(parent)
# 1. choose block to mutate
block_id = np.random.choice(self.num_blocks, 1)[0]
# 2. choose one token of a block to mutate (e.g., block_type, dense_feat)
token_name = np.random.choice(self.token_names, 1)[0]
while token_name == "skip_connect" and block_id == 0:
block_id = np.random.choice(self.num_blocks, 1)[0]
token_name = np.random.choice(self.token_names, 1)[0]
while (
token_name == "cin"
and len(self.micro_cin_option.arc) == 1
and len(self.micro_cin_option.num_of_layers) == 1
) or (
token_name == "attention"
and self.att_num_tokens["head"] == 1
and self.att_num_tokens["layer"] == 1
and self.att_num_tokens["emb"] == 1
and self.att_num_tokens["drop"] == 1
):
token_name = np.random.choice(self.token_names, 1)[0]
# 3. mutate the corresponding token
new_action = child[block_id][token_name]
while self._action_equal(new_action, child[block_id][token_name]):
if token_name in ["block_type", "mlp_dense", "mlp_emb"]:
new_action_vec = np.random.multinomial(
1, [1.0 / self.num_tokens[token_name]] * self.num_tokens[token_name]
)
new_action = np.argmax(new_action_vec)
elif token_name == "cin":
cin_width = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.arc)]
* len(self.micro_cin_option.arc),
)
)
cin_depth = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.num_of_layers)]
* len(self.micro_cin_option.num_of_layers),
)
)
new_action = {"width": cin_width, "depth": cin_depth}
elif token_name == "attention":
head = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["head"]]
* self.att_num_tokens["head"],
)
)
layer = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["layer"]]
* self.att_num_tokens["layer"],
)
)
emb = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["emb"]] * self.att_num_tokens["emb"],
)
)
drop = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["drop"]]
* self.att_num_tokens["drop"],
)
)
new_action = {"head": head, "layer": layer, "emb": emb, "drop": drop}
else:
new_action = np.random.binomial(1, 0.5, self.num_tokens[token_name])
child[block_id][token_name] = new_action
vecs = self.dicts_to_vecs(child)
return vecs, child
|
AutoCTR-main
|
nasrec/base_searcher.py
|
AutoCTR-main
|
nasrec/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import torch
from config import ttypes as config
from .evolutionary_controller import EvolutionaryController
from .random_controller import RandomController
logger = logging.getLogger(__name__)
def build_searcher(searcher_config, feature_config):
if searcher_config.getType() == config.SearcherConfig.RANDOM_SEARCHER:
return build_random_searcher(searcher_config, feature_config)
elif searcher_config.getType() == config.SearcherConfig.EVOLUTIONARY_SEARCHER:
return build_evolutionary_searcher(searcher_config, feature_config)
else:
raise ValueError("Unknown searcher type.")
def build_random_searcher(searcher_config, feature_config):
return RandomController(
searcher_config=searcher_config, feature_config=feature_config
)
def build_evolutionary_searcher(searcher_config, feature_config):
return EvolutionaryController(
searcher_config=searcher_config, feature_config=feature_config
)
def save_searcher(filename, searcher):
logger.info("Saving searcher to {}".format(filename))
state = {
"state_dict": searcher.state_dict(),
"searcher_config": searcher.searcher_config,
"feature_config": searcher.feature_config,
}
torch.save(state, filename)
def load_searcher(filename):
logger.info("Loading searcher from {}".format(filename))
state = torch.load(filename)
searcher_config = state["searcher_config"]
feature_config = state["feature_config"]
searcher = build_searcher(
searcher_config=searcher_config, feature_config=feature_config
)
searcher.load_state_dict(state["state_dict"])
return searcher
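# Illustrative round-trip sketch (added), assuming searcher_config and
# feature_config objects are already available:
#     searcher = build_searcher(searcher_config, feature_config)
#     save_searcher("/tmp/searcher.pt", searcher)
#     restored = load_searcher("/tmp/searcher.pt")
# load_searcher rebuilds the searcher from the stored configs before loading the
# state_dict, so the checkpoint is self-describing.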
|
AutoCTR-main
|
nasrec/builder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from config import ttypes as config
from models.nas_modules import NASRecNet
from .base_searcher import BaseSearcher
logger = logging.getLogger(__name__)
class RandomController(BaseSearcher):
def __init__(self, searcher_config, feature_config):
super(RandomController, self).__init__(searcher_config, feature_config)
self.controller_option = searcher_config.get_random_searcher()
self._init_base_searcher_params()
def _build_arc(self):
pass
def sample(self, batch_size=1, return_config=False):
"""Samples a batch_size number of NasRecNets from the controller, where
each node is made up of a set of blocks with number self.num_blocks
"""
if batch_size < 1:
raise ValueError("Wrong batch_size.")
nasrec_nets, all_vec_configs, nasrec_arc_vecs = [], [], []
for _ in range(batch_size):
vecs, vec_configs = self.random_sample()
arc_vec = np.concatenate(vecs)
nasrec_arc_vecs.append(arc_vec)
all_vec_configs.append(vec_configs)
block_configs = self.vecs_to_model_config(vec_configs)
model_config = config.ModelConfig(
nasrec_net=config.NASRecNetConfig(block_configs=block_configs)
)
if return_config:
nasrec_nets.append(model_config)
else:
nasrec_nets.append(NASRecNet(model_config, self.feature_config))
return nasrec_nets, [], all_vec_configs, nasrec_arc_vecs
def update(self, probs, rewards):
pass
|
AutoCTR-main
|
nasrec/random_controller.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import torch
import torch.nn as nn
from graphviz import Digraph
logger = logging.getLogger(__name__)
def reward_normalization(rewards, alpha=3, bias=0.5):
return rewards
# return 0.5 * np.tanh((rewards - bias) * alpha) + 0.5
def clean_feat_id(feat_ids, feat_dim, feat_type):
"""check and modify feat_ids, remove nonexist features and empty block
Args:
feat_ids: dictionary of {block:feat_ids} to be cleaned
feat_dim: dictionary of {block:feat_dim} used to clean feat_ids
feat_type: a string indicating feature type (i.e., "dense" or "sprase")
"""
tmp = {
k: [
feat_id
for feat_id in set(feat_ids[k])
            if feat_id < (feat_dim[k][0] if feat_type == "dense" else len(feat_dim[k]))
]
for k in set(feat_ids).intersection(set(feat_dim))
}
# remove empty and sorted
return {k: sorted(v) for k, v in tmp.items() if v}
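# Worked example (added, illustrative):
#     clean_feat_id({0: [1, 5, -1], 2: [0]}, {0: [3]}, "dense")
# keeps only block 0 (block 2 is absent from feat_dim), drops feat_id 5
# (there are only 3 dense features), and returns {0: [-1, 1]}; the special
# id -1 ("use all features of the block") always passes the bound check.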
def create_emb_converter(
num_dense_feat, feat_sparse_id, feat_sparse_dim, comm_embed_dim, num_dense_as_sp=0
):
# set embedding layers
feat_emb = nn.ModuleDict()
# set dense emb layer
if num_dense_feat > 0:
feat_emb["dense"] = (
nn.Linear(num_dense_feat, comm_embed_dim, bias=True)
if num_dense_feat != comm_embed_dim
else nn.Identity()
)
if num_dense_as_sp > 0:
feat_emb["dense_as_sparse"] = nn.Embedding(num_dense_as_sp, comm_embed_dim)
# set sparse emb layer
feat_emb["sparse"] = nn.ModuleDict()
sparse_in_dim = get_sparse_feat_dim(feat_sparse_id, feat_sparse_dim)
for block in feat_sparse_id:
feat_emb["sparse"][str(block)] = nn.ModuleDict()
if feat_sparse_id[block] == [-1]:
for feat_id in range(len(sparse_in_dim[block])):
feat_emb["sparse"][str(block)][str(feat_id)] = (
nn.Linear(sparse_in_dim[block][feat_id], comm_embed_dim, bias=True)
if sparse_in_dim[block][feat_id] != comm_embed_dim
else nn.Identity()
)
else:
for feat_id in feat_sparse_id[block]:
feat_emb["sparse"][str(block)][str(feat_id)] = (
nn.Linear(sparse_in_dim[block][feat_id], comm_embed_dim, bias=True)
if sparse_in_dim[block][feat_id] != comm_embed_dim
else nn.Identity()
)
return feat_emb
def convert_to_emb(
feat_dict,
feat_emb_layers,
num_dense_feat,
feat_sparse_id,
comm_embed_dim,
num_dense_as_sp=0,
):
"""
:param num_dense_as_sp: # of input dense features to be treated as sparse features
"""
# embedding all features into the same length and concatenate them into a matrix
# dense
feat = [] if num_dense_feat <= 0 else [feat_emb_layers["dense"](feat_dict["dense"])]
# sparse
sp_feats = []
for block in feat_sparse_id:
if feat_sparse_id[block] == [-1]:
for feat_id, sp in feat_dict["sparse"][block].items():
emb = feat_emb_layers["sparse"][str(block)][str(feat_id)]
sp = sp.to(dtype=torch.float)
sp_feats.append(emb(sp))
else:
for feat_id in feat_sparse_id[block]:
emb = feat_emb_layers["sparse"][str(block)][str(feat_id)]
sp = feat_dict["sparse"][block][feat_id]
sp = sp.to(dtype=torch.float)
sp_feats.append(emb(sp))
# dense_to_sparse
if num_dense_as_sp > 0:
emb_table = feat_emb_layers["dense_as_sparse"](
torch.tensor(list(range(num_dense_as_sp)))
)
emb_table = emb_table.repeat([feat_dict["dense_as_sparse"].shape[0], 1, 1])
dense_as_sp_feat = emb_table * feat_dict["dense_as_sparse"][:, :, None]
# concatenation
if feat + sp_feats:
feat = torch.cat(feat + sp_feats, dim=1)
batch_size = feat.shape[0]
feat = feat.view((batch_size, -1, comm_embed_dim))
if num_dense_as_sp > 0:
feat = torch.cat([feat, dense_as_sp_feat], dim=1)
else:
feat = dense_as_sp_feat
return feat
def cat_feats(feat_dict, feat_sparse_id):
# concatenate all features into one row vector
feat = [] if feat_dict["dense"].nelement() == 0 else [feat_dict["dense"]]
sp_feats = []
for block, feat_ids in feat_sparse_id.items():
if feat_ids == [-1]:
for feat_id in feat_dict["sparse"][block]:
sp = feat_dict["sparse"][block][feat_id]
sp = sp.to(dtype=torch.float)
sp_feats.append(sp)
else:
for feat_id in feat_sparse_id[block]:
sp = feat_dict["sparse"][block][feat_id]
sp = sp.to(dtype=torch.float)
sp_feats.append(sp)
return torch.cat(feat + sp_feats, dim=1)
def extract_dense_feat(feat_dense_dict, feat_dense_id):
# extract
dense = []
for block, feat_id in feat_dense_id.items():
if feat_dense_dict[block].nelement() != 0:
dense.append(
feat_dense_dict[block]
if feat_id == [-1]
else feat_dense_dict[block][:, feat_id]
)
return torch.cat(dense, dim=1) if dense else torch.Tensor([])
def config_to_dict(feat_configs):
feat_dense_id = {
feat_config.block_id: feat_config.dense
for feat_config in feat_configs
if len(feat_config.dense)
}
feat_sparse_id = {
feat_config.block_id: feat_config.sparse
for feat_config in feat_configs
if len(feat_config.sparse)
}
return feat_dense_id, feat_sparse_id
def get_sparse_feat_dim(feat_id_dict, feat_dim_dict):
# get sparse feature dimension
sparse_in_dim = {}
for block, feat_ids in feat_id_dict.items():
if feat_ids == [-1]:
sparse_in_dim[block] = feat_dim_dict[block]
else:
sparse_in_dim[block] = {}
for feat_id in feat_ids:
sparse_in_dim[block][feat_id] = feat_dim_dict[block][feat_id]
return sparse_in_dim
def get_sparse_feat_dim_num(feat_id_dict, feat_dim_dict):
# get sparse feature dimension
num_sparse_in_dim = 0
for block, feat_ids in feat_id_dict.items():
if feat_ids == [-1]:
num_sparse_in_dim += sum(feat_dim_dict[block])
else:
for feat_id in feat_ids:
num_sparse_in_dim += feat_dim_dict[block][feat_id]
return num_sparse_in_dim
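# Worked example (added, illustrative): with feat_dim_dict = {0: [10, 4], 3: [8]}
#     get_sparse_feat_dim_num({0: [-1]}, feat_dim_dict)        -> 10 + 4 = 14
#     get_sparse_feat_dim_num({0: [1], 3: [0]}, feat_dim_dict) -> 4 + 8 = 12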
def create_crossnet(num_of_layers, num_input_feat):
weight_w = torch.nn.ModuleList(
[torch.nn.Linear(num_input_feat, 1, bias=False) for _ in range(num_of_layers)]
)
weight_b = torch.nn.ParameterList(
[
torch.nn.Parameter(torch.zeros((num_input_feat,)))
for _ in range(num_of_layers)
]
)
batchnorm = torch.nn.ModuleList(
[nn.BatchNorm1d(num_input_feat, affine=False) for _ in range(num_of_layers)]
)
return weight_w, weight_b, batchnorm
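# Note (added): the parameters built above implement the DCN-style cross layer
# applied in CrossNetBlock.forward, roughly
#     x_{l+1} = x_0 * (w_l^T x_l) + b_l + x_l
# where x_0 is the (possibly re-embedded) cross input, w_l is weight_w[l]
# (a Linear(num_input_feat, 1, bias=False)) and b_l is weight_b[l].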
def create_cin(layer_sizes, field_nums):
conv_layers, bias_layers, activation_layers = (
nn.ModuleList(),
nn.ParameterList(),
nn.ModuleList(),
)
for i, size in enumerate(layer_sizes):
single_conv_layer = nn.Conv2d(
in_channels=1, out_channels=size, kernel_size=(field_nums[i], field_nums[0])
)
conv_layers.append(single_conv_layer)
bias_layers.append(
nn.Parameter(torch.nn.init.normal_(torch.empty(size), mean=0.0, std=1e-6))
)
activation_layers.append(nn.ReLU())
return conv_layers, bias_layers, activation_layers
def create_transformer(
emb_dim, att_embed_dim, num_of_heads, num_of_layers, use_res, use_batchnorm
):
w_query, w_key, w_value, w_res, bn = (
nn.ModuleList(),
nn.ModuleList(),
nn.ModuleList(),
nn.ModuleList(),
nn.ModuleList(),
)
num_units = att_embed_dim * num_of_heads
emb_dim = [emb_dim] + (num_of_layers - 1) * [num_units]
for l in range(num_of_layers):
w_query.append(nn.Linear(emb_dim[l], num_units, bias=True))
w_key.append(nn.Linear(emb_dim[l], num_units, bias=True))
w_value.append(nn.Linear(emb_dim[l], num_units, bias=True))
if use_res:
w_res.append(nn.Linear(emb_dim[l], num_units, bias=True))
if use_batchnorm:
bn.append(nn.BatchNorm1d(num_units))
return w_query, w_key, w_value, w_res, bn
def nasnet_visual(nasrec_model):
""" function to visualize the nasrec net model
"""
dot = Digraph(comment="Graph", format="png")
with dot.subgraph() as s:
s.attr(rank="same")
s.node("0_d", "Dense", color="red")
s.node("0_s", "Sparse", color="red")
block_name = []
for i, block in enumerate(nasrec_model.blocks):
block_name.append(block.__str__() + "Block")
dot.node(
str(i + 1), str(i + 1) + "_" + block_name[-1], shape="box", color="green"
)
dense = block.feat_dense_id
sparse = block.feat_sparse_id
skip_block_id = set(dense.keys()).union(set(sparse.keys()))
cross_dense = []
cross_sparse = []
        # block_name entries look like "CrossNetBlock", so match on the prefix
        if block_name[-1].startswith("CrossNet"):
cross_dense = block.cross_feat_dense_id
cross_sparse = block.cross_feat_sparse_id
skip_block_id = skip_block_id.union(set(cross_dense.keys()))
skip_block_id = skip_block_id.union(set(cross_sparse.keys()))
for id in skip_block_id:
if id == 0:
if id in dense or (cross_dense and id in cross_dense):
dot.edge("0_d", str(i + 1))
if id in sparse or (cross_sparse and id in cross_sparse):
dot.edge("0_s", str(i + 1))
else:
dot.edge(str(id), str(i + 1))
return dot
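# Illustrative usage (added): given a constructed NASRecNet `model`,
#     dot = nasnet_visual(model)
#     dot.render("nasrec_arch")  # writes the .gv source plus nasrec_arch.png
# rendering requires the graphviz system binaries in addition to the Python
# package.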
|
AutoCTR-main
|
nasrec/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from block_config import ttypes as b_config
from models.utils import create_mlp
from .utils import (
cat_feats,
clean_feat_id,
config_to_dict,
convert_to_emb,
create_cin,
create_crossnet,
create_emb_converter,
create_transformer,
extract_dense_feat,
get_sparse_feat_dim_num,
)
logger = logging.getLogger(__name__)
def set_block_from_config(block_config, feat_dim):
if block_config is None:
return None
name2block = {
b_config.BlockConfig.MLP_BLOCK: MLPBlock,
b_config.BlockConfig.CROSSNET_BLOCK: CrossNetBlock,
b_config.BlockConfig.FM_BLOCK: FMBlock,
b_config.BlockConfig.DOTPROCESSOR_BLOCK: DotProcessorBlock,
b_config.BlockConfig.CAT_BLOCK: CatBlock,
b_config.BlockConfig.CIN_BLOCK: CINBlock,
b_config.BlockConfig.ATTENTION_BLOCK: AttentionBlock,
}
    block_type = block_config.getType()
    block_cls = name2block[block_type]
    return block_cls(block_config, feat_dim)
def save_block(block, filename):
logger.info("Saving block to {}".format(filename))
state = {
"state_dict": block.state_dict(),
"block_config": block.block_config,
"feat_dim": {"dense": block.feat_dense_dim, "sparse": block.feat_sparse_dim},
}
torch.save(state, filename)
def load_block(filename):
logger.info("Loading model from {}".format(filename))
state = torch.load(filename)
block_config = state["block_config"]
feat_dim = state["feat_dim"]
block = set_block_from_config(block_config=block_config, feat_dim=feat_dim)
block.load_state_dict(state["state_dict"])
return block
class BaseBlock(nn.Module):
def __init__(self, block_config, feat_dim):
super(BaseBlock, self).__init__()
        # for serialization purposes
self.block_config = deepcopy(block_config)
# extract input feat_dim dictionary {block_id: feat_dim (list)}
self.feat_dense_dim = feat_dim["dense"]
self.feat_sparse_dim = feat_dim["sparse"]
def _init_basic_block_params(self):
self.block_id = self.block_option.block_id
self.input_feat_config = self.block_option.input_feat_config
# convert input feat_id into dictionary format {block_id: feat_id (list)}
self.feat_dense_id, self.feat_sparse_id = config_to_dict(
self.block_option.input_feat_config
)
# check and modify feat_ids
self.feat_dense_id = clean_feat_id(
self.feat_dense_id, self.feat_dense_dim, "dense"
)
self.feat_sparse_id = clean_feat_id(
self.feat_sparse_id, self.feat_sparse_dim, "sparse"
)
# get input feature number
# dense feature
self.num_dense_feat = sum(
(
self.feat_dense_dim[b][0] # all dense feats in block b
if self.feat_dense_id[b] == [-1]
else len(self.feat_dense_id[b])
)
for b in self.feat_dense_id
)
self.num_sparse_feat = sum(
(
len(self.feat_sparse_dim[b])
if self.feat_sparse_id[b] == [-1]
else len(self.feat_sparse_id[b])
)
for b in self.feat_sparse_id
)
def _refine_emb_arc(self):
        # refine the arc if the raw input dense features are treated as sparse
        # treat input dense features in block 0 as sparse features if present
self.dense_as_sparse_id, self.num_dense_as_sparse_feat = None, 0
if self.emb_config.dense_as_sparse and 0 in self.feat_dense_id:
self.dense_as_sparse_id = self.feat_dense_id.pop(0)
self.num_dense_as_sparse_feat = (
self.feat_dense_dim[0][0]
if self.dense_as_sparse_id == [-1]
else len(self.dense_as_sparse_id)
)
self.num_dense_feat -= self.num_dense_as_sparse_feat
self.num_sparse_feat += self.num_dense_as_sparse_feat
def forward(self, feat_dict):
raise NotImplementedError
def dim_config(self, feat_dim):
raise NotImplementedError
def __str__(self):
return type(self).__name__[:-5]
class MLPBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(MLPBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_mlp_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
# set mlp layer
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
self.layers = create_mlp(
[self.num_input_feat] + self.block_option.arc,
ly_act=self.block_option.ly_act,
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
# set embeding layer
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
# set mlp layer
self.layers = create_mlp(
[self.emb_config.comm_embed_dim] + self.block_option.arc,
ly_act=self.block_option.ly_act,
)
else:
raise ValueError("Unsupported configuration for MLPBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat_dim["dense"][self.block_id] = [self.block_option.arc[-1]]
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_feat > 0:
feat_dim["sparse"][self.block_id] = [self.block_option.arc[-1]] * (
self.num_sparse_feat + 1
)
else:
feat_dim["sparse"][self.block_id] = [
self.block_option.arc[-1]
] * self.num_sparse_feat
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
            try:
                p = self.layers(feat)
            except Exception:
                logger.exception(
                    "MLPBlock forward failed on input of shape {}".format(tuple(feat.shape))
                )
                raise
feat_dict["dense"][self.block_id] = p
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
p = self.layers(feat)
feat_dict["sparse"][self.block_id] = {
feat_id: p[:, feat_id] for feat_id in range(p.shape[1]) # 1 for dense
}
return feat_dict
def __str__(self):
return (
super().__str__()
+ "("
+ ", ".join(str(item) for item in self.block_option.arc)
+ ")"
)
class CrossNetBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(CrossNetBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_crossnet_block()
self.num_of_layers = self.block_option.num_of_layers
self._init_basic_block_params()
self._init_cross_params()
self._build_arc()
def _init_cross_params(self):
# cross input feat id
self.cross_feat_config = self.block_option.cross_feat_config
# convert cross input feat_id into dictionary format {block_id: feat_id (list)}
self.cross_feat_dense_id, self.cross_feat_sparse_id = config_to_dict(
self.block_option.cross_feat_config
)
# check and modify feat_ids
self.cross_feat_dense_id = clean_feat_id(
self.cross_feat_dense_id, self.feat_dense_dim, "dense"
)
        # use "sparse" here to mirror how feat_sparse_id is cleaned above
        self.cross_feat_sparse_id = clean_feat_id(
            self.cross_feat_sparse_id, self.feat_sparse_dim, "sparse"
        )
# get cross input feature number
# dense feature
self.cross_num_dense_feat_per_block = []
for b in self.cross_feat_dense_id:
self.cross_num_dense_feat_per_block += (
self.feat_dense_dim[b] # all dense feats in block b
if self.cross_feat_dense_id[b] == [-1]
else [len(self.cross_feat_dense_id[b])]
)
# sparse feature
self.cross_num_sparse_feat_per_block = []
for b in self.cross_feat_sparse_id:
self.cross_num_sparse_feat_per_block += (
self.feat_sparse_dim[b]
if self.cross_feat_sparse_id[b] == [-1]
else [len(self.cross_feat_sparse_id[b])]
)
self.cross_num_dense_feat = sum(self.cross_num_dense_feat_per_block)
self.cross_num_sparse_feat = sum(self.cross_num_sparse_feat_per_block)
        # reset feat_ids if the block is an empty block
if (
self.num_sparse_feat + self.num_dense_feat == 0
or self.cross_num_dense_feat + self.cross_num_sparse_feat == 0
):
self.feat_dense_id = {}
self.feat_sparse_id = {}
self.cross_feat_dense_id = {}
self.cross_feat_sparse_id = {}
def _build_arc(self):
if (
self.num_sparse_feat + self.num_dense_feat == 0
or self.cross_num_dense_feat + self.cross_num_sparse_feat == 0
):
return
self.num_input_feat = self.num_dense_feat
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
self.cross_num_input_feat = self.cross_num_dense_feat
self.cross_num_input_feat += get_sparse_feat_dim_num(
self.cross_feat_sparse_id, self.feat_sparse_dim
)
if self.num_input_feat != self.cross_num_input_feat:
# construct a embedding layer
self.emb_layer = nn.Linear(self.cross_num_input_feat, self.num_input_feat)
self.weight_w, self.weight_b, self.batchnorm = create_crossnet(
self.num_of_layers, self.num_input_feat
)
def dim_config(self, feat_dim):
if (
self.num_sparse_feat + self.num_dense_feat != 0
and self.cross_num_dense_feat + self.cross_num_sparse_feat != 0
):
feat_dim["dense"][self.block_id] = [self.num_input_feat]
return feat_dim
def forward(self, feat_dict):
if (
self.num_sparse_feat + self.num_dense_feat == 0
or self.cross_num_dense_feat + self.cross_num_sparse_feat == 0
):
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
cross_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.cross_feat_dense_id),
"sparse": feat_dict["sparse"],
}
# concatenate two feature dicts into two vectors
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
cross_feat = cat_feats(cross_feat_dict, self.cross_feat_sparse_id)
# crossnet
if self.num_input_feat != self.cross_num_input_feat:
cross_feat = self.emb_layer(cross_feat)
for i in range(self.num_of_layers):
feat = cross_feat * self.weight_w[i](feat) + self.weight_b[i] + feat
if self.block_option.batchnorm:
feat = self.batchnorm[i](feat)
feat_dict["dense"][self.block_id] = feat
return feat_dict
def __str__(self):
return super().__str__()
class FMBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(FMBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_fm_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
# set FM layer
# first order embedding layer
self.weight_w_first = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
self.weight_b_first = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
# second order embedding layer
self.weight_w_second = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
self.weight_b_second = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
# set FM layer
# first order embedding layer
self.first_order_feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
1,
self.num_dense_as_sparse_feat,
)
# second order embedding layer
self.second_order_feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
else:
raise ValueError("Unsupported configuration for FMBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
feat_dim["dense"][self.block_id] = [1]
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
# compute FM layer
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
feat1 = feat * self.weight_w_first + self.weight_b_first
feat2 = feat * self.weight_w_second + self.weight_b_second
p = self.fm_sum(feat1, feat2)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
feat1 = convert_to_emb(
extracted_feat_dict,
self.first_order_feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
1,
self.num_dense_as_sparse_feat,
)
feat2 = convert_to_emb(
extracted_feat_dict,
self.second_order_feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
p = self.fm_sum(feat1, feat2)
feat_dict["dense"][self.block_id] = p
return feat_dict
def fm_sum(self, feat1, feat2):
if self.block_option.type.getType() == b_config.BlockType.DENSE:
# first order
p1 = torch.sum(feat1, 1)
# second order
sum_square = torch.pow(torch.sum(feat2, 1), 2)
square_sum = torch.sum(torch.pow(feat2, 2), 1)
p2 = (sum_square - square_sum) * 0.5
p = p1 + p2
elif self.block_option.type.getType() == b_config.BlockType.EMB:
p1 = torch.sum(feat1, [1, 2])
sum_square = torch.pow(torch.sum(feat2, 1), 2)
square_sum = torch.sum(torch.pow(feat2, 2), 1)
p2 = (sum_square - square_sum) * 0.5
p = p1 + torch.sum(p2, 1)
return p[:, None]
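    # Note (added): fm_sum relies on the standard FM identity
    #     sum_{i<j} v_i * v_j = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2)
    # computed per embedding dimension, avoiding the explicit O(n^2) pairwise loop.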
def __str__(self):
return super().__str__()
class DotProcessorBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(DotProcessorBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_dotprocessor_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
# set DP layer
self.weight_w = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
self.weight_b = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
self.num_input_feat = 1 + self.num_sparse_feat
# set Embedding Layer
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
else:
raise ValueError("Unsupported configuration for DotProcessorBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
feat_dim["dense"][self.block_id] = [
int(self.num_input_feat * (self.num_input_feat + 1) / 2)
]
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
# compute DP layer
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
feat = feat * self.weight_w + self.weight_b
p = self.dp_sum(feat[:, :, None])
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
p = self.dp_sum(feat)
feat_dict["dense"][self.block_id] = p
return feat_dict
def dp_sum(self, feat):
Z = torch.matmul(feat, torch.transpose(feat, 1, 2))
Zflat = Z.view((feat.shape[0], -1))
num_ints = int(self.num_input_feat * (self.num_input_feat + 1) / 2)
return Zflat[:, :num_ints]
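# Note on dp_sum (a descriptive sketch, not original documentation): Z[b] is the
# Gram matrix feat[b] @ feat[b].T holding every pairwise dot product between the
# num_input_feat feature vectors of sample b. The slice keeps the first
# num_input_feat * (num_input_feat + 1) / 2 entries of the flattened matrix, i.e.
# as many values as there are unordered feature pairs (self-interactions included),
# which matches the dense output width declared in dim_config above.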
def __str__(self):
return super().__str__()
class CatBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(CatBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_cat_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
self.num_input_feat = 1 + self.num_sparse_feat
# set Embedding Layer
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
else:
raise ValueError("Unsupported configuration for CatBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat_dim["dense"][self.block_id] = [self.num_input_feat]
elif self.block_option.type.getType() == b_config.BlockType.EMB:
feat_dim["sparse"][self.block_id] = (
[self.emb_config.comm_embed_dim] * (self.num_sparse_feat + 1)
if self.num_dense_feat > 0
else [self.emb_config.comm_embed_dim] * self.num_sparse_feat
)
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
# compute Cat layer
if self.block_option.type.getType() == b_config.BlockType.DENSE:
p = cat_feats(extracted_feat_dict, self.feat_sparse_id)
feat_dict["dense"][self.block_id] = (
p[:, None] if self.num_input_feat == 1 else p
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
p = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
feat_dict["sparse"][self.block_id] = {
feat_id: p[:, feat_id] for feat_id in range(p.shape[1])  # one extra slot for the dense embedding, if present
}
return feat_dict
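# Summary (sketch): in DENSE mode CatBlock emits a single concatenated dense vector
# of width num_input_feat, while in EMB mode it re-emits one comm_embed_dim-sized
# embedding per feature into the "sparse" dict (with one extra entry when dense
# features are present), mirroring dim_config above.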
def __str__(self):
return super().__str__()
class CINBlock(BaseBlock):
"""Compressed Interaction Network used in xDeepFM.
https://arxiv.org/pdf/1803.05170.pdf.
"""
def __init__(self, block_config, feat_dim):
super(CINBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_cin_block()
self.layer_sizes = self.block_option.arc
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
self.emb_config = self.block_option.emb_config
self._refine_emb_arc()
self.field_nums = [self.num_sparse_feat + 1]
for i, size in enumerate(self.layer_sizes):
if self.block_option.split_half:
if i != len(self.layer_sizes) - 1 and size % 2 > 0:
raise ValueError(
"layer_size must be even number except for the last layer when split_half=True"
)
self.field_nums.append(size // 2)
else:
self.field_nums.append(size)
# set embedding layers
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
# set CIN convolutional layers
self.conv_layers, self.bias_layers, self.activation_layers = create_cin(
self.layer_sizes, self.field_nums
)
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
feat_dim["dense"][self.block_id] = (
[sum(self.layer_sizes[:-1]) // 2 + self.layer_sizes[-1]]
if self.block_option.split_half
else [sum(self.layer_sizes)]
)
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
# get feature matrix X0
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
if feat.dim() != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions"
% (feat.dim())
)
p = self.cin(feat)
feat_dict["dense"][self.block_id] = p
return feat_dict
def cin(self, feat):
dim = feat.shape[-1]
p = []
hidden_nn_layers = [feat]
cross_feats = torch.split(hidden_nn_layers[0], dim * [1], 2)
for l_idx, layer_size in enumerate(self.layer_sizes):
curr_feats = torch.split(hidden_nn_layers[-1], dim * [1], 2)
dot_result_m = torch.stack(
[
torch.bmm(curr_feats[t_idx], t.transpose(1, 2))
for t_idx, t in enumerate(cross_feats)
]
)
dot_result_m = dot_result_m.view(
-1, 1, dot_result_m.shape[2], dot_result_m.shape[3]
)
# apply conv, add bias, activation
curr_out = torch.squeeze(self.conv_layers[l_idx](dot_result_m))
curr_out = curr_out.view(dim, -1, layer_size)  # shape: (dim, batch_size, Hk)
curr_out = curr_out + self.bias_layers[l_idx]
curr_out = self.activation_layers[l_idx](curr_out)
curr_out = curr_out.permute(1, 2, 0)
if self.block_option.split_half:
if l_idx != len(self.layer_sizes) - 1:
next_hidden, direct_connect = torch.split(
curr_out, 2 * [layer_size // 2], 1
)
else:
direct_connect = curr_out
next_hidden = 0
else:
direct_connect = curr_out
next_hidden = curr_out
p.append(direct_connect)
hidden_nn_layers.append(next_hidden)
return torch.cat(p, 1).sum(-1)
def __str__(self):
return super().__str__()
class AttentionBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(AttentionBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_attention_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
self.emb_config = self.block_option.emb_config
self.att_embed_dim = self.block_option.att_embed_dim
self.num_of_heads = self.block_option.num_of_heads
self.num_of_layers = self.block_option.num_of_layers
self.use_res = self.block_option.use_res
self.use_batchnorm = self.block_option.batchnorm
self._dropout_p = self.block_option.dropout_prob
self._refine_emb_arc()
# set embedding layers
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
# set attention params
self.query_layers, self.key_layers, self.value_layers, self.res_layers, self.bn_layers = create_transformer(
self.emb_config.comm_embed_dim,
self.att_embed_dim,
self.num_of_heads,
self.num_of_layers,
self.use_res,
self.use_batchnorm,
)
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
if self.num_dense_feat > 0:
feat_dim["sparse"][self.block_id] = [
self.att_embed_dim * self.num_of_heads
] * (self.num_sparse_feat + 1)
else:
feat_dim["sparse"][self.block_id] = [
self.att_embed_dim * self.num_of_heads
] * self.num_sparse_feat
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
# get feature matrix X0
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
if feat.dim() != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions"
% (feat.dim())
)
p = self.transformer(feat)
feat_dict["sparse"][self.block_id] = {
feat_id: p[:, feat_id] for feat_id in range(p.shape[1])  # one extra slot for the dense embedding, if present
}
return feat_dict
def transformer(self, feat):
attention = feat
for l in range(self.num_of_layers):
Q = F.relu(self.query_layers[l](attention))
K = F.relu(self.key_layers[l](attention))
V = F.relu(self.value_layers[l](attention))
if self.use_res:
V_res = F.relu(self.res_layers[l](attention))
# Split and concat
Q_ = torch.cat(Q.split(split_size=self.att_embed_dim, dim=2), dim=0)
K_ = torch.cat(K.split(split_size=self.att_embed_dim, dim=2), dim=0)
V_ = torch.cat(V.split(split_size=self.att_embed_dim, dim=2), dim=0)
# calculate QK^T
weights = torch.matmul(Q_, K_.transpose(1, 2))
# normalize with sqrt(dk)
weights = weights / np.sqrt(self.att_embed_dim)
# put it to softmax
weights = F.softmax(weights, dim=-1)
# apply dropout (active only in training mode)
weights = F.dropout(weights, p=self._dropout_p, training=self.training)
# multiply it with V
attention = torch.matmul(weights, V_)
# convert attention back to its original input size
restore_chunk_size = int(attention.size(0) / self.num_of_heads)
attention = torch.cat(
attention.split(split_size=restore_chunk_size, dim=0), dim=2
)
# residual connection
if self.use_res:
attention += V_res
# TODO: do we need this?
attention = F.relu(attention)
# apply batch normalization
if self.use_batchnorm:
attention = self.bn_layers[l](attention.transpose(1, 2)).transpose(1, 2)
return attention
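# Shape sketch for transformer() (assuming create_transformer projects each layer's
# input to num_of_heads * att_embed_dim features, which is what the head splitting
# in the method above implies): the input feat is (batch, num_feats, comm_embed_dim);
# heads are moved onto the batch dimension as
# (batch * num_of_heads, num_feats, att_embed_dim) for the scaled dot-product
# attention, then concatenated back, so the returned tensor is
# (batch, num_feats, num_of_heads * att_embed_dim) as declared in dim_config.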
def __str__(self):
return super().__str__()
|
AutoCTR-main
|
nasrec/blocks.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
|
AutoCTR-main
|
gen-py/__init__.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import block_config.ttypes
from .ttypes import *
|
AutoCTR-main
|
gen-py/config/constants.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
__all__ = ['ttypes', 'constants']
|
AutoCTR-main
|
gen-py/config/__init__.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import block_config.ttypes
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
if not '__pypy__' in sys.builtin_module_names:
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'MacroSearchSpaceType', 'DataFromFileConfig', 'DataConfig', 'MicroClose', 'MicroMLPConfig', 'MicroCINConfig', 'MicroAttentionConfig', 'MicroSearchSpaceType', 'InputDenseAsSparse', 'FeatureProcessingType', 'NASRecNetConfig', 'RandomSearcherConfig', 'EvolutionarySearcherConfig', 'SearcherConfig', 'ModelConfig', 'SGDOptimConfig', 'AdagradOptimConfig', 'SparseAdamOptimConfig', 'AdamOptimConfig', 'RMSpropOptimConfig', 'OptimConfig', 'SumPooling', 'AvgPooling', 'PoolingConfig', 'SparseFeatureItem', 'SparseFeatureConfig', 'DenseFeatureConfig', 'FeatureConfig', 'BCEWithLogitsLoss', 'BCELoss', 'MSELoss', 'LossConfig', 'LoggingConfig', 'TrainConfig', 'EvalConfig', 'CheckpointConfig', 'KoskiReaderConfig', 'PerformanceConfig']
class MacroSearchSpaceType:
INPUT_DIFF = 1
INPUT_GROUP = 2
INPUT_DIFF_PRIOR = 3
INPUT_ELASTIC_PRIOR = 4
_VALUES_TO_NAMES = {
1: "INPUT_DIFF",
2: "INPUT_GROUP",
3: "INPUT_DIFF_PRIOR",
4: "INPUT_ELASTIC_PRIOR",
}
_NAMES_TO_VALUES = {
"INPUT_DIFF": 1,
"INPUT_GROUP": 2,
"INPUT_DIFF_PRIOR": 3,
"INPUT_ELASTIC_PRIOR": 4,
}
class DataFromFileConfig:
"""
Attributes:
- data_file
- batch_size
- num_batches
- splits
- num_samples_meta
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.data_file = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.batch_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.num_batches = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.splits = []
(_etype3, _size0) = iprot.readListBegin()
if _size0 >= 0:
for _i4 in six.moves.range(_size0):
_elem5 = iprot.readFloat()
self.splits.append(_elem5)
else:
while iprot.peekList():
_elem6 = iprot.readFloat()
self.splits.append(_elem6)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.num_samples_meta = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DataFromFileConfig')
if self.data_file != None:
oprot.writeFieldBegin('data_file', TType.STRING, 1)
oprot.writeString(self.data_file.encode('utf-8')) if UTF8STRINGS and not isinstance(self.data_file, bytes) else oprot.writeString(self.data_file)
oprot.writeFieldEnd()
if self.batch_size != None:
oprot.writeFieldBegin('batch_size', TType.I32, 2)
oprot.writeI32(self.batch_size)
oprot.writeFieldEnd()
if self.num_batches != None:
oprot.writeFieldBegin('num_batches', TType.I32, 3)
oprot.writeI32(self.num_batches)
oprot.writeFieldEnd()
if self.splits != None:
oprot.writeFieldBegin('splits', TType.LIST, 4)
oprot.writeListBegin(TType.FLOAT, len(self.splits))
for iter7 in self.splits:
oprot.writeFloat(iter7)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_samples_meta != None:
oprot.writeFieldBegin('num_samples_meta', TType.I32, 5)
oprot.writeI32(self.num_samples_meta)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.data_file is not None:
value = pprint.pformat(self.data_file, indent=0)
value = padding.join(value.splitlines(True))
L.append(' data_file=%s' % (value))
if self.batch_size is not None:
value = pprint.pformat(self.batch_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' batch_size=%s' % (value))
if self.num_batches is not None:
value = pprint.pformat(self.num_batches, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_batches=%s' % (value))
if self.splits is not None:
value = pprint.pformat(self.splits, indent=0)
value = padding.join(value.splitlines(True))
L.append(' splits=%s' % (value))
if self.num_samples_meta is not None:
value = pprint.pformat(self.num_samples_meta, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_samples_meta=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class DataConfig(object):
"""
Attributes:
- from_file
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
FROM_FILE = 1
@staticmethod
def isUnion():
return True
def get_from_file(self):
assert self.field == 1
return self.value
def set_from_file(self, value):
self.field = 1
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('from_file', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
from_file = DataFromFileConfig()
from_file.read(iprot)
assert self.field == 0 and self.value is None
self.set_from_file(from_file)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('DataConfig')
if self.field == 1:
oprot.writeFieldBegin('from_file', TType.STRUCT, 1)
from_file = self.value
from_file.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MicroClose:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroClose')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroMLPConfig:
"""
Attributes:
- arc
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.arc = []
(_etype11, _size8) = iprot.readListBegin()
if _size8 >= 0:
for _i12 in six.moves.range(_size8):
_elem13 = iprot.readI32()
self.arc.append(_elem13)
else:
while iprot.peekList():
_elem14 = iprot.readI32()
self.arc.append(_elem14)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroMLPConfig')
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter15 in self.arc:
oprot.writeI32(iter15)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroCINConfig:
"""
Attributes:
- arc
- num_of_layers
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.arc = []
(_etype19, _size16) = iprot.readListBegin()
if _size16 >= 0:
for _i20 in six.moves.range(_size16):
_elem21 = iprot.readI32()
self.arc.append(_elem21)
else:
while iprot.peekList():
_elem22 = iprot.readI32()
self.arc.append(_elem22)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.num_of_layers = []
(_etype26, _size23) = iprot.readListBegin()
if _size23 >= 0:
for _i27 in six.moves.range(_size23):
_elem28 = iprot.readI32()
self.num_of_layers.append(_elem28)
else:
while iprot.peekList():
_elem29 = iprot.readI32()
self.num_of_layers.append(_elem29)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroCINConfig')
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter30 in self.arc:
oprot.writeI32(iter30)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.num_of_layers))
for iter31 in self.num_of_layers:
oprot.writeI32(iter31)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroAttentionConfig:
"""
Attributes:
- num_of_layers
- num_of_heads
- att_embed_dim
- dropout_prob
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.num_of_layers = []
(_etype35, _size32) = iprot.readListBegin()
if _size32 >= 0:
for _i36 in six.moves.range(_size32):
_elem37 = iprot.readI32()
self.num_of_layers.append(_elem37)
else:
while iprot.peekList():
_elem38 = iprot.readI32()
self.num_of_layers.append(_elem38)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.num_of_heads = []
(_etype42, _size39) = iprot.readListBegin()
if _size39 >= 0:
for _i43 in six.moves.range(_size39):
_elem44 = iprot.readI32()
self.num_of_heads.append(_elem44)
else:
while iprot.peekList():
_elem45 = iprot.readI32()
self.num_of_heads.append(_elem45)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.att_embed_dim = []
(_etype49, _size46) = iprot.readListBegin()
if _size46 >= 0:
for _i50 in six.moves.range(_size46):
_elem51 = iprot.readI32()
self.att_embed_dim.append(_elem51)
else:
while iprot.peekList():
_elem52 = iprot.readI32()
self.att_embed_dim.append(_elem52)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.dropout_prob = []
(_etype56, _size53) = iprot.readListBegin()
if _size53 >= 0:
for _i57 in six.moves.range(_size53):
_elem58 = iprot.readFloat()
self.dropout_prob.append(_elem58)
else:
while iprot.peekList():
_elem59 = iprot.readFloat()
self.dropout_prob.append(_elem59)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroAttentionConfig')
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.num_of_layers))
for iter60 in self.num_of_layers:
oprot.writeI32(iter60)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_of_heads != None:
oprot.writeFieldBegin('num_of_heads', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.num_of_heads))
for iter61 in self.num_of_heads:
oprot.writeI32(iter61)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.att_embed_dim != None:
oprot.writeFieldBegin('att_embed_dim', TType.LIST, 3)
oprot.writeListBegin(TType.I32, len(self.att_embed_dim))
for iter62 in self.att_embed_dim:
oprot.writeI32(iter62)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.dropout_prob != None:
oprot.writeFieldBegin('dropout_prob', TType.LIST, 4)
oprot.writeListBegin(TType.FLOAT, len(self.dropout_prob))
for iter63 in self.dropout_prob:
oprot.writeFloat(iter63)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
if self.num_of_heads is not None:
value = pprint.pformat(self.num_of_heads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_heads=%s' % (value))
if self.att_embed_dim is not None:
value = pprint.pformat(self.att_embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' att_embed_dim=%s' % (value))
if self.dropout_prob is not None:
value = pprint.pformat(self.dropout_prob, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dropout_prob=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroSearchSpaceType(object):
"""
Attributes:
- close
- micro_mlp
- micro_cin
- micro_attention
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
CLOSE = 1
MICRO_MLP = 2
MICRO_CIN = 3
MICRO_ATTENTION = 4
@staticmethod
def isUnion():
return True
def get_close(self):
assert self.field == 1
return self.value
def get_micro_mlp(self):
assert self.field == 2
return self.value
def get_micro_cin(self):
assert self.field == 3
return self.value
def get_micro_attention(self):
assert self.field == 4
return self.value
def set_close(self, value):
self.field = 1
self.value = value
def set_micro_mlp(self, value):
self.field = 2
self.value = value
def set_micro_cin(self, value):
self.field = 3
self.value = value
def set_micro_attention(self, value):
self.field = 4
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 6
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('close', value)
if self.field == 2:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('micro_mlp', value)
if self.field == 3:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('micro_cin', value)
if self.field == 4:
padding = ' ' * 16
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('micro_attention', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
close = MicroClose()
close.read(iprot)
assert self.field == 0 and self.value is None
self.set_close(close)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
micro_mlp = MicroMLPConfig()
micro_mlp.read(iprot)
assert self.field == 0 and self.value is None
self.set_micro_mlp(micro_mlp)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
micro_cin = MicroCINConfig()
micro_cin.read(iprot)
assert self.field == 0 and self.value is None
self.set_micro_cin(micro_cin)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
micro_attention = MicroAttentionConfig()
micro_attention.read(iprot)
assert self.field == 0 and self.value is None
self.set_micro_attention(micro_attention)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('MicroSearchSpaceType')
if self.field == 1:
oprot.writeFieldBegin('close', TType.STRUCT, 1)
close = self.value
close.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('micro_mlp', TType.STRUCT, 2)
micro_mlp = self.value
micro_mlp.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('micro_cin', TType.STRUCT, 3)
micro_cin = self.value
micro_cin.write(oprot)
oprot.writeFieldEnd()
if self.field == 4:
oprot.writeFieldBegin('micro_attention', TType.STRUCT, 4)
micro_attention = self.value
micro_attention.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InputDenseAsSparse:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('InputDenseAsSparse')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class FeatureProcessingType(object):
"""
Attributes:
- idasp
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
IDASP = 1
@staticmethod
def isUnion():
return True
def get_idasp(self):
assert self.field == 1
return self.value
def set_idasp(self, value):
self.field = 1
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 6
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('idasp', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
idasp = InputDenseAsSparse()
idasp.read(iprot)
assert self.field == 0 and self.value is None
self.set_idasp(idasp)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('FeatureProcessingType')
if self.field == 1:
oprot.writeFieldBegin('idasp', TType.STRUCT, 1)
idasp = self.value
idasp.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NASRecNetConfig:
"""
Attributes:
- block_configs
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.block_configs = []
(_etype67, _size64) = iprot.readListBegin()
if _size64 >= 0:
for _i68 in six.moves.range(_size64):
_elem69 = block_config.ttypes.BlockConfig()
_elem69.read(iprot)
self.block_configs.append(_elem69)
else:
while iprot.peekList():
_elem70 = block_config.ttypes.BlockConfig()
_elem70.read(iprot)
self.block_configs.append(_elem70)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('NASRecNetConfig')
if self.block_configs != None:
oprot.writeFieldBegin('block_configs', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.block_configs))
for iter71 in self.block_configs:
iter71.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.block_configs is not None:
value = pprint.pformat(self.block_configs, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_configs=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class RandomSearcherConfig:
"""
Attributes:
- max_num_block
- block_types
- macro_space_type
- micro_space_types
- feature_processing_type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.max_num_block = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.block_types = []
(_etype75, _size72) = iprot.readListBegin()
if _size72 >= 0:
for _i76 in six.moves.range(_size72):
_elem77 = iprot.readI32()
self.block_types.append(_elem77)
else:
while iprot.peekList():
_elem78 = iprot.readI32()
self.block_types.append(_elem78)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.macro_space_type = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.micro_space_types = []
(_etype82, _size79) = iprot.readListBegin()
if _size79 >= 0:
for _i83 in six.moves.range(_size79):
_elem84 = MicroSearchSpaceType()
_elem84.read(iprot)
self.micro_space_types.append(_elem84)
else:
while iprot.peekList():
_elem85 = MicroSearchSpaceType()
_elem85.read(iprot)
self.micro_space_types.append(_elem85)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.feature_processing_type = []
(_etype89, _size86) = iprot.readListBegin()
if _size86 >= 0:
for _i90 in six.moves.range(_size86):
_elem91 = FeatureProcessingType()
_elem91.read(iprot)
self.feature_processing_type.append(_elem91)
else:
while iprot.peekList():
_elem92 = FeatureProcessingType()
_elem92.read(iprot)
self.feature_processing_type.append(_elem92)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('RandomSearcherConfig')
if self.max_num_block != None:
oprot.writeFieldBegin('max_num_block', TType.I32, 1)
oprot.writeI32(self.max_num_block)
oprot.writeFieldEnd()
if self.block_types != None:
oprot.writeFieldBegin('block_types', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.block_types))
for iter93 in self.block_types:
oprot.writeI32(iter93)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.macro_space_type != None:
oprot.writeFieldBegin('macro_space_type', TType.I32, 3)
oprot.writeI32(self.macro_space_type)
oprot.writeFieldEnd()
if self.micro_space_types != None:
oprot.writeFieldBegin('micro_space_types', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.micro_space_types))
for iter94 in self.micro_space_types:
iter94.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_processing_type != None:
oprot.writeFieldBegin('feature_processing_type', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.feature_processing_type))
for iter95 in self.feature_processing_type:
iter95.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.max_num_block is not None:
value = pprint.pformat(self.max_num_block, indent=0)
value = padding.join(value.splitlines(True))
L.append(' max_num_block=%s' % (value))
if self.block_types is not None:
value = pprint.pformat(self.block_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_types=%s' % (value))
if self.macro_space_type is not None:
value = pprint.pformat(self.macro_space_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' macro_space_type=%s' % (value))
if self.micro_space_types is not None:
value = pprint.pformat(self.micro_space_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' micro_space_types=%s' % (value))
if self.feature_processing_type is not None:
value = pprint.pformat(self.feature_processing_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' feature_processing_type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
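# Illustrative sketch only, not part of the Thrift-compiler output: a minimal
# round trip through the pure-Python read()/write() paths of RandomSearcherConfig
# above, using the unaccelerated TBinaryProtocol so the fastproto fast path is
# skipped. Assumes the keyword __init__ that the generated module installs after
# the class definitions, and the thrift runtime modules (TTransport,
# TBinaryProtocol) imported at the top of this file.
def _example_random_searcher_round_trip():
  config = RandomSearcherConfig(max_num_block=7, block_types=[0, 1, 2])
  out_buf = TTransport.TMemoryBuffer()
  config.write(TBinaryProtocol.TBinaryProtocol(out_buf))
  decoded = RandomSearcherConfig()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(out_buf.getvalue())))
  return decoded == config  # expected True under the assumptions above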
class EvolutionarySearcherConfig:
"""
Attributes:
- max_num_block
- block_types
- population_size
- candidate_size
- macro_space_type
- micro_space_types
- feature_processing_type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.max_num_block = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.block_types = []
(_etype99, _size96) = iprot.readListBegin()
if _size96 >= 0:
for _i100 in six.moves.range(_size96):
_elem101 = iprot.readI32()
self.block_types.append(_elem101)
else:
while iprot.peekList():
_elem102 = iprot.readI32()
self.block_types.append(_elem102)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.population_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.candidate_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.macro_space_type = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.micro_space_types = []
(_etype106, _size103) = iprot.readListBegin()
if _size103 >= 0:
for _i107 in six.moves.range(_size103):
_elem108 = MicroSearchSpaceType()
_elem108.read(iprot)
self.micro_space_types.append(_elem108)
else:
while iprot.peekList():
_elem109 = MicroSearchSpaceType()
_elem109.read(iprot)
self.micro_space_types.append(_elem109)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.feature_processing_type = []
(_etype113, _size110) = iprot.readListBegin()
if _size110 >= 0:
for _i114 in six.moves.range(_size110):
_elem115 = FeatureProcessingType()
_elem115.read(iprot)
self.feature_processing_type.append(_elem115)
else:
while iprot.peekList():
_elem116 = FeatureProcessingType()
_elem116.read(iprot)
self.feature_processing_type.append(_elem116)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('EvolutionarySearcherConfig')
if self.max_num_block != None:
oprot.writeFieldBegin('max_num_block', TType.I32, 1)
oprot.writeI32(self.max_num_block)
oprot.writeFieldEnd()
if self.block_types != None:
oprot.writeFieldBegin('block_types', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.block_types))
for iter117 in self.block_types:
oprot.writeI32(iter117)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.population_size != None:
oprot.writeFieldBegin('population_size', TType.I32, 3)
oprot.writeI32(self.population_size)
oprot.writeFieldEnd()
if self.candidate_size != None:
oprot.writeFieldBegin('candidate_size', TType.I32, 4)
oprot.writeI32(self.candidate_size)
oprot.writeFieldEnd()
if self.macro_space_type != None:
oprot.writeFieldBegin('macro_space_type', TType.I32, 5)
oprot.writeI32(self.macro_space_type)
oprot.writeFieldEnd()
if self.micro_space_types != None:
oprot.writeFieldBegin('micro_space_types', TType.LIST, 7)
oprot.writeListBegin(TType.STRUCT, len(self.micro_space_types))
for iter118 in self.micro_space_types:
iter118.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_processing_type != None:
oprot.writeFieldBegin('feature_processing_type', TType.LIST, 8)
oprot.writeListBegin(TType.STRUCT, len(self.feature_processing_type))
for iter119 in self.feature_processing_type:
iter119.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.max_num_block is not None:
value = pprint.pformat(self.max_num_block, indent=0)
value = padding.join(value.splitlines(True))
L.append(' max_num_block=%s' % (value))
if self.block_types is not None:
value = pprint.pformat(self.block_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_types=%s' % (value))
if self.population_size is not None:
value = pprint.pformat(self.population_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' population_size=%s' % (value))
if self.candidate_size is not None:
value = pprint.pformat(self.candidate_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' candidate_size=%s' % (value))
if self.macro_space_type is not None:
value = pprint.pformat(self.macro_space_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' macro_space_type=%s' % (value))
if self.micro_space_types is not None:
value = pprint.pformat(self.micro_space_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' micro_space_types=%s' % (value))
if self.feature_processing_type is not None:
value = pprint.pformat(self.feature_processing_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' feature_processing_type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class SearcherConfig(object):
"""
Attributes:
- random_searcher
- evolutionary_searcher
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
RANDOM_SEARCHER = 1
EVOLUTIONARY_SEARCHER = 2
@staticmethod
def isUnion():
return True
def get_random_searcher(self):
assert self.field == 1
return self.value
def get_evolutionary_searcher(self):
assert self.field == 2
return self.value
def set_random_searcher(self, value):
self.field = 1
self.value = value
def set_evolutionary_searcher(self, value):
self.field = 2
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 16
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('random_searcher', value)
if self.field == 2:
padding = ' ' * 22
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('evolutionary_searcher', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
random_searcher = RandomSearcherConfig()
random_searcher.read(iprot)
assert self.field == 0 and self.value is None
self.set_random_searcher(random_searcher)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
evolutionary_searcher = EvolutionarySearcherConfig()
evolutionary_searcher.read(iprot)
assert self.field == 0 and self.value is None
self.set_evolutionary_searcher(evolutionary_searcher)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('SearcherConfig')
if self.field == 1:
oprot.writeFieldBegin('random_searcher', TType.STRUCT, 1)
random_searcher = self.value
random_searcher.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('evolutionary_searcher', TType.STRUCT, 2)
evolutionary_searcher = self.value
evolutionary_searcher.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
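# Illustrative sketch only, not part of the Thrift-compiler output: SearcherConfig
# is a union, so exactly one member lives in self.value and self.field records
# which setter was used; readers dispatch on getType() before calling the
# matching getter. Assumes the keyword __init__ that the generated module
# installs after the class definitions.
def _example_select_searcher():
  searcher = SearcherConfig()
  searcher.set_random_searcher(RandomSearcherConfig(max_num_block=5))
  if searcher.getType() == SearcherConfig.RANDOM_SEARCHER:
    return searcher.get_random_searcher().max_num_block  # -> 5
  return None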
class ModelConfig(object):
"""
Attributes:
- nasrec_net
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
NASREC_NET = 1
@staticmethod
def isUnion():
return True
def get_nasrec_net(self):
assert self.field == 1
return self.value
def set_nasrec_net(self, value):
self.field = 1
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 11
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('nasrec_net', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
nasrec_net = NASRecNetConfig()
nasrec_net.read(iprot)
assert self.field == 0 and self.value is None
self.set_nasrec_net(nasrec_net)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('ModelConfig')
if self.field == 1:
oprot.writeFieldBegin('nasrec_net', TType.STRUCT, 1)
nasrec_net = self.value
nasrec_net.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SGDOptimConfig:
"""
Attributes:
- lr
- momentum
- dampening
- nesterov
- weight_decay
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.momentum = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.dampening = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.nesterov = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SGDOptimConfig')
if self.lr != None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.momentum != None:
oprot.writeFieldBegin('momentum', TType.FLOAT, 2)
oprot.writeFloat(self.momentum)
oprot.writeFieldEnd()
if self.dampening != None:
oprot.writeFieldBegin('dampening', TType.FLOAT, 3)
oprot.writeFloat(self.dampening)
oprot.writeFieldEnd()
if self.nesterov != None:
oprot.writeFieldBegin('nesterov', TType.BOOL, 4)
oprot.writeBool(self.nesterov)
oprot.writeFieldEnd()
if self.weight_decay != None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 5)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.momentum is not None:
value = pprint.pformat(self.momentum, indent=0)
value = padding.join(value.splitlines(True))
L.append(' momentum=%s' % (value))
if self.dampening is not None:
value = pprint.pformat(self.dampening, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dampening=%s' % (value))
if self.nesterov is not None:
value = pprint.pformat(self.nesterov, indent=0)
value = padding.join(value.splitlines(True))
L.append(' nesterov=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AdagradOptimConfig:
"""
Attributes:
- lr
- lr_decay
- weight_decay
- initial_accumulator_value
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.lr_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.initial_accumulator_value = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AdagradOptimConfig')
if self.lr != None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.lr_decay != None:
oprot.writeFieldBegin('lr_decay', TType.FLOAT, 2)
oprot.writeFloat(self.lr_decay)
oprot.writeFieldEnd()
if self.weight_decay != None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 3)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
if self.initial_accumulator_value != None:
oprot.writeFieldBegin('initial_accumulator_value', TType.FLOAT, 4)
oprot.writeFloat(self.initial_accumulator_value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.lr_decay is not None:
value = pprint.pformat(self.lr_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr_decay=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
if self.initial_accumulator_value is not None:
value = pprint.pformat(self.initial_accumulator_value, indent=0)
value = padding.join(value.splitlines(True))
L.append(' initial_accumulator_value=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class SparseAdamOptimConfig:
"""
Attributes:
- lr
- betas0
- betas1
- eps
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.betas0 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.betas1 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.eps = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SparseAdamOptimConfig')
if self.lr != None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.betas0 != None:
oprot.writeFieldBegin('betas0', TType.FLOAT, 2)
oprot.writeFloat(self.betas0)
oprot.writeFieldEnd()
if self.betas1 != None:
oprot.writeFieldBegin('betas1', TType.FLOAT, 3)
oprot.writeFloat(self.betas1)
oprot.writeFieldEnd()
if self.eps != None:
oprot.writeFieldBegin('eps', TType.FLOAT, 4)
oprot.writeFloat(self.eps)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.betas0 is not None:
value = pprint.pformat(self.betas0, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas0=%s' % (value))
if self.betas1 is not None:
value = pprint.pformat(self.betas1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas1=%s' % (value))
if self.eps is not None:
value = pprint.pformat(self.eps, indent=0)
value = padding.join(value.splitlines(True))
L.append(' eps=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AdamOptimConfig:
"""
Attributes:
- lr
- amsgrad
- weight_decay
- betas0
- betas1
- eps
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.amsgrad = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.betas0 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.FLOAT:
self.betas1 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.FLOAT:
self.eps = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AdamOptimConfig')
if self.lr != None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.amsgrad != None:
oprot.writeFieldBegin('amsgrad', TType.BOOL, 2)
oprot.writeBool(self.amsgrad)
oprot.writeFieldEnd()
if self.weight_decay != None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 3)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
if self.betas0 != None:
oprot.writeFieldBegin('betas0', TType.FLOAT, 4)
oprot.writeFloat(self.betas0)
oprot.writeFieldEnd()
if self.betas1 != None:
oprot.writeFieldBegin('betas1', TType.FLOAT, 5)
oprot.writeFloat(self.betas1)
oprot.writeFieldEnd()
if self.eps != None:
oprot.writeFieldBegin('eps', TType.FLOAT, 6)
oprot.writeFloat(self.eps)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.amsgrad is not None:
value = pprint.pformat(self.amsgrad, indent=0)
value = padding.join(value.splitlines(True))
L.append(' amsgrad=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
if self.betas0 is not None:
value = pprint.pformat(self.betas0, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas0=%s' % (value))
if self.betas1 is not None:
value = pprint.pformat(self.betas1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas1=%s' % (value))
if self.eps is not None:
value = pprint.pformat(self.eps, indent=0)
value = padding.join(value.splitlines(True))
L.append(' eps=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class RMSpropOptimConfig:
"""
Attributes:
- lr
- alpha
- weight_decay
- momentum
- centered
- eps
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.alpha = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.momentum = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.centered = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.FLOAT:
self.eps = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('RMSpropOptimConfig')
if self.lr != None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.alpha != None:
oprot.writeFieldBegin('alpha', TType.FLOAT, 2)
oprot.writeFloat(self.alpha)
oprot.writeFieldEnd()
if self.weight_decay != None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 3)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
if self.momentum != None:
oprot.writeFieldBegin('momentum', TType.FLOAT, 4)
oprot.writeFloat(self.momentum)
oprot.writeFieldEnd()
if self.centered != None:
oprot.writeFieldBegin('centered', TType.BOOL, 5)
oprot.writeBool(self.centered)
oprot.writeFieldEnd()
if self.eps != None:
oprot.writeFieldBegin('eps', TType.FLOAT, 6)
oprot.writeFloat(self.eps)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.alpha is not None:
value = pprint.pformat(self.alpha, indent=0)
value = padding.join(value.splitlines(True))
L.append(' alpha=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
if self.momentum is not None:
value = pprint.pformat(self.momentum, indent=0)
value = padding.join(value.splitlines(True))
L.append(' momentum=%s' % (value))
if self.centered is not None:
value = pprint.pformat(self.centered, indent=0)
value = padding.join(value.splitlines(True))
L.append(' centered=%s' % (value))
if self.eps is not None:
value = pprint.pformat(self.eps, indent=0)
value = padding.join(value.splitlines(True))
L.append(' eps=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class OptimConfig(object):
"""
Attributes:
- sgd
- adagrad
- sparse_adam
- adam
- rmsprop
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
SGD = 1
ADAGRAD = 2
SPARSE_ADAM = 3
ADAM = 4
RMSPROP = 5
@staticmethod
def isUnion():
return True
def get_sgd(self):
assert self.field == 1
return self.value
def get_adagrad(self):
assert self.field == 2
return self.value
def get_sparse_adam(self):
assert self.field == 3
return self.value
def get_adam(self):
assert self.field == 4
return self.value
def get_rmsprop(self):
assert self.field == 5
return self.value
def set_sgd(self, value):
self.field = 1
self.value = value
def set_adagrad(self, value):
self.field = 2
self.value = value
def set_sparse_adam(self, value):
self.field = 3
self.value = value
def set_adam(self, value):
self.field = 4
self.value = value
def set_rmsprop(self, value):
self.field = 5
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('sgd', value)
if self.field == 2:
padding = ' ' * 8
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('adagrad', value)
if self.field == 3:
padding = ' ' * 12
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('sparse_adam', value)
if self.field == 4:
padding = ' ' * 5
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('adam', value)
if self.field == 5:
padding = ' ' * 8
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('rmsprop', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
sgd = SGDOptimConfig()
sgd.read(iprot)
assert self.field == 0 and self.value is None
self.set_sgd(sgd)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
adagrad = AdagradOptimConfig()
adagrad.read(iprot)
assert self.field == 0 and self.value is None
self.set_adagrad(adagrad)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
sparse_adam = SparseAdamOptimConfig()
sparse_adam.read(iprot)
assert self.field == 0 and self.value is None
self.set_sparse_adam(sparse_adam)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
adam = AdamOptimConfig()
adam.read(iprot)
assert self.field == 0 and self.value is None
self.set_adam(adam)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
rmsprop = RMSpropOptimConfig()
rmsprop.read(iprot)
assert self.field == 0 and self.value is None
self.set_rmsprop(rmsprop)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('OptimConfig')
if self.field == 1:
oprot.writeFieldBegin('sgd', TType.STRUCT, 1)
sgd = self.value
sgd.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('adagrad', TType.STRUCT, 2)
adagrad = self.value
adagrad.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('sparse_adam', TType.STRUCT, 3)
sparse_adam = self.value
sparse_adam.write(oprot)
oprot.writeFieldEnd()
if self.field == 4:
oprot.writeFieldBegin('adam', TType.STRUCT, 4)
adam = self.value
adam.write(oprot)
oprot.writeFieldEnd()
if self.field == 5:
oprot.writeFieldBegin('rmsprop', TType.STRUCT, 5)
rmsprop = self.value
rmsprop.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
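# Illustrative sketch only, not part of the Thrift-compiler output: OptimConfig
# is a union over the per-optimizer structs above, so choosing an optimizer means
# filling exactly one branch, and consumers dispatch on getType(). Assumes the
# keyword __init__ that the generated module installs after the class
# definitions.
def _example_wrap_sgd_optimizer():
  sgd = SGDOptimConfig(lr=0.1, momentum=0.9, nesterov=True, weight_decay=0.0)
  optim = OptimConfig()
  optim.set_sgd(sgd)
  assert optim.getType() == OptimConfig.SGD
  return optim.get_sgd().lr  # -> 0.1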
class SumPooling:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SumPooling')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AvgPooling:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AvgPooling')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class PoolingConfig(object):
"""
Attributes:
- sum
- avg
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
SUM = 1
AVG = 2
@staticmethod
def isUnion():
return True
def get_sum(self):
assert self.field == 1
return self.value
def get_avg(self):
assert self.field == 2
return self.value
def set_sum(self, value):
self.field = 1
self.value = value
def set_avg(self, value):
self.field = 2
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('sum', value)
if self.field == 2:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('avg', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
sum = SumPooling()
sum.read(iprot)
assert self.field == 0 and self.value is None
self.set_sum(sum)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
avg = AvgPooling()
avg.read(iprot)
assert self.field == 0 and self.value is None
self.set_avg(avg)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('PoolingConfig')
if self.field == 1:
oprot.writeFieldBegin('sum', TType.STRUCT, 1)
sum = self.value
sum.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('avg', TType.STRUCT, 2)
avg = self.value
avg.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SparseFeatureItem:
"""
Attributes:
- name
- hash_size
- embed_dim
- optim
- pooling
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.hash_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.optim = OptimConfig()
self.optim.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.pooling = PoolingConfig()
self.pooling.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SparseFeatureItem')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
      if UTF8STRINGS and not isinstance(self.name, bytes):
        oprot.writeString(self.name.encode('utf-8'))
      else:
        oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.hash_size != None:
oprot.writeFieldBegin('hash_size', TType.I32, 2)
oprot.writeI32(self.hash_size)
oprot.writeFieldEnd()
if self.embed_dim != None:
oprot.writeFieldBegin('embed_dim', TType.I32, 3)
oprot.writeI32(self.embed_dim)
oprot.writeFieldEnd()
if self.optim != None:
oprot.writeFieldBegin('optim', TType.STRUCT, 4)
self.optim.write(oprot)
oprot.writeFieldEnd()
if self.pooling != None:
oprot.writeFieldBegin('pooling', TType.STRUCT, 5)
self.pooling.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.hash_size is not None:
value = pprint.pformat(self.hash_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' hash_size=%s' % (value))
if self.embed_dim is not None:
value = pprint.pformat(self.embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' embed_dim=%s' % (value))
if self.optim is not None:
value = pprint.pformat(self.optim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optim=%s' % (value))
if self.pooling is not None:
value = pprint.pformat(self.pooling, indent=0)
value = padding.join(value.splitlines(True))
L.append(' pooling=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
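# Illustrative sketch of typical usage, assuming the generated __init__ for
# SparseFeatureItem is bound near the bottom of this module (the feature name
# and sizes below are hypothetical):
#   item = SparseFeatureItem()
#   item.name = 'page_id'
#   item.hash_size = 1000000
#   item.embed_dim = 32
#   item.optim = OptimConfig()      # OptimConfig / PoolingConfig as used by read() above
# The field ids handled by read()/write() above (1..5) must stay in sync with
# the thrift_spec tuple attached to the class later in this file.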
class SparseFeatureConfig:
"""
Attributes:
- features
- embed_dim
- optim
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.features = []
(_etype123, _size120) = iprot.readListBegin()
if _size120 >= 0:
for _i124 in six.moves.range(_size120):
_elem125 = SparseFeatureItem()
_elem125.read(iprot)
self.features.append(_elem125)
else:
while iprot.peekList():
_elem126 = SparseFeatureItem()
_elem126.read(iprot)
self.features.append(_elem126)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.optim = OptimConfig()
self.optim.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SparseFeatureConfig')
if self.features != None:
oprot.writeFieldBegin('features', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.features))
for iter127 in self.features:
iter127.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.embed_dim != None:
oprot.writeFieldBegin('embed_dim', TType.I32, 2)
oprot.writeI32(self.embed_dim)
oprot.writeFieldEnd()
if self.optim != None:
oprot.writeFieldBegin('optim', TType.STRUCT, 3)
self.optim.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.features is not None:
value = pprint.pformat(self.features, indent=0)
value = padding.join(value.splitlines(True))
L.append(' features=%s' % (value))
if self.embed_dim is not None:
value = pprint.pformat(self.embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' embed_dim=%s' % (value))
if self.optim is not None:
value = pprint.pformat(self.optim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optim=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class DenseFeatureConfig:
"""
Attributes:
- features
- optim
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.features = []
(_etype131, _size128) = iprot.readListBegin()
if _size128 >= 0:
for _i132 in six.moves.range(_size128):
_elem133 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.features.append(_elem133)
else:
while iprot.peekList():
_elem134 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.features.append(_elem134)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.optim = OptimConfig()
self.optim.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DenseFeatureConfig')
if self.features != None:
oprot.writeFieldBegin('features', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.features))
for iter135 in self.features:
oprot.writeString(iter135.encode('utf-8')) if UTF8STRINGS and not isinstance(iter135, bytes) else oprot.writeString(iter135)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.optim != None:
oprot.writeFieldBegin('optim', TType.STRUCT, 2)
self.optim.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.features is not None:
value = pprint.pformat(self.features, indent=0)
value = padding.join(value.splitlines(True))
L.append(' features=%s' % (value))
if self.optim is not None:
value = pprint.pformat(self.optim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optim=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class FeatureConfig:
"""
Attributes:
- dense
- sparse
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.dense = DenseFeatureConfig()
self.dense.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sparse = SparseFeatureConfig()
self.sparse.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FeatureConfig')
if self.dense != None:
oprot.writeFieldBegin('dense', TType.STRUCT, 1)
self.dense.write(oprot)
oprot.writeFieldEnd()
if self.sparse != None:
oprot.writeFieldBegin('sparse', TType.STRUCT, 2)
self.sparse.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.dense is not None:
value = pprint.pformat(self.dense, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dense=%s' % (value))
if self.sparse is not None:
value = pprint.pformat(self.sparse, indent=0)
value = padding.join(value.splitlines(True))
L.append(' sparse=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
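# Illustrative sketch: FeatureConfig simply nests the two configs above
# (hypothetical usage, assuming the generated __init__ is bound later in this
# module):
#   feature_config = FeatureConfig()
#   feature_config.dense = DenseFeatureConfig()    # field id 1
#   feature_config.sparse = SparseFeatureConfig()  # field id 2
# Both members are optional on the wire: write() only emits a field when the
# attribute is not None, and read() skips any field id it does not recognize.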
class BCEWithLogitsLoss:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BCEWithLogitsLoss')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class BCELoss:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BCELoss')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MSELoss:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MSELoss')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class LossConfig(object):
"""
Thrift union: exactly one of the members below is set at a time.
Attributes:
 - bcewithlogits (BCEWithLogitsLoss)
 - bce (BCELoss)
 - mse (MSELoss)
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
BCEWITHLOGITS = 1
BCE = 2
MSE = 3
@staticmethod
def isUnion():
return True
def get_bcewithlogits(self):
assert self.field == 1
return self.value
def get_bce(self):
assert self.field == 2
return self.value
def get_mse(self):
assert self.field == 3
return self.value
def set_bcewithlogits(self, value):
self.field = 1
self.value = value
def set_bce(self, value):
self.field = 2
self.value = value
def set_mse(self, value):
self.field = 3
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 14
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('bcewithlogits', value)
if self.field == 2:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('bce', value)
if self.field == 3:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('mse', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
bcewithlogits = BCEWithLogitsLoss()
bcewithlogits.read(iprot)
assert self.field == 0 and self.value is None
self.set_bcewithlogits(bcewithlogits)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
bce = BCELoss()
bce.read(iprot)
assert self.field == 0 and self.value is None
self.set_bce(bce)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
mse = MSELoss()
mse.read(iprot)
assert self.field == 0 and self.value is None
self.set_mse(mse)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('LossConfig')
if self.field == 1:
oprot.writeFieldBegin('bcewithlogits', TType.STRUCT, 1)
bcewithlogits = self.value
bcewithlogits.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('bce', TType.STRUCT, 2)
bce = self.value
bce.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('mse', TType.STRUCT, 3)
mse = self.value
mse.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
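# Illustrative sketch: LossConfig is a Thrift union, so exactly one member is
# populated at a time. The setters above record the active member in
# self.field and the payload in self.value (hypothetical usage, assuming the
# generated __init__ is bound later in this module):
#   loss = LossConfig()
#   loss.set_mse(MSELoss())
#   loss.getType() == LossConfig.MSE    # True
#   loss.get_mse()                      # returns the MSELoss payload
# Calling a getter for a member that is not active fails the assert on
# self.field.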
class LoggingConfig:
"""
Attributes:
- log_freq
- tb_log_freq
- tb_log_model_weight_hist
- tb_log_pr_curve_batch
- tb_log_model_weight_filter_regex
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.log_freq = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.tb_log_freq = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.tb_log_model_weight_hist = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.tb_log_pr_curve_batch = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.tb_log_model_weight_filter_regex = []
(_etype139, _size136) = iprot.readListBegin()
if _size136 >= 0:
for _i140 in six.moves.range(_size136):
_elem141 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.tb_log_model_weight_filter_regex.append(_elem141)
else:
while iprot.peekList():
_elem142 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.tb_log_model_weight_filter_regex.append(_elem142)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('LoggingConfig')
if self.log_freq != None:
oprot.writeFieldBegin('log_freq', TType.I32, 1)
oprot.writeI32(self.log_freq)
oprot.writeFieldEnd()
if self.tb_log_freq != None:
oprot.writeFieldBegin('tb_log_freq', TType.I32, 2)
oprot.writeI32(self.tb_log_freq)
oprot.writeFieldEnd()
if self.tb_log_model_weight_hist != None:
oprot.writeFieldBegin('tb_log_model_weight_hist', TType.BOOL, 3)
oprot.writeBool(self.tb_log_model_weight_hist)
oprot.writeFieldEnd()
if self.tb_log_pr_curve_batch != None:
oprot.writeFieldBegin('tb_log_pr_curve_batch', TType.BOOL, 4)
oprot.writeBool(self.tb_log_pr_curve_batch)
oprot.writeFieldEnd()
if self.tb_log_model_weight_filter_regex != None:
oprot.writeFieldBegin('tb_log_model_weight_filter_regex', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.tb_log_model_weight_filter_regex))
for iter143 in self.tb_log_model_weight_filter_regex:
oprot.writeString(iter143.encode('utf-8')) if UTF8STRINGS and not isinstance(iter143, bytes) else oprot.writeString(iter143)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.log_freq is not None:
value = pprint.pformat(self.log_freq, indent=0)
value = padding.join(value.splitlines(True))
L.append(' log_freq=%s' % (value))
if self.tb_log_freq is not None:
value = pprint.pformat(self.tb_log_freq, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_freq=%s' % (value))
if self.tb_log_model_weight_hist is not None:
value = pprint.pformat(self.tb_log_model_weight_hist, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_model_weight_hist=%s' % (value))
if self.tb_log_pr_curve_batch is not None:
value = pprint.pformat(self.tb_log_pr_curve_batch, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_pr_curve_batch=%s' % (value))
if self.tb_log_model_weight_filter_regex is not None:
value = pprint.pformat(self.tb_log_model_weight_filter_regex, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_model_weight_filter_regex=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class TrainConfig:
"""
Attributes:
- logging_config
- nepochs
- early_stop_on_val_loss
- loss
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.logging_config = LoggingConfig()
self.logging_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.nepochs = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.early_stop_on_val_loss = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.loss = LossConfig()
self.loss.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('TrainConfig')
if self.logging_config != None:
oprot.writeFieldBegin('logging_config', TType.STRUCT, 1)
self.logging_config.write(oprot)
oprot.writeFieldEnd()
if self.nepochs != None:
oprot.writeFieldBegin('nepochs', TType.I32, 3)
oprot.writeI32(self.nepochs)
oprot.writeFieldEnd()
if self.early_stop_on_val_loss != None:
oprot.writeFieldBegin('early_stop_on_val_loss', TType.BOOL, 5)
oprot.writeBool(self.early_stop_on_val_loss)
oprot.writeFieldEnd()
if self.loss != None:
oprot.writeFieldBegin('loss', TType.STRUCT, 6)
self.loss.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.logging_config is not None:
value = pprint.pformat(self.logging_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' logging_config=%s' % (value))
if self.nepochs is not None:
value = pprint.pformat(self.nepochs, indent=0)
value = padding.join(value.splitlines(True))
L.append(' nepochs=%s' % (value))
if self.early_stop_on_val_loss is not None:
value = pprint.pformat(self.early_stop_on_val_loss, indent=0)
value = padding.join(value.splitlines(True))
L.append(' early_stop_on_val_loss=%s' % (value))
if self.loss is not None:
value = pprint.pformat(self.loss, indent=0)
value = padding.join(value.splitlines(True))
L.append(' loss=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
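# Note on field ids: TrainConfig uses non-contiguous ids (1, 3, 5, 6), which
# usually indicates fields that were removed or reserved in the .thrift
# definition. Ids the reader does not recognize fall through to the final
# `iprot.skip(ftype)` branch, so older and newer schema revisions stay
# wire-compatible.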
class EvalConfig:
"""
Attributes:
- logging_config
- loss
- compute_ne
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.logging_config = LoggingConfig()
self.logging_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.loss = LossConfig()
self.loss.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.compute_ne = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('EvalConfig')
if self.logging_config != None:
oprot.writeFieldBegin('logging_config', TType.STRUCT, 1)
self.logging_config.write(oprot)
oprot.writeFieldEnd()
if self.loss != None:
oprot.writeFieldBegin('loss', TType.STRUCT, 2)
self.loss.write(oprot)
oprot.writeFieldEnd()
if self.compute_ne != None:
oprot.writeFieldBegin('compute_ne', TType.BOOL, 3)
oprot.writeBool(self.compute_ne)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.logging_config is not None:
value = pprint.pformat(self.logging_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' logging_config=%s' % (value))
if self.loss is not None:
value = pprint.pformat(self.loss, indent=0)
value = padding.join(value.splitlines(True))
L.append(' loss=%s' % (value))
if self.compute_ne is not None:
value = pprint.pformat(self.compute_ne, indent=0)
value = padding.join(value.splitlines(True))
L.append(' compute_ne=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class CheckpointConfig:
"""
Attributes:
- ckp_interval
- ckp_path
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.ckp_interval = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ckp_path = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CheckpointConfig')
if self.ckp_interval != None:
oprot.writeFieldBegin('ckp_interval', TType.I32, 1)
oprot.writeI32(self.ckp_interval)
oprot.writeFieldEnd()
if self.ckp_path != None:
oprot.writeFieldBegin('ckp_path', TType.STRING, 2)
oprot.writeString(self.ckp_path.encode('utf-8')) if UTF8STRINGS and not isinstance(self.ckp_path, bytes) else oprot.writeString(self.ckp_path)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.ckp_interval is not None:
value = pprint.pformat(self.ckp_interval, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ckp_interval=%s' % (value))
if self.ckp_path is not None:
value = pprint.pformat(self.ckp_path, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ckp_path=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class KoskiReaderConfig:
"""
Attributes:
- prefetch_capacity
- pin_memory
- num_workers
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.prefetch_capacity = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.pin_memory = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.num_workers = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('KoskiReaderConfig')
if self.prefetch_capacity != None:
oprot.writeFieldBegin('prefetch_capacity', TType.I64, 1)
oprot.writeI64(self.prefetch_capacity)
oprot.writeFieldEnd()
if self.pin_memory != None:
oprot.writeFieldBegin('pin_memory', TType.BOOL, 2)
oprot.writeBool(self.pin_memory)
oprot.writeFieldEnd()
if self.num_workers != None:
oprot.writeFieldBegin('num_workers', TType.I32, 3)
oprot.writeI32(self.num_workers)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.prefetch_capacity is not None:
value = pprint.pformat(self.prefetch_capacity, indent=0)
value = padding.join(value.splitlines(True))
L.append(' prefetch_capacity=%s' % (value))
if self.pin_memory is not None:
value = pprint.pformat(self.pin_memory, indent=0)
value = padding.join(value.splitlines(True))
L.append(' pin_memory=%s' % (value))
if self.num_workers is not None:
value = pprint.pformat(self.num_workers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_workers=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class PerformanceConfig:
"""
Attributes:
 - use_gpu (bool)
 - num_readers (i32)
 - num_trainers (i32)
 - ckp_config (CheckpointConfig)
 - data_queue_maxsize (i32)
 - reader_threads (i32)
 - num_gpu (i32)
 - enable_profiling (bool, has a default in thrift_spec)
 - koski (KoskiReaderConfig)
 - omp_num_threads (i32, has a default in thrift_spec)
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.use_gpu = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.num_readers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.num_trainers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.ckp_config = CheckpointConfig()
self.ckp_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.data_queue_maxsize = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.reader_threads = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.num_gpu = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.enable_profiling = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRUCT:
self.koski = KoskiReaderConfig()
self.koski.read(iprot)
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.omp_num_threads = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('PerformanceConfig')
if self.use_gpu != None:
oprot.writeFieldBegin('use_gpu', TType.BOOL, 1)
oprot.writeBool(self.use_gpu)
oprot.writeFieldEnd()
if self.num_readers != None:
oprot.writeFieldBegin('num_readers', TType.I32, 2)
oprot.writeI32(self.num_readers)
oprot.writeFieldEnd()
if self.num_trainers != None:
oprot.writeFieldBegin('num_trainers', TType.I32, 3)
oprot.writeI32(self.num_trainers)
oprot.writeFieldEnd()
if self.ckp_config != None:
oprot.writeFieldBegin('ckp_config', TType.STRUCT, 4)
self.ckp_config.write(oprot)
oprot.writeFieldEnd()
if self.data_queue_maxsize != None:
oprot.writeFieldBegin('data_queue_maxsize', TType.I32, 5)
oprot.writeI32(self.data_queue_maxsize)
oprot.writeFieldEnd()
if self.reader_threads != None:
oprot.writeFieldBegin('reader_threads', TType.I32, 6)
oprot.writeI32(self.reader_threads)
oprot.writeFieldEnd()
if self.num_gpu != None:
oprot.writeFieldBegin('num_gpu', TType.I32, 7)
oprot.writeI32(self.num_gpu)
oprot.writeFieldEnd()
if self.enable_profiling != None and self.enable_profiling != self.thrift_spec[8][4]:
oprot.writeFieldBegin('enable_profiling', TType.BOOL, 8)
oprot.writeBool(self.enable_profiling)
oprot.writeFieldEnd()
if self.koski != None:
oprot.writeFieldBegin('koski', TType.STRUCT, 9)
self.koski.write(oprot)
oprot.writeFieldEnd()
if self.omp_num_threads != None and self.omp_num_threads != self.thrift_spec[10][4]:
oprot.writeFieldBegin('omp_num_threads', TType.I32, 10)
oprot.writeI32(self.omp_num_threads)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.use_gpu is not None:
value = pprint.pformat(self.use_gpu, indent=0)
value = padding.join(value.splitlines(True))
L.append(' use_gpu=%s' % (value))
if self.num_readers is not None:
value = pprint.pformat(self.num_readers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_readers=%s' % (value))
if self.num_trainers is not None:
value = pprint.pformat(self.num_trainers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_trainers=%s' % (value))
if self.ckp_config is not None:
value = pprint.pformat(self.ckp_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ckp_config=%s' % (value))
if self.data_queue_maxsize is not None:
value = pprint.pformat(self.data_queue_maxsize, indent=0)
value = padding.join(value.splitlines(True))
L.append(' data_queue_maxsize=%s' % (value))
if self.reader_threads is not None:
value = pprint.pformat(self.reader_threads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' reader_threads=%s' % (value))
if self.num_gpu is not None:
value = pprint.pformat(self.num_gpu, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_gpu=%s' % (value))
if self.enable_profiling is not None:
value = pprint.pformat(self.enable_profiling, indent=0)
value = padding.join(value.splitlines(True))
L.append(' enable_profiling=%s' % (value))
if self.koski is not None:
value = pprint.pformat(self.koski, indent=0)
value = padding.join(value.splitlines(True))
L.append(' koski=%s' % (value))
if self.omp_num_threads is not None:
value = pprint.pformat(self.omp_num_threads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' omp_num_threads=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
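# The remainder of this module attaches the generated wiring to the classes
# defined above: thrift_spec tuples, annotation dicts, and the
# __init__/__getstate__/__setstate__ helpers. Illustrative round-trip sketch
# (hypothetical values; assumes the __init__ bindings below have run and uses
# the standard TMemoryBuffer from the already imported TTransport module):
#   buf = TTransport.TMemoryBuffer()
#   cfg = CheckpointConfig()
#   cfg.ckp_interval = 10
#   cfg.ckp_path = '/tmp/checkpoints'
#   cfg.write(TBinaryProtocol.TBinaryProtocol(buf))
#   decoded = CheckpointConfig()
#   decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
#   assert decoded == cfg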
all_structs.append(DataFromFileConfig)
DataFromFileConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'data_file', True, None, 2, ), # 1
(2, TType.I32, 'batch_size', None, 100, 2, ), # 2
(3, TType.I32, 'num_batches', None, -1, 2, ), # 3
(4, TType.LIST, 'splits', (TType.FLOAT,None), [
0.800000,
0.100000,
], 2, ), # 4
(5, TType.I32, 'num_samples_meta', None, 100000, 2, ), # 5
)
DataFromFileConfig.thrift_struct_annotations = {
}
DataFromFileConfig.thrift_field_annotations = {
}
def DataFromFileConfig__init__(self, data_file=None, batch_size=DataFromFileConfig.thrift_spec[2][4], num_batches=DataFromFileConfig.thrift_spec[3][4], splits=DataFromFileConfig.thrift_spec[4][4], num_samples_meta=DataFromFileConfig.thrift_spec[5][4],):
self.data_file = data_file
self.batch_size = batch_size
self.num_batches = num_batches
if splits is self.thrift_spec[4][4]:
splits = [
0.800000,
0.100000,
]
self.splits = splits
self.num_samples_meta = num_samples_meta
DataFromFileConfig.__init__ = DataFromFileConfig__init__
def DataFromFileConfig__setstate__(self, state):
state.setdefault('data_file', None)
state.setdefault('batch_size', 100)
state.setdefault('num_batches', -1)
state.setdefault('splits', [
0.800000,
0.100000,
])
state.setdefault('num_samples_meta', 100000)
self.__dict__ = state
DataFromFileConfig.__getstate__ = lambda self: self.__dict__.copy()
DataFromFileConfig.__setstate__ = DataFromFileConfig__setstate__
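# The generated __init__ above pulls per-instance defaults from thrift_spec
# (batch_size=100, num_batches=-1, num_samples_meta=100000) and copies the
# default splits list [0.8, 0.1], so mutating one instance's splits never
# leaks into the class-level default. Illustrative usage (hypothetical path):
#   cfg = DataFromFileConfig(data_file='features.csv')
#   cfg.batch_size    # 100
#   cfg.splits        # a fresh [0.8, 0.1] list owned by this instance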
all_structs.append(DataConfig)
DataConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'from_file', [DataFromFileConfig, DataFromFileConfig.thrift_spec, False], None, 2, ), # 1
)
DataConfig.thrift_struct_annotations = {
}
DataConfig.thrift_field_annotations = {
}
def DataConfig__init__(self, from_file=None,):
self.field = 0
self.value = None
if from_file is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = from_file
DataConfig.__init__ = DataConfig__init__
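# DataConfig follows the union pattern: the generated __init__ above accepts a
# single member, and passing from_file sets self.field = 1 with the
# DataFromFileConfig payload stored in self.value, mirroring the set_*/get_*
# accessors generated for unions such as LossConfig above.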
all_structs.append(MicroClose)
MicroClose.thrift_spec = (
)
MicroClose.thrift_struct_annotations = {
}
MicroClose.thrift_field_annotations = {
}
all_structs.append(MicroMLPConfig)
MicroMLPConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 1
)
MicroMLPConfig.thrift_struct_annotations = {
}
MicroMLPConfig.thrift_field_annotations = {
}
def MicroMLPConfig__init__(self, arc=None,):
self.arc = arc
MicroMLPConfig.__init__ = MicroMLPConfig__init__
def MicroMLPConfig__setstate__(self, state):
state.setdefault('arc', None)
self.__dict__ = state
MicroMLPConfig.__getstate__ = lambda self: self.__dict__.copy()
MicroMLPConfig.__setstate__ = MicroMLPConfig__setstate__
all_structs.append(MicroCINConfig)
MicroCINConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 1
(2, TType.LIST, 'num_of_layers', (TType.I32,None), [
1,
2,
3,
], 2, ), # 2
)
MicroCINConfig.thrift_struct_annotations = {
}
MicroCINConfig.thrift_field_annotations = {
}
def MicroCINConfig__init__(self, arc=None, num_of_layers=MicroCINConfig.thrift_spec[2][4],):
self.arc = arc
if num_of_layers is self.thrift_spec[2][4]:
num_of_layers = [
1,
2,
3,
]
self.num_of_layers = num_of_layers
MicroCINConfig.__init__ = MicroCINConfig__init__
def MicroCINConfig__setstate__(self, state):
state.setdefault('arc', None)
state.setdefault('num_of_layers', [
1,
2,
3,
])
self.__dict__ = state
MicroCINConfig.__getstate__ = lambda self: self.__dict__.copy()
MicroCINConfig.__setstate__ = MicroCINConfig__setstate__
all_structs.append(MicroAttentionConfig)
MicroAttentionConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'num_of_layers', (TType.I32,None), [
1,
2,
3,
], 2, ), # 1
(2, TType.LIST, 'num_of_heads', (TType.I32,None), [
1,
2,
3,
], 2, ), # 2
(3, TType.LIST, 'att_embed_dim', (TType.I32,None), [
10,
], 2, ), # 3
(4, TType.LIST, 'dropout_prob', (TType.FLOAT,None), [
0.00000,
0.200000,
0.400000,
], 2, ), # 4
)
MicroAttentionConfig.thrift_struct_annotations = {
}
MicroAttentionConfig.thrift_field_annotations = {
}
def MicroAttentionConfig__init__(self, num_of_layers=MicroAttentionConfig.thrift_spec[1][4], num_of_heads=MicroAttentionConfig.thrift_spec[2][4], att_embed_dim=MicroAttentionConfig.thrift_spec[3][4], dropout_prob=MicroAttentionConfig.thrift_spec[4][4],):
if num_of_layers is self.thrift_spec[1][4]:
num_of_layers = [
1,
2,
3,
]
self.num_of_layers = num_of_layers
if num_of_heads is self.thrift_spec[2][4]:
num_of_heads = [
1,
2,
3,
]
self.num_of_heads = num_of_heads
if att_embed_dim is self.thrift_spec[3][4]:
att_embed_dim = [
10,
]
self.att_embed_dim = att_embed_dim
if dropout_prob is self.thrift_spec[4][4]:
dropout_prob = [
0.00000,
0.200000,
0.400000,
]
self.dropout_prob = dropout_prob
MicroAttentionConfig.__init__ = MicroAttentionConfig__init__
def MicroAttentionConfig__setstate__(self, state):
state.setdefault('num_of_layers', [
1,
2,
3,
])
state.setdefault('num_of_heads', [
1,
2,
3,
])
state.setdefault('att_embed_dim', [
10,
])
state.setdefault('dropout_prob', [
0.00000,
0.200000,
0.400000,
])
self.__dict__ = state
MicroAttentionConfig.__getstate__ = lambda self: self.__dict__.copy()
MicroAttentionConfig.__setstate__ = MicroAttentionConfig__setstate__
all_structs.append(MicroSearchSpaceType)
MicroSearchSpaceType.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'close', [MicroClose, MicroClose.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'micro_mlp', [MicroMLPConfig, MicroMLPConfig.thrift_spec, False], None, 2, ), # 2
(3, TType.STRUCT, 'micro_cin', [MicroCINConfig, MicroCINConfig.thrift_spec, False], None, 2, ), # 3
(4, TType.STRUCT, 'micro_attention', [MicroAttentionConfig, MicroAttentionConfig.thrift_spec, False], None, 2, ), # 4
)
MicroSearchSpaceType.thrift_struct_annotations = {
}
MicroSearchSpaceType.thrift_field_annotations = {
}
def MicroSearchSpaceType__init__(self, close=None, micro_mlp=None, micro_cin=None, micro_attention=None,):
self.field = 0
self.value = None
if close is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = close
if micro_mlp is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = micro_mlp
if micro_cin is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = micro_cin
if micro_attention is not None:
assert self.field == 0 and self.value is None
self.field = 4
self.value = micro_attention
MicroSearchSpaceType.__init__ = MicroSearchSpaceType__init__
all_structs.append(InputDenseAsSparse)
InputDenseAsSparse.thrift_spec = (
)
InputDenseAsSparse.thrift_struct_annotations = {
}
InputDenseAsSparse.thrift_field_annotations = {
}
all_structs.append(FeatureProcessingType)
FeatureProcessingType.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'idasp', [InputDenseAsSparse, InputDenseAsSparse.thrift_spec, False], None, 2, ), # 1
)
FeatureProcessingType.thrift_struct_annotations = {
}
FeatureProcessingType.thrift_field_annotations = {
}
def FeatureProcessingType__init__(self, idasp=None,):
self.field = 0
self.value = None
if idasp is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = idasp
FeatureProcessingType.__init__ = FeatureProcessingType__init__
all_structs.append(NASRecNetConfig)
NASRecNetConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'block_configs', (TType.STRUCT,[block_config.ttypes.BlockConfig, block_config.ttypes.BlockConfig.thrift_spec, True]), None, 2, ), # 1
)
NASRecNetConfig.thrift_struct_annotations = {
}
NASRecNetConfig.thrift_field_annotations = {
}
def NASRecNetConfig__init__(self, block_configs=None,):
self.block_configs = block_configs
NASRecNetConfig.__init__ = NASRecNetConfig__init__
def NASRecNetConfig__setstate__(self, state):
state.setdefault('block_configs', None)
self.__dict__ = state
NASRecNetConfig.__getstate__ = lambda self: self.__dict__.copy()
NASRecNetConfig.__setstate__ = NASRecNetConfig__setstate__
all_structs.append(RandomSearcherConfig)
RandomSearcherConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'max_num_block', None, 3, 2, ), # 1
(2, TType.LIST, 'block_types', (TType.I32,block_config.ttypes.ExtendedBlockType), None, 2, ), # 2
(3, TType.I32, 'macro_space_type', MacroSearchSpaceType, 1, 2, ), # 3
None, # 4
(5, TType.LIST, 'micro_space_types', (TType.STRUCT,[MicroSearchSpaceType, MicroSearchSpaceType.thrift_spec, True]), None, 2, ), # 5
(6, TType.LIST, 'feature_processing_type', (TType.STRUCT,[FeatureProcessingType, FeatureProcessingType.thrift_spec, True]), [
], 2, ), # 6
)
RandomSearcherConfig.thrift_struct_annotations = {
}
RandomSearcherConfig.thrift_field_annotations = {
}
def RandomSearcherConfig__init__(self, max_num_block=RandomSearcherConfig.thrift_spec[1][4], block_types=None, macro_space_type=RandomSearcherConfig.thrift_spec[3][4], micro_space_types=None, feature_processing_type=RandomSearcherConfig.thrift_spec[6][4],):
self.max_num_block = max_num_block
self.block_types = block_types
self.macro_space_type = macro_space_type
self.micro_space_types = micro_space_types
if feature_processing_type is self.thrift_spec[6][4]:
feature_processing_type = [
]
self.feature_processing_type = feature_processing_type
RandomSearcherConfig.__init__ = RandomSearcherConfig__init__
def RandomSearcherConfig__setstate__(self, state):
state.setdefault('max_num_block', 3)
state.setdefault('block_types', None)
state.setdefault('macro_space_type', 1)
state.setdefault('micro_space_types', None)
state.setdefault('feature_processing_type', [
])
self.__dict__ = state
RandomSearcherConfig.__getstate__ = lambda self: self.__dict__.copy()
RandomSearcherConfig.__setstate__ = RandomSearcherConfig__setstate__
all_structs.append(EvolutionarySearcherConfig)
EvolutionarySearcherConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'max_num_block', None, 3, 2, ), # 1
(2, TType.LIST, 'block_types', (TType.I32,block_config.ttypes.ExtendedBlockType), None, 2, ), # 2
(3, TType.I32, 'population_size', None, 10, 2, ), # 3
(4, TType.I32, 'candidate_size', None, 5, 2, ), # 4
(5, TType.I32, 'macro_space_type', MacroSearchSpaceType, 1, 2, ), # 5
None, # 6
(7, TType.LIST, 'micro_space_types', (TType.STRUCT,[MicroSearchSpaceType, MicroSearchSpaceType.thrift_spec, True]), None, 2, ), # 7
(8, TType.LIST, 'feature_processing_type', (TType.STRUCT,[FeatureProcessingType, FeatureProcessingType.thrift_spec, True]), [
], 2, ), # 8
)
EvolutionarySearcherConfig.thrift_struct_annotations = {
}
EvolutionarySearcherConfig.thrift_field_annotations = {
}
def EvolutionarySearcherConfig__init__(self, max_num_block=EvolutionarySearcherConfig.thrift_spec[1][4], block_types=None, population_size=EvolutionarySearcherConfig.thrift_spec[3][4], candidate_size=EvolutionarySearcherConfig.thrift_spec[4][4], macro_space_type=EvolutionarySearcherConfig.thrift_spec[5][4], micro_space_types=None, feature_processing_type=EvolutionarySearcherConfig.thrift_spec[8][4],):
self.max_num_block = max_num_block
self.block_types = block_types
self.population_size = population_size
self.candidate_size = candidate_size
self.macro_space_type = macro_space_type
self.micro_space_types = micro_space_types
if feature_processing_type is self.thrift_spec[8][4]:
feature_processing_type = [
]
self.feature_processing_type = feature_processing_type
EvolutionarySearcherConfig.__init__ = EvolutionarySearcherConfig__init__
def EvolutionarySearcherConfig__setstate__(self, state):
state.setdefault('max_num_block', 3)
state.setdefault('block_types', None)
state.setdefault('population_size', 10)
state.setdefault('candidate_size', 5)
state.setdefault('macro_space_type', 1)
state.setdefault('micro_space_types', None)
state.setdefault('feature_processing_type', [
])
self.__dict__ = state
EvolutionarySearcherConfig.__getstate__ = lambda self: self.__dict__.copy()
EvolutionarySearcherConfig.__setstate__ = EvolutionarySearcherConfig__setstate__
all_structs.append(SearcherConfig)
SearcherConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'random_searcher', [RandomSearcherConfig, RandomSearcherConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'evolutionary_searcher', [EvolutionarySearcherConfig, EvolutionarySearcherConfig.thrift_spec, False], None, 2, ), # 2
)
SearcherConfig.thrift_struct_annotations = {
}
SearcherConfig.thrift_field_annotations = {
}
def SearcherConfig__init__(self, random_searcher=None, evolutionary_searcher=None,):
self.field = 0
self.value = None
if random_searcher is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = random_searcher
if evolutionary_searcher is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = evolutionary_searcher
SearcherConfig.__init__ = SearcherConfig__init__
all_structs.append(ModelConfig)
ModelConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'nasrec_net', [NASRecNetConfig, NASRecNetConfig.thrift_spec, False], None, 2, ), # 1
)
ModelConfig.thrift_struct_annotations = {
}
ModelConfig.thrift_field_annotations = {
}
def ModelConfig__init__(self, nasrec_net=None,):
self.field = 0
self.value = None
if nasrec_net is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = nasrec_net
ModelConfig.__init__ = ModelConfig__init__
all_structs.append(SGDOptimConfig)
SGDOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.0100000, 2, ), # 1
(2, TType.FLOAT, 'momentum', None, 0.00000, 2, ), # 2
(3, TType.FLOAT, 'dampening', None, 0.00000, 2, ), # 3
(4, TType.BOOL, 'nesterov', None, False, 2, ), # 4
(5, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 5
)
SGDOptimConfig.thrift_struct_annotations = {
}
SGDOptimConfig.thrift_field_annotations = {
}
def SGDOptimConfig__init__(self, lr=SGDOptimConfig.thrift_spec[1][4], momentum=SGDOptimConfig.thrift_spec[2][4], dampening=SGDOptimConfig.thrift_spec[3][4], nesterov=SGDOptimConfig.thrift_spec[4][4], weight_decay=SGDOptimConfig.thrift_spec[5][4],):
self.lr = lr
self.momentum = momentum
self.dampening = dampening
self.nesterov = nesterov
self.weight_decay = weight_decay
SGDOptimConfig.__init__ = SGDOptimConfig__init__
def SGDOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.0100000)
state.setdefault('momentum', 0.00000)
state.setdefault('dampening', 0.00000)
state.setdefault('nesterov', False)
state.setdefault('weight_decay', 0.00000)
self.__dict__ = state
SGDOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
SGDOptimConfig.__setstate__ = SGDOptimConfig__setstate__
all_structs.append(AdagradOptimConfig)
AdagradOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.0100000, 2, ), # 1
(2, TType.FLOAT, 'lr_decay', None, 0.00000, 2, ), # 2
(3, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 3
(4, TType.FLOAT, 'initial_accumulator_value', None, 0.00000, 2, ), # 4
)
AdagradOptimConfig.thrift_struct_annotations = {
}
AdagradOptimConfig.thrift_field_annotations = {
}
def AdagradOptimConfig__init__(self, lr=AdagradOptimConfig.thrift_spec[1][4], lr_decay=AdagradOptimConfig.thrift_spec[2][4], weight_decay=AdagradOptimConfig.thrift_spec[3][4], initial_accumulator_value=AdagradOptimConfig.thrift_spec[4][4],):
self.lr = lr
self.lr_decay = lr_decay
self.weight_decay = weight_decay
self.initial_accumulator_value = initial_accumulator_value
AdagradOptimConfig.__init__ = AdagradOptimConfig__init__
def AdagradOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.0100000)
state.setdefault('lr_decay', 0.00000)
state.setdefault('weight_decay', 0.00000)
state.setdefault('initial_accumulator_value', 0.00000)
self.__dict__ = state
AdagradOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
AdagradOptimConfig.__setstate__ = AdagradOptimConfig__setstate__
all_structs.append(SparseAdamOptimConfig)
SparseAdamOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.00100000, 2, ), # 1
(2, TType.FLOAT, 'betas0', None, 0.900000, 2, ), # 2
(3, TType.FLOAT, 'betas1', None, 0.999000, 2, ), # 3
(4, TType.FLOAT, 'eps', None, 1.00000e-08, 2, ), # 4
)
SparseAdamOptimConfig.thrift_struct_annotations = {
}
SparseAdamOptimConfig.thrift_field_annotations = {
}
def SparseAdamOptimConfig__init__(self, lr=SparseAdamOptimConfig.thrift_spec[1][4], betas0=SparseAdamOptimConfig.thrift_spec[2][4], betas1=SparseAdamOptimConfig.thrift_spec[3][4], eps=SparseAdamOptimConfig.thrift_spec[4][4],):
self.lr = lr
self.betas0 = betas0
self.betas1 = betas1
self.eps = eps
SparseAdamOptimConfig.__init__ = SparseAdamOptimConfig__init__
def SparseAdamOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.00100000)
state.setdefault('betas0', 0.900000)
state.setdefault('betas1', 0.999000)
state.setdefault('eps', 1.00000e-08)
self.__dict__ = state
SparseAdamOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
SparseAdamOptimConfig.__setstate__ = SparseAdamOptimConfig__setstate__
all_structs.append(AdamOptimConfig)
AdamOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.00100000, 2, ), # 1
(2, TType.BOOL, 'amsgrad', None, False, 2, ), # 2
(3, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 3
(4, TType.FLOAT, 'betas0', None, 0.900000, 2, ), # 4
(5, TType.FLOAT, 'betas1', None, 0.999000, 2, ), # 5
(6, TType.FLOAT, 'eps', None, 1.00000e-08, 2, ), # 6
)
AdamOptimConfig.thrift_struct_annotations = {
}
AdamOptimConfig.thrift_field_annotations = {
}
def AdamOptimConfig__init__(self, lr=AdamOptimConfig.thrift_spec[1][4], amsgrad=AdamOptimConfig.thrift_spec[2][4], weight_decay=AdamOptimConfig.thrift_spec[3][4], betas0=AdamOptimConfig.thrift_spec[4][4], betas1=AdamOptimConfig.thrift_spec[5][4], eps=AdamOptimConfig.thrift_spec[6][4],):
self.lr = lr
self.amsgrad = amsgrad
self.weight_decay = weight_decay
self.betas0 = betas0
self.betas1 = betas1
self.eps = eps
AdamOptimConfig.__init__ = AdamOptimConfig__init__
def AdamOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.00100000)
state.setdefault('amsgrad', False)
state.setdefault('weight_decay', 0.00000)
state.setdefault('betas0', 0.900000)
state.setdefault('betas1', 0.999000)
state.setdefault('eps', 1.00000e-08)
self.__dict__ = state
AdamOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
AdamOptimConfig.__setstate__ = AdamOptimConfig__setstate__
all_structs.append(RMSpropOptimConfig)
RMSpropOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.0100000, 2, ), # 1
(2, TType.FLOAT, 'alpha', None, 0.990000, 2, ), # 2
(3, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 3
(4, TType.FLOAT, 'momentum', None, 0.00000, 2, ), # 4
(5, TType.BOOL, 'centered', None, False, 2, ), # 5
(6, TType.FLOAT, 'eps', None, 1.00000e-08, 2, ), # 6
)
RMSpropOptimConfig.thrift_struct_annotations = {
}
RMSpropOptimConfig.thrift_field_annotations = {
}
def RMSpropOptimConfig__init__(self, lr=RMSpropOptimConfig.thrift_spec[1][4], alpha=RMSpropOptimConfig.thrift_spec[2][4], weight_decay=RMSpropOptimConfig.thrift_spec[3][4], momentum=RMSpropOptimConfig.thrift_spec[4][4], centered=RMSpropOptimConfig.thrift_spec[5][4], eps=RMSpropOptimConfig.thrift_spec[6][4],):
self.lr = lr
self.alpha = alpha
self.weight_decay = weight_decay
self.momentum = momentum
self.centered = centered
self.eps = eps
RMSpropOptimConfig.__init__ = RMSpropOptimConfig__init__
def RMSpropOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.0100000)
state.setdefault('alpha', 0.990000)
state.setdefault('weight_decay', 0.00000)
state.setdefault('momentum', 0.00000)
state.setdefault('centered', False)
state.setdefault('eps', 1.00000e-08)
self.__dict__ = state
RMSpropOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
RMSpropOptimConfig.__setstate__ = RMSpropOptimConfig__setstate__
all_structs.append(OptimConfig)
OptimConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'sgd', [SGDOptimConfig, SGDOptimConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'adagrad', [AdagradOptimConfig, AdagradOptimConfig.thrift_spec, False], AdagradOptimConfig(**{
}), 2, ), # 2
(3, TType.STRUCT, 'sparse_adam', [SparseAdamOptimConfig, SparseAdamOptimConfig.thrift_spec, False], None, 2, ), # 3
(4, TType.STRUCT, 'adam', [AdamOptimConfig, AdamOptimConfig.thrift_spec, False], None, 2, ), # 4
(5, TType.STRUCT, 'rmsprop', [RMSpropOptimConfig, RMSpropOptimConfig.thrift_spec, False], None, 2, ), # 5
)
OptimConfig.thrift_struct_annotations = {
}
OptimConfig.thrift_field_annotations = {
}
def OptimConfig__init__(self, sgd=None, adagrad=OptimConfig.thrift_spec[2][4], sparse_adam=None, adam=None, rmsprop=None,):
self.field = 0
self.value = None
if sgd is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = sgd
if adagrad is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = adagrad
if sparse_adam is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = sparse_adam
if adam is not None:
assert self.field == 0 and self.value is None
self.field = 4
self.value = adam
if rmsprop is not None:
assert self.field == 0 and self.value is None
self.field = 5
self.value = rmsprop
OptimConfig.__init__ = OptimConfig__init__
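# Illustration only; the Thrift compiler does not emit the lines below. OptimConfig
# is a union-style struct: exactly one optimizer variant may be selected, and the
# generated __init__ records the chosen member id in `field` and its payload in
# `value`. Because the 'adagrad' member carries a non-None default in thrift_spec,
# selecting a different variant requires passing adagrad=None explicitly, otherwise
# the field/value assertion in the __init__ above fires.
_optim_sketch = OptimConfig(adagrad=None, sgd=SGDOptimConfig(lr=0.1))
assert _optim_sketch.field == 1 and isinstance(_optim_sketch.value, SGDOptimConfig)
del _optim_sketch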
all_structs.append(SumPooling)
SumPooling.thrift_spec = (
)
SumPooling.thrift_struct_annotations = {
}
SumPooling.thrift_field_annotations = {
}
all_structs.append(AvgPooling)
AvgPooling.thrift_spec = (
)
AvgPooling.thrift_struct_annotations = {
}
AvgPooling.thrift_field_annotations = {
}
all_structs.append(PoolingConfig)
PoolingConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'sum', [SumPooling, SumPooling.thrift_spec, False], SumPooling(**{
}), 2, ), # 1
(2, TType.STRUCT, 'avg', [AvgPooling, AvgPooling.thrift_spec, False], None, 2, ), # 2
)
PoolingConfig.thrift_struct_annotations = {
}
PoolingConfig.thrift_field_annotations = {
}
def PoolingConfig__init__(self, sum=PoolingConfig.thrift_spec[1][4], avg=None,):
self.field = 0
self.value = None
if sum is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = sum
if avg is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = avg
PoolingConfig.__init__ = PoolingConfig__init__
all_structs.append(SparseFeatureItem)
SparseFeatureItem.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, None, 2, ), # 1
(2, TType.I32, 'hash_size', None, 10000, 2, ), # 2
(3, TType.I32, 'embed_dim', None, -1, 2, ), # 3
(4, TType.STRUCT, 'optim', [OptimConfig, OptimConfig.thrift_spec, True], None, 1, ), # 4
(5, TType.STRUCT, 'pooling', [PoolingConfig, PoolingConfig.thrift_spec, True], PoolingConfig(**{
"sum" : SumPooling(**{
}),
}), 2, ), # 5
)
SparseFeatureItem.thrift_struct_annotations = {
}
SparseFeatureItem.thrift_field_annotations = {
}
def SparseFeatureItem__init__(self, name=None, hash_size=SparseFeatureItem.thrift_spec[2][4], embed_dim=SparseFeatureItem.thrift_spec[3][4], optim=None, pooling=SparseFeatureItem.thrift_spec[5][4],):
self.name = name
self.hash_size = hash_size
self.embed_dim = embed_dim
self.optim = optim
if pooling is self.thrift_spec[5][4]:
pooling = PoolingConfig(**{
"sum" : SumPooling(**{
}),
})
self.pooling = pooling
SparseFeatureItem.__init__ = SparseFeatureItem__init__
def SparseFeatureItem__setstate__(self, state):
state.setdefault('name', None)
state.setdefault('hash_size', 10000)
state.setdefault('embed_dim', -1)
state.setdefault('optim', None)
state.setdefault('pooling', PoolingConfig(**{
"sum" : SumPooling(**{
}),
}))
self.__dict__ = state
SparseFeatureItem.__getstate__ = lambda self: self.__dict__.copy()
SparseFeatureItem.__setstate__ = SparseFeatureItem__setstate__
all_structs.append(SparseFeatureConfig)
SparseFeatureConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'features', (TType.STRUCT,[SparseFeatureItem, SparseFeatureItem.thrift_spec, False]), [
], 2, ), # 1
(2, TType.I32, 'embed_dim', None, -1, 2, ), # 2
(3, TType.STRUCT, 'optim', [OptimConfig, OptimConfig.thrift_spec, True], None, 2, ), # 3
)
SparseFeatureConfig.thrift_struct_annotations = {
}
SparseFeatureConfig.thrift_field_annotations = {
}
def SparseFeatureConfig__init__(self, features=SparseFeatureConfig.thrift_spec[1][4], embed_dim=SparseFeatureConfig.thrift_spec[2][4], optim=None,):
if features is self.thrift_spec[1][4]:
features = [
]
self.features = features
self.embed_dim = embed_dim
self.optim = optim
SparseFeatureConfig.__init__ = SparseFeatureConfig__init__
def SparseFeatureConfig__setstate__(self, state):
state.setdefault('features', [
])
state.setdefault('embed_dim', -1)
state.setdefault('optim', None)
self.__dict__ = state
SparseFeatureConfig.__getstate__ = lambda self: self.__dict__.copy()
SparseFeatureConfig.__setstate__ = SparseFeatureConfig__setstate__
all_structs.append(DenseFeatureConfig)
DenseFeatureConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'features', (TType.STRING,True), None, 2, ), # 1
(2, TType.STRUCT, 'optim', [OptimConfig, OptimConfig.thrift_spec, True], None, 2, ), # 2
)
DenseFeatureConfig.thrift_struct_annotations = {
}
DenseFeatureConfig.thrift_field_annotations = {
}
def DenseFeatureConfig__init__(self, features=None, optim=None,):
self.features = features
self.optim = optim
DenseFeatureConfig.__init__ = DenseFeatureConfig__init__
def DenseFeatureConfig__setstate__(self, state):
state.setdefault('features', None)
state.setdefault('optim', None)
self.__dict__ = state
DenseFeatureConfig.__getstate__ = lambda self: self.__dict__.copy()
DenseFeatureConfig.__setstate__ = DenseFeatureConfig__setstate__
all_structs.append(FeatureConfig)
FeatureConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'dense', [DenseFeatureConfig, DenseFeatureConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'sparse', [SparseFeatureConfig, SparseFeatureConfig.thrift_spec, False], None, 2, ), # 2
)
FeatureConfig.thrift_struct_annotations = {
}
FeatureConfig.thrift_field_annotations = {
}
def FeatureConfig__init__(self, dense=None, sparse=None,):
self.dense = dense
self.sparse = sparse
FeatureConfig.__init__ = FeatureConfig__init__
def FeatureConfig__setstate__(self, state):
state.setdefault('dense', None)
state.setdefault('sparse', None)
self.__dict__ = state
FeatureConfig.__getstate__ = lambda self: self.__dict__.copy()
FeatureConfig.__setstate__ = FeatureConfig__setstate__
all_structs.append(BCEWithLogitsLoss)
BCEWithLogitsLoss.thrift_spec = (
)
BCEWithLogitsLoss.thrift_struct_annotations = {
}
BCEWithLogitsLoss.thrift_field_annotations = {
}
all_structs.append(BCELoss)
BCELoss.thrift_spec = (
)
BCELoss.thrift_struct_annotations = {
}
BCELoss.thrift_field_annotations = {
}
all_structs.append(MSELoss)
MSELoss.thrift_spec = (
)
MSELoss.thrift_struct_annotations = {
}
MSELoss.thrift_field_annotations = {
}
all_structs.append(LossConfig)
LossConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'bcewithlogits', [BCEWithLogitsLoss, BCEWithLogitsLoss.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'bce', [BCELoss, BCELoss.thrift_spec, False], None, 2, ), # 2
(3, TType.STRUCT, 'mse', [MSELoss, MSELoss.thrift_spec, False], None, 2, ), # 3
)
LossConfig.thrift_struct_annotations = {
}
LossConfig.thrift_field_annotations = {
}
def LossConfig__init__(self, bcewithlogits=None, bce=None, mse=None,):
self.field = 0
self.value = None
if bcewithlogits is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = bcewithlogits
if bce is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = bce
if mse is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = mse
LossConfig.__init__ = LossConfig__init__
all_structs.append(LoggingConfig)
LoggingConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'log_freq', None, 10000, 2, ), # 1
(2, TType.I32, 'tb_log_freq', None, -1, 2, ), # 2
(3, TType.BOOL, 'tb_log_model_weight_hist', None, False, 2, ), # 3
(4, TType.BOOL, 'tb_log_pr_curve_batch', None, True, 2, ), # 4
(5, TType.LIST, 'tb_log_model_weight_filter_regex', (TType.STRING,True), [
"sparse",
], 2, ), # 5
)
LoggingConfig.thrift_struct_annotations = {
}
LoggingConfig.thrift_field_annotations = {
}
def LoggingConfig__init__(self, log_freq=LoggingConfig.thrift_spec[1][4], tb_log_freq=LoggingConfig.thrift_spec[2][4], tb_log_model_weight_hist=LoggingConfig.thrift_spec[3][4], tb_log_pr_curve_batch=LoggingConfig.thrift_spec[4][4], tb_log_model_weight_filter_regex=LoggingConfig.thrift_spec[5][4],):
self.log_freq = log_freq
self.tb_log_freq = tb_log_freq
self.tb_log_model_weight_hist = tb_log_model_weight_hist
self.tb_log_pr_curve_batch = tb_log_pr_curve_batch
if tb_log_model_weight_filter_regex is self.thrift_spec[5][4]:
tb_log_model_weight_filter_regex = [
"sparse",
]
self.tb_log_model_weight_filter_regex = tb_log_model_weight_filter_regex
LoggingConfig.__init__ = LoggingConfig__init__
def LoggingConfig__setstate__(self, state):
state.setdefault('log_freq', 10000)
state.setdefault('tb_log_freq', -1)
state.setdefault('tb_log_model_weight_hist', False)
state.setdefault('tb_log_pr_curve_batch', True)
state.setdefault('tb_log_model_weight_filter_regex', [
"sparse",
])
self.__dict__ = state
LoggingConfig.__getstate__ = lambda self: self.__dict__.copy()
LoggingConfig.__setstate__ = LoggingConfig__setstate__
all_structs.append(TrainConfig)
TrainConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'logging_config', [LoggingConfig, LoggingConfig.thrift_spec, False], None, 2, ), # 1
None, # 2
(3, TType.I32, 'nepochs', None, 1, 2, ), # 3
None, # 4
(5, TType.BOOL, 'early_stop_on_val_loss', None, True, 2, ), # 5
(6, TType.STRUCT, 'loss', [LossConfig, LossConfig.thrift_spec, True], LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}), 2, ), # 6
)
TrainConfig.thrift_struct_annotations = {
}
TrainConfig.thrift_field_annotations = {
}
def TrainConfig__init__(self, logging_config=None, nepochs=TrainConfig.thrift_spec[3][4], early_stop_on_val_loss=TrainConfig.thrift_spec[5][4], loss=TrainConfig.thrift_spec[6][4],):
self.logging_config = logging_config
self.nepochs = nepochs
self.early_stop_on_val_loss = early_stop_on_val_loss
if loss is self.thrift_spec[6][4]:
loss = LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
})
self.loss = loss
TrainConfig.__init__ = TrainConfig__init__
def TrainConfig__setstate__(self, state):
state.setdefault('logging_config', None)
state.setdefault('nepochs', 1)
state.setdefault('early_stop_on_val_loss', True)
state.setdefault('loss', LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}))
self.__dict__ = state
TrainConfig.__getstate__ = lambda self: self.__dict__.copy()
TrainConfig.__setstate__ = TrainConfig__setstate__
all_structs.append(EvalConfig)
EvalConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'logging_config', [LoggingConfig, LoggingConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'loss', [LossConfig, LossConfig.thrift_spec, True], LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}), 2, ), # 2
(3, TType.BOOL, 'compute_ne', None, True, 2, ), # 3
)
EvalConfig.thrift_struct_annotations = {
}
EvalConfig.thrift_field_annotations = {
}
def EvalConfig__init__(self, logging_config=None, loss=EvalConfig.thrift_spec[2][4], compute_ne=EvalConfig.thrift_spec[3][4],):
self.logging_config = logging_config
if loss is self.thrift_spec[2][4]:
loss = LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
})
self.loss = loss
self.compute_ne = compute_ne
EvalConfig.__init__ = EvalConfig__init__
def EvalConfig__setstate__(self, state):
state.setdefault('logging_config', None)
state.setdefault('loss', LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}))
state.setdefault('compute_ne', True)
self.__dict__ = state
EvalConfig.__getstate__ = lambda self: self.__dict__.copy()
EvalConfig.__setstate__ = EvalConfig__setstate__
all_structs.append(CheckpointConfig)
CheckpointConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'ckp_interval', None, 10, 2, ), # 1
(2, TType.STRING, 'ckp_path', True, "", 2, ), # 2
)
CheckpointConfig.thrift_struct_annotations = {
}
CheckpointConfig.thrift_field_annotations = {
}
def CheckpointConfig__init__(self, ckp_interval=CheckpointConfig.thrift_spec[1][4], ckp_path=CheckpointConfig.thrift_spec[2][4],):
self.ckp_interval = ckp_interval
self.ckp_path = ckp_path
CheckpointConfig.__init__ = CheckpointConfig__init__
def CheckpointConfig__setstate__(self, state):
state.setdefault('ckp_interval', 10)
state.setdefault('ckp_path', "")
self.__dict__ = state
CheckpointConfig.__getstate__ = lambda self: self.__dict__.copy()
CheckpointConfig.__setstate__ = CheckpointConfig__setstate__
all_structs.append(KoskiReaderConfig)
KoskiReaderConfig.thrift_spec = (
None, # 0
(1, TType.I64, 'prefetch_capacity', None, 128, 2, ), # 1
(2, TType.BOOL, 'pin_memory', None, True, 2, ), # 2
(3, TType.I32, 'num_workers', None, 4, 2, ), # 3
)
KoskiReaderConfig.thrift_struct_annotations = {
}
KoskiReaderConfig.thrift_field_annotations = {
}
def KoskiReaderConfig__init__(self, prefetch_capacity=KoskiReaderConfig.thrift_spec[1][4], pin_memory=KoskiReaderConfig.thrift_spec[2][4], num_workers=KoskiReaderConfig.thrift_spec[3][4],):
self.prefetch_capacity = prefetch_capacity
self.pin_memory = pin_memory
self.num_workers = num_workers
KoskiReaderConfig.__init__ = KoskiReaderConfig__init__
def KoskiReaderConfig__setstate__(self, state):
state.setdefault('prefetch_capacity', 128)
state.setdefault('pin_memory', True)
state.setdefault('num_workers', 4)
self.__dict__ = state
KoskiReaderConfig.__getstate__ = lambda self: self.__dict__.copy()
KoskiReaderConfig.__setstate__ = KoskiReaderConfig__setstate__
all_structs.append(PerformanceConfig)
PerformanceConfig.thrift_spec = (
None, # 0
(1, TType.BOOL, 'use_gpu', None, False, 2, ), # 1
(2, TType.I32, 'num_readers', None, 4, 2, ), # 2
(3, TType.I32, 'num_trainers', None, 1, 2, ), # 3
(4, TType.STRUCT, 'ckp_config', [CheckpointConfig, CheckpointConfig.thrift_spec, False], CheckpointConfig(**{
"ckp_interval" : 10,
}), 2, ), # 4
(5, TType.I32, 'data_queue_maxsize', None, 100, 2, ), # 5
(6, TType.I32, 'reader_threads', None, 8, 2, ), # 6
(7, TType.I32, 'num_gpu', None, 1, 2, ), # 7
(8, TType.BOOL, 'enable_profiling', None, False, 1, ), # 8
(9, TType.STRUCT, 'koski', [KoskiReaderConfig, KoskiReaderConfig.thrift_spec, False], None, 1, ), # 9
(10, TType.I32, 'omp_num_threads', None, 0, 1, ), # 10
)
PerformanceConfig.thrift_struct_annotations = {
}
PerformanceConfig.thrift_field_annotations = {
}
def PerformanceConfig__init__(self, use_gpu=PerformanceConfig.thrift_spec[1][4], num_readers=PerformanceConfig.thrift_spec[2][4], num_trainers=PerformanceConfig.thrift_spec[3][4], ckp_config=PerformanceConfig.thrift_spec[4][4], data_queue_maxsize=PerformanceConfig.thrift_spec[5][4], reader_threads=PerformanceConfig.thrift_spec[6][4], num_gpu=PerformanceConfig.thrift_spec[7][4], enable_profiling=PerformanceConfig.thrift_spec[8][4], koski=None, omp_num_threads=PerformanceConfig.thrift_spec[10][4],):
self.use_gpu = use_gpu
self.num_readers = num_readers
self.num_trainers = num_trainers
if ckp_config is self.thrift_spec[4][4]:
ckp_config = CheckpointConfig(**{
"ckp_interval" : 10,
})
self.ckp_config = ckp_config
self.data_queue_maxsize = data_queue_maxsize
self.reader_threads = reader_threads
self.num_gpu = num_gpu
self.enable_profiling = enable_profiling
self.koski = koski
self.omp_num_threads = omp_num_threads
PerformanceConfig.__init__ = PerformanceConfig__init__
def PerformanceConfig__setstate__(self, state):
state.setdefault('use_gpu', False)
state.setdefault('num_readers', 4)
state.setdefault('num_trainers', 1)
state.setdefault('ckp_config', CheckpointConfig(**{
"ckp_interval" : 10,
}))
state.setdefault('data_queue_maxsize', 100)
state.setdefault('reader_threads', 8)
state.setdefault('num_gpu', 1)
state.setdefault('enable_profiling', False)
state.setdefault('koski', None)
state.setdefault('omp_num_threads', 0)
self.__dict__ = state
PerformanceConfig.__getstate__ = lambda self: self.__dict__.copy()
PerformanceConfig.__setstate__ = PerformanceConfig__setstate__
fix_spec(all_structs)
del all_structs
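# Hedged usage sketch appended for illustration; it is not produced by the Thrift
# compiler and only runs when this module is executed directly. The file names are
# hypothetical. It exercises names defined above: struct defaults are read from
# thrift_spec, mutable list defaults are copied per instance, and union-style
# structs record the selected member in (field, value).
if __name__ == "__main__":
  cfg_a = DataFromFileConfig(data_file="train.csv")
  cfg_b = DataFromFileConfig(data_file="valid.csv")
  # Each instance receives its own copy of the default splits list.
  assert cfg_a.splits == cfg_b.splits == [0.8, 0.1]
  assert cfg_a.splits is not cfg_b.splits

  # LossConfig is a union: selecting 'mse' sets field id 3.
  loss = LossConfig(mse=MSELoss())
  assert loss.field == 3 and isinstance(loss.value, MSELoss)

  # TrainConfig defaults its loss to a BCEWithLogitsLoss-backed LossConfig.
  train = TrainConfig(nepochs=5)
  assert isinstance(train.loss, LossConfig) and train.loss.field == 1
  print("config.ttypes usage sketch passed")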
|
AutoCTR-main
|
gen-py/config/ttypes.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
from .ttypes import *
|
AutoCTR-main
|
gen-py/block_config/constants.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
__all__ = ['ttypes', 'constants']
|
AutoCTR-main
|
gen-py/block_config/__init__.py
|