# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import math
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
from src import dist_utils
Number = Union[float, int]
logger = logging.getLogger(__name__)
def init_logger(is_main=True, is_distributed=False, filename=None):
if is_distributed:
torch.distributed.barrier()
handlers = [logging.StreamHandler(sys.stdout)]
if filename is not None:
handlers.append(logging.FileHandler(filename=filename))
logging.basicConfig(
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main else logging.WARN,
format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s",
handlers=handlers,
)
logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR)
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
return logger
def init_tb_logger(dirname, is_main):
tb_logger = None
if is_main:
try:
from torch.utils import tensorboard
tb_logger = tensorboard.SummaryWriter(dirname)
        except Exception:
logger.warning("Tensorboard is not available.")
return tb_logger
def cast_to_precision(model, precision):
if precision == "fp32":
return model
elif precision == "fp16":
model.to(torch.float16)
elif precision == "bf16":
model.to(torch.bfloat16)
else:
raise ValueError(f"unsupported precision {precision}, must be one of fp32, fp16, bf16")
return model
class WarmupLinearScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(WarmupLinearScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return (1 - self.ratio) * step / float(max(1, self.warmup)) + self.ratio
return max(
0.0,
1.0 + (self.ratio - 1) * (step - self.warmup) / float(max(1.0, self.total - self.warmup)),
)
class CosineScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio=0.1, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(CosineScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return float(step) / self.warmup
s = float(step - self.warmup) / (self.total - self.warmup)
return self.ratio + (1.0 - self.ratio) * math.cos(0.5 * math.pi * s)
class FixedScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(FixedScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return float(step) / self.warmup
return 1.0
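# A minimal usage sketch (added for illustration, not part of the original file):
# shows the multiplier schedule WarmupLinearScheduler produces. The toy parameter,
# lr, warmup/total/ratio values, and the helper name are arbitrary assumptions.
def _demo_warmup_linear_scheduler():
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.AdamW(params, lr=1.0)
    scheduler = WarmupLinearScheduler(optimizer, warmup=10, total=100, ratio=0.1)
    multipliers = []
    for _ in range(100):
        multipliers.append(scheduler.get_last_lr()[0])
        optimizer.step()
        scheduler.step()
    # ramps from ratio (0.1) to 1.0 over the warmup, then decays linearly toward ratio at `total`
    assert abs(multipliers[10] - 1.0) < 1e-6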
class IndexRefreshScheduler(object):
def __init__(self, format_str: str, freeze_retriever_steps: int, train_retriever: bool):
"""Build an index refresh scheduler
format_str: string that specifies the schedule.
has the format: startstep-endstep:refreshrate,startstep-endstep:refreshrate
e.g. format_str="0-100:10,100-1000000:500" will refresh the index every 10 steps for the first 100 steps
and then every 500 steps from step 100 to 1M.
Syntactic Sugar for a fixed schedule: can just pass in a single number
e.g. format_str="100" will refresh the index every 100 steps
-1 to never refresh
)
"""
self.format_str = format_str
self.train_retriever = train_retriever
self.freeze_retriever_steps = freeze_retriever_steps
self.steps2rates = IndexRefreshScheduler.parse_index_refresh_schedule_string(format_str)
@classmethod
def parse_index_refresh_schedule_string(cls, format_str):
parsed = []
if format_str == "-1":
parsed = [(0, 2**32, 2**32)]
elif format_str.isdigit():
parsed = [(0, 2**32, int(format_str))]
else:
for piece in format_str.split(","):
startend, rate = piece.split(":")
start, end = startend.split("-")
parsed.append((int(start), int(end), int(rate)))
return parsed
def is_time_to_refresh(self, step):
if not (self.train_retriever or step == 0): # if retriever is not trained only refresh at step 0
return False
if not step == 0 and step < self.freeze_retriever_steps: # freeze first steps
return False
for st, en, rate in self.steps2rates:
if st <= step < en:
steps_since_refresh_schedule_change = step - st
return (steps_since_refresh_schedule_change % rate) == 0
        logger.warning(
            "can't calculate a refresh rate for this step; the training step is likely"
            " beyond the last interval of the specified schedule, see --index_refresh_rate for help."
        )
return False
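# Illustrative usage sketch for the refresh schedule (added for clarity; the
# schedule string and helper name below are made-up examples):
def _demo_index_refresh_schedule():
    scheduler = IndexRefreshScheduler("0-100:10,100-1000000:500", freeze_retriever_steps=0, train_retriever=True)
    assert scheduler.steps2rates == [(0, 100, 10), (100, 1000000, 500)]
    assert scheduler.is_time_to_refresh(step=0)  # always refresh on the very first step
    assert scheduler.is_time_to_refresh(step=50)  # 50 is a multiple of 10 within [0, 100)
    assert not scheduler.is_time_to_refresh(step=55)
    assert scheduler.is_time_to_refresh(step=600)  # (600 - 100) % 500 == 0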
def set_dropout(model, dropout_rate):
for mod in model.modules():
if isinstance(mod, torch.nn.Dropout):
mod.p = dropout_rate
def set_optim(opt, model):
from src.AdamWFP32Copy import AdamWFP32Copy
retr_optimizer = None
optim_class = AdamWFP32Copy
optim_args = {"weight_decay": opt.weight_decay, "betas": (0.9, opt.beta2), "eps": opt.epsilon}
if opt.is_distributed and opt.shard_optim:
from fairscale.optim.oss import OSS
optim_args["optim"] = optim_class
optim_args["force_broadcast_object"] = True
optim_class = OSS
optimizer = optim_class(params=model.reader.parameters(), lr=opt.lr, **optim_args)
if opt.train_retriever:
retr_optimizer = optim_class(params=model.retriever.parameters(), lr=opt.lr_retriever, **optim_args)
retr_scheduler = None
scheduler_args = {"warmup": opt.warmup_steps, "total": opt.total_steps, "ratio": 0.1}
if opt.scheduler == "linear":
scheduler_class = WarmupLinearScheduler
elif opt.scheduler == "cosine":
scheduler_class = CosineScheduler
elif opt.scheduler == "fixed":
scheduler_class = FixedScheduler
else:
        raise ValueError(f"unsupported scheduler {opt.scheduler}, must be one of linear, cosine, fixed")
scheduler = scheduler_class(optimizer, **scheduler_args)
if opt.train_retriever:
retr_scheduler = scheduler_class(retr_optimizer, **scheduler_args)
return optimizer, scheduler, retr_optimizer, retr_scheduler
def compute_grad_stats(model):
with torch.no_grad():
stats = []
for name, p in get_unwrapped_model_if_wrapped(model).reader.named_parameters():
if p.grad is not None:
s1 = torch.min(torch.abs(p.grad)).item()
s2 = torch.max(torch.abs(p.grad)).item()
s3 = torch.mean(torch.abs(p.grad)).item()
s4 = torch.linalg.norm(p.grad).item()
stats += [s1, s2, s3, s4]
else:
stats += [0.0, 0.0, 0.0, 0.0]
stats = torch.Tensor(stats).cuda()
if torch.distributed.is_initialized():
torch.distributed.all_reduce(stats)
stats = stats.view(-1, 4)
res = {}
res["skip_example"] = (torch.any(torch.isinf(stats)) or torch.any(torch.isnan(stats))).item()
res["min"] = stats.min(0)[0][0].item()
res["max"] = stats.max(0)[0][1].item()
res["mean"] = stats.mean(0)[2].item()
return res
def write_output(glob_path, output_path):
files = list(glob_path.glob("*.txt"))
files.sort()
with open(output_path, "w") as outfile:
for path in files:
with open(path, "r") as f:
lines = f.readlines()
for line in lines:
outfile.write(line)
path.unlink()
glob_path.rmdir()
def save_distributed_dataset(data, dataset_name, opt):
dir_path = Path(opt.checkpoint_dir) / opt.name
write_path = dir_path / "tmp_dir"
write_path.mkdir(exist_ok=True)
tmp_path = write_path / f"{opt.global_rank}.json"
with open(tmp_path, "w") as fw:
json.dump(data, fw)
if opt.is_distributed:
torch.distributed.barrier()
if opt.is_main:
final_path = dir_path / f"{dataset_name}.jsonl"
logger.info(f"Writing dataset with scores at {final_path}")
results_path = list(write_path.glob("*.json"))
results_path.sort()
alldata = []
for path in results_path:
with open(path, "r") as f:
data = json.load(f)
alldata.extend(data)
path.unlink()
with open(final_path, "w") as fout:
for ex in alldata:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
write_path.rmdir()
def avg_dist_dict(keys, dictionary):
avg = {}
for m in keys:
v = dictionary[m]
if len(v) > 0:
avg[m] = np.mean(v)
else:
avg[m] = 0.0
avg[m] = dist_utils.weighted_average(avg[m], len(v))[0]
return avg
class WeightedAvgStats:
"""provides an average over a bunch of stats"""
def __init__(self):
self.raw_stats: Dict[str, float] = defaultdict(float)
self.total_weights: Dict[str, float] = defaultdict(float)
def update(self, vals: Dict[str, Tuple[Number, Number]]) -> None:
for key, (value, weight) in vals.items():
self.raw_stats[key] += value * weight
self.total_weights[key] += weight
@property
def stats(self) -> Dict[str, float]:
return {x: self.raw_stats[x] / self.total_weights[x] for x in self.raw_stats.keys()}
@property
def tuple_stats(self) -> Dict[str, Tuple[float, float]]:
return {x: (self.raw_stats[x] / self.total_weights[x], self.total_weights[x]) for x in self.raw_stats.keys()}
def reset(self) -> None:
self.raw_stats = defaultdict(float)
self.total_weights = defaultdict(float)
@property
def average_stats(self) -> Dict[str, float]:
keys = sorted(self.raw_stats.keys())
if torch.distributed.is_initialized():
torch.distributed.broadcast_object_list(keys, src=0)
global_dict = {}
for k in keys:
            if k not in self.total_weights:
v = 0.0
else:
v = self.raw_stats[k] / self.total_weights[k]
v, _ = dist_utils.weighted_average(v, self.total_weights[k])
global_dict[k] = v
return global_dict
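# Minimal single-process usage sketch for WeightedAvgStats (illustrative values;
# in training, the weight is typically the batch size):
def _demo_weighted_avg_stats():
    stats = WeightedAvgStats()
    stats.update({"loss": (2.0, 8), "accuracy": (0.5, 8)})  # key -> (value, weight)
    stats.update({"loss": (1.0, 2)})
    assert stats.stats["loss"] == (2.0 * 8 + 1.0 * 2) / 10  # weighted mean = 1.8
    stats.reset()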
def get_unwrapped_model_if_wrapped(model):
if hasattr(model, "module"):
return model.module
return model
# ============================================================================
# End of file: atlas-main/src/util.py
# ============================================================================
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
from src.modeling_bert import BertModel
EMBEDDINGS_DIM: int = 768
class Contriever(BertModel):
def __init__(self, config, pooling="average", **kwargs):
super().__init__(config, add_pooling_layer=False)
if not hasattr(config, "pooling"):
self.config.pooling = pooling
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
normalize=False,
):
model_output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden = model_output["last_hidden_state"]
last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0).clone()
if self.config.pooling == "average":
emb = last_hidden.sum(dim=1).clone() / attention_mask.sum(dim=1)[..., None].clone()
elif self.config.pooling == "sqrt":
emb = last_hidden.sum(dim=1) / torch.sqrt(attention_mask.sum(dim=1)[..., None].float())
elif self.config.pooling == "cls":
emb = last_hidden[:, 0]
if normalize:
emb = torch.nn.functional.normalize(emb, dim=-1).clone()
return emb
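# Hedged usage sketch (not part of the original file): embeds a small batch of
# texts with the default mean pooling. "facebook/contriever" is the public
# checkpoint this class is typically loaded from; substitute your own path.
def _demo_contriever_embeddings():
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained("facebook/contriever")
    model = Contriever.from_pretrained("facebook/contriever")
    batch = tokenizer(["What is Atlas?", "A retrieval-augmented LM."], padding=True, return_tensors="pt")
    with torch.no_grad():
        emb = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], normalize=True)
    assert emb.shape == (2, EMBEDDINGS_DIM)  # one unit-normalized 768-d vector per input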
class BaseRetriever(torch.nn.Module):
"""A retriever needs to be able to embed queries and passages, and have a forward function"""
def __init__(self, *args, **kwargs):
super(BaseRetriever, self).__init__()
def embed_queries(self, *args, **kwargs):
raise NotImplementedError()
def embed_passages(self, *args, **kwargs):
raise NotImplementedError()
def forward(self, *args, is_passages=False, **kwargs):
if is_passages:
return self.embed_passages(*args, **kwargs)
else:
return self.embed_queries(*args, **kwargs)
def gradient_checkpointing_enable(self):
for m in self.children():
m.gradient_checkpointing_enable()
def gradient_checkpointing_disable(self):
for m in self.children():
m.gradient_checkpointing_disable()
class DualEncoderRetriever(BaseRetriever):
"""Wrapper for standard contriever, or other dual encoders that parameter-share"""
def __init__(self, opt, contriever):
super(DualEncoderRetriever, self).__init__()
self.opt = opt
self.contriever = contriever
def _embed(self, *args, **kwargs):
return self.contriever(*args, **kwargs)
def embed_queries(self, *args, **kwargs):
return self._embed(*args, **kwargs)
def embed_passages(self, *args, **kwargs):
return self._embed(*args, **kwargs)
class UntiedDualEncoderRetriever(BaseRetriever):
"""Like DualEncoderRetriever, but dedicated encoders for passage and query embedding"""
def __init__(self, opt, query_encoder, passage_encoder=None):
"""Create the module: if passage_encoder is none, one will be created as a deep copy of query_encoder"""
super(UntiedDualEncoderRetriever, self).__init__()
self.opt = opt
self.query_contriever = query_encoder
        if passage_encoder is None:
            passage_encoder = copy.deepcopy(query_encoder)
self.passage_contriever = passage_encoder
def embed_queries(self, *args, **kwargs):
return self.query_contriever(*args, **kwargs)
def embed_passages(self, *args, **kwargs):
if self.opt.query_side_retriever_training:
is_train = self.passage_contriever.training
self.passage_contriever.eval()
with torch.no_grad():
passage_emb = self.passage_contriever(*args, **kwargs)
if is_train:
self.passage_contriever.train()
else:
passage_emb = self.passage_contriever(*args, **kwargs)
return passage_emb
# ============================================================================
# End of file: atlas-main/src/retrievers.py
# ============================================================================
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import string
from collections import Counter
from typing import Callable
import numpy as np
import regex
from rouge import Rouge
rouge = Rouge()
logger = logging.getLogger(__name__)
# Normalization and score functions from SQuAD evaluation script https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s: str) -> str:
def remove_articles(text):
return regex.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def em(prediction, ground_truth, normalize_fn):
return float(normalize_fn(prediction) == normalize_fn(ground_truth))
def f1(prediction, ground_truth, normalize_fn):
prediction_tokens = normalize_fn(prediction).split()
ground_truth_tokens = normalize_fn(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
        return 0.0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def rouge_wrapper(prediction, ground_truth):
try:
result = rouge.get_scores(prediction, ground_truth, avg=True)
return result["rouge-1"]["f"], result["rouge-2"]["f"], result["rouge-l"]["f"]
    except Exception:
return 0.0, 0.0, 0.0
def f1_score(prediction, ground_truths, normalize_fn: Callable[[str], str] = lambda x: x):
return max([f1(prediction, gt, normalize_fn) for gt in ground_truths])
def exact_match_score(prediction, ground_truths, normalize_fn: Callable[[str], str] = lambda x: x):
return max([em(prediction, gt, normalize_fn) for gt in ground_truths])
def rouge_score(prediction, ground_truths):
ground_truths = [x for x in ground_truths if len(x) > 0]
    if (
        len(prediction) == 0 or len(ground_truths) == 0
    ):  # empty prediction, or no reference with len > 0
        return 0.0, 0.0, 0.0
scores = [rouge_wrapper(prediction, gt) for gt in ground_truths]
rouge1 = max(s[0] for s in scores)
rouge2 = max(s[1] for s in scores)
rougel = max(s[2] for s in scores)
return rouge1, rouge2, rougel
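# Illustrative usage of the metrics above (the example strings are made up):
def _demo_metrics():
    prediction = "The Eiffel Tower"
    ground_truths = ["eiffel tower", "Tour Eiffel"]
    assert exact_match_score(prediction, ground_truths, normalize_answer) == 1.0
    assert f1_score(prediction, ground_truths, normalize_answer) == 1.0
    rouge1, rouge2, rougel = rouge_score(prediction, ground_truths)  # best score over the references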
# ============================================================================
# End of file: atlas-main/src/evaluation.py
# ============================================================================
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
from src import dist_utils
from src.index import DistributedFAISSIndex, DistributedIndex
logger = logging.getLogger(__name__)
def load_passages(filenames, maxload=-1):
def process_jsonl(
fname,
counter,
passages,
world_size,
global_rank,
maxload,
):
def load_item(line):
if line.strip() != "":
item = json.loads(line)
assert "id" in item
if "title" in item and "section" in item and len(item["section"]) > 0:
item["title"] = f"{item['title']}: {item['section']}"
return item
else:
print("empty line")
for line in open(fname):
if maxload > -1 and counter >= maxload:
break
ex = None
if (counter % world_size) == global_rank:
ex = load_item(line)
passages.append(ex)
counter += 1
return passages, counter
counter = 0
passages = []
global_rank = dist_utils.get_rank()
world_size = dist_utils.get_world_size()
for filename in filenames:
passages, counter = process_jsonl(
filename,
counter,
passages,
world_size,
global_rank,
maxload,
)
return passages
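# Sketch of the passage file format load_passages expects: one JSON object per
# line with at least an "id"; "title" and "section" are merged when both exist.
# The "text" field is an assumption based on typical Atlas passage files, and a
# single-process (world_size == 1) run is assumed here.
def _demo_load_passages(tmp_file="passages_example.jsonl"):
    with open(tmp_file, "w") as f:
        f.write(json.dumps({"id": "0", "title": "Atlas", "section": "Intro", "text": "..."}) + "\n")
    passages = load_passages([tmp_file])
    assert passages[0]["title"] == "Atlas: Intro"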
def save_embeddings_and_index(index, opt: argparse.Namespace) -> None:
"""
Saves embeddings and passages files. It also saves faiss index files if FAISS mode is used.
"""
index.save_index(opt.save_index_path, opt.save_index_n_shards)
def load_or_initialize_index(opt):
if opt.index_mode == "flat":
index = DistributedIndex()
elif opt.index_mode == "faiss":
index = DistributedFAISSIndex(opt.faiss_index_type, opt.faiss_code_size)
else:
raise ValueError(f"unsupported index mode {opt.index_mode}")
if opt.load_index_path is not None:
logger.info(f"Loading index from: {opt.load_index_path} with index mode: {opt.index_mode}")
if opt.index_mode == "faiss":
logger.info(f"loading faiss index type {opt.faiss_index_type} with parameters {opt.faiss_code_size}")
index.load_index(opt.load_index_path, opt.save_index_n_shards)
passages = [index.doc_map[i] for i in range(len(index.doc_map))]
else:
logger.info(f"Loading passages from: {opt.passages}")
passages = []
if not opt.use_file_passages:
passages = load_passages(opt.passages, opt.max_passages)
index.init_embeddings(passages)
return index, passages
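# Hedged sketch (not from the repo) of the option fields load_or_initialize_index
# reads; the field names come from the functions above, the values are illustrative.
def _demo_index_options():
    opt = argparse.Namespace(
        index_mode="flat",  # "flat" -> DistributedIndex, "faiss" -> DistributedFAISSIndex
        faiss_index_type=None,  # only consulted when index_mode == "faiss"
        faiss_code_size=None,
        load_index_path=None,  # set to a saved index directory to reload it
        save_index_n_shards=128,  # also used as the shard count when loading
        passages=["passages.jsonl"],  # JSONL files consumed by load_passages
        max_passages=-1,  # -1 keeps everything
        use_file_passages=False,
    )
    return opt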
# ============================================================================
# End of file: atlas-main/src/index_io.py
# ============================================================================
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import errno
import logging
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torch
import transformers
import src.fid
from src import dist_utils
from src.atlas import Atlas
from src.retrievers import Contriever, DualEncoderRetriever, UntiedDualEncoderRetriever
from src.util import cast_to_precision, set_dropout, set_optim
Number = Union[float, int]
logger = logging.getLogger(__name__)
def get_checkpoint_path(opt):
checkpoint_path = Path(opt.checkpoint_dir) / opt.name
return checkpoint_path
def create_checkpoint_directories(opt):
checkpoint_path = get_checkpoint_path(opt)
os.makedirs(checkpoint_path, exist_ok=True)
if opt.save_index_path:
os.makedirs(opt.save_index_path, exist_ok=True)
dist_utils.barrier()
return checkpoint_path, opt.save_index_path
def load_retriever(opt, opt_checkpoint=None):
if opt.use_file_passages:
return None, None
contriever_encoder = Contriever.from_pretrained(opt.retriever_model_path)
retriever_tokenizer = transformers.AutoTokenizer.from_pretrained(opt.retriever_model_path)
# once you have done query side training you cannot go back to a parameter-tied retriever
if opt_checkpoint is not None:
retriever_is_untied = opt_checkpoint.query_side_retriever_training or opt.query_side_retriever_training
else:
retriever_is_untied = opt.query_side_retriever_training
if retriever_is_untied:
retriever = UntiedDualEncoderRetriever(opt, contriever_encoder)
else:
retriever = DualEncoderRetriever(opt, contriever_encoder)
return retriever, retriever_tokenizer
def _convert_state_dict_from_dual_encoder_retriever(state_dict):
"""handles when we want to load an UntiedDualEncoderRetriever from a DualEncoderRetriever state dict"""
new_state_dict = {}
for k, tensor in state_dict.items():
if k.startswith("retriever"):
new_state_dict[k.replace("retriever.contriever", "retriever.passage_contriever")] = tensor
new_state_dict[k.replace("retriever.contriever", "retriever.query_contriever")] = tensor
else:
new_state_dict[k] = tensor
return new_state_dict
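# Example of the key fan-out above (the layer path is illustrative):
#   "retriever.contriever.encoder.layer.0.attention.self.query.weight"
# is duplicated into both
#   "retriever.passage_contriever.encoder.layer.0.attention.self.query.weight"
#   "retriever.query_contriever.encoder.layer.0.attention.self.query.weight"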
def load_reader(opt):
reader = None
if not opt.retrieve_only:
reader = src.fid.FiD.from_pretrained(opt.reader_model_type)
if opt.compute_crossattention_stats or "eval" in opt.gold_score_mode or "std" in opt.gold_score_mode:
reader.overwrite_forward_crossattention()
reader.create_crossattention_storage()
reader_tokenizer = transformers.AutoTokenizer.from_pretrained(opt.reader_model_type)
return reader, reader_tokenizer
def _set_reader_encoder_cfg(model, opt):
if model.reader is not None:
cfg = model.reader.encoder.config
cfg.n_context = opt.n_context
cfg.bsz = opt.per_gpu_batch_size
def _cast_atlas_to_precision(atlas_model, precision):
if atlas_model.reader is not None:
atlas_model.reader = cast_to_precision(atlas_model.reader, precision)
if atlas_model.retriever is not None and precision == "bf16":
atlas_model.retriever = cast_to_precision(atlas_model.retriever, precision)
def _cast_and_set_attrs_and_send_to_device(model, opt):
_set_reader_encoder_cfg(model, opt)
set_dropout(model, opt.dropout)
_cast_atlas_to_precision(model, opt.precision)
model = model.to(opt.device)
return model
def _load_atlas_model_state(opt, opt_checkpoint, model, model_dict):
model_dict = {
k.replace("retriever.module", "retriever").replace("reader.module", "reader"): v for k, v in model_dict.items()
}
if opt.query_side_retriever_training and not opt_checkpoint.query_side_retriever_training:
model_dict = _convert_state_dict_from_dual_encoder_retriever(model_dict)
    if opt.retrieve_only:  # don't load the reader in retrieve-only mode
        model_dict = {k: v for k, v in model_dict.items() if not k.startswith("reader")}
    if opt.use_file_passages:  # don't load the retriever in use_file_passages mode
        model_dict = {k: v for k, v in model_dict.items() if not k.startswith("retriever")}
model.load_state_dict(model_dict)
model = _cast_and_set_attrs_and_send_to_device(model, opt)
return model
def load_atlas_model(dir_path, opt, reset_params=False, eval_only=False):
epoch_path = os.path.realpath(dir_path)
save_path = os.path.join(epoch_path, "model.pth.tar")
logger.info(f"Loading {epoch_path}")
logger.info(f"loading checkpoint {save_path}")
checkpoint = torch.load(save_path, map_location="cpu")
opt_checkpoint = checkpoint["opt"]
step = checkpoint["step"]
model_dict = checkpoint["model"]
reader, reader_tokenizer = load_reader(opt)
retriever, retriever_tokenizer = load_retriever(opt, opt_checkpoint)
model = Atlas(opt, reader, retriever, reader_tokenizer, retriever_tokenizer)
model = _load_atlas_model_state(opt, opt_checkpoint, model, model_dict)
if eval_only:
return model, None, None, None, None, opt_checkpoint, step
if not reset_params:
optimizer, scheduler, retr_optimizer, retr_scheduler = set_optim(opt_checkpoint, model)
scheduler.load_state_dict(checkpoint["scheduler"])
optimizer.load_state_dict(checkpoint["optimizer"])
else:
optimizer, scheduler, retr_optimizer, retr_scheduler = set_optim(opt, model)
return model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt_checkpoint, step
def init_atlas_model(opt, eval_only):
reader, reader_tokenizer = load_reader(opt)
retriever, retriever_tokenizer = load_retriever(opt)
model = Atlas(opt, reader, retriever, reader_tokenizer, retriever_tokenizer)
model = _cast_and_set_attrs_and_send_to_device(model, opt)
if eval_only:
return model, None, None, None, None, opt, 0
optimizer, scheduler, retr_optimizer, retr_scheduler = set_optim(opt, model)
return model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, 0
def load_or_initialize_atlas_model(opt, eval_only=False):
"""
Either initializes a Atlas from t5 and contriever or loads one from disk.
if opt.model_path is "none" and {opt.checkpoint_dir/opt.name} doesn't exist, it will init a Atlas
or, if opt.model_path is "none" and {opt.checkpoint_dir/opt.name} does exist, it will load the Atlas at opt.checkpoint_dir/opt.name/latest
or, if opt.model_path is not "none" it will load the saved Atlas in opt.model_path
"""
checkpoint_path = get_checkpoint_path(opt)
latest_checkpoint_path = os.path.join(checkpoint_path, "checkpoint", "latest")
if opt.model_path == "none":
if not os.path.exists(latest_checkpoint_path): # Fresh run:
return init_atlas_model(opt, eval_only)
else: # Resume run
load_path, reset_params = latest_checkpoint_path, False
else: # fresh finetune run, initialized from old model
load_path, reset_params = opt.model_path, True
model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt_checkpoint, loaded_step = load_atlas_model(
load_path, opt, reset_params=reset_params, eval_only=eval_only
)
logger.info(f"Model loaded from {load_path}")
step = 0 if opt.model_path != "none" else loaded_step
return model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step
def save_atlas_model(model, optimizer, scheduler, retr_optimizer, retr_scheduler, step, opt, dir_path, name):
if opt.save_optimizer and opt.shard_optim:
optimizer.consolidate_state_dict()
if retr_optimizer:
retr_optimizer.consolidate_state_dict()
if not opt.is_main:
return 0
def symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
model_to_save = model.module if hasattr(model, "module") else model
path = os.path.join(dir_path, "checkpoint")
    epoch_path = os.path.join(path, name)  # e.g. name = "step-%s" % step
os.makedirs(epoch_path, exist_ok=True)
cp = os.path.join(path, "latest")
fp = os.path.join(epoch_path, "model.pth.tar")
optim_state = optimizer.state_dict() if opt.save_optimizer else None
if retr_optimizer and opt.save_optimizer:
retr_optim_state = retr_optimizer.state_dict()
else:
retr_optim_state = None
checkpoint = {
"step": step,
"model": model_to_save.state_dict(),
"optimizer": optim_state,
"retr_optimizer": retr_optim_state,
"scheduler": scheduler.state_dict(),
"retr_scheduler": retr_scheduler.state_dict() if retr_scheduler else None,
"opt": opt,
}
torch.save(checkpoint, fp)
symlink_force(epoch_path, cp)
if opt.save_optimizer and opt.shard_optim:
optimizer._all_states = []
# ============================================================================
# End of file: atlas-main/src/model_io.py
# ============================================================================
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim import adamw as _adamw
AdamW = _adamw.AdamW
# re-export the functional AdamW kernel bundled with torch.optim.adamw
# (torch.optim._functional.adamw in the torch version this repo targets)
adamw = _adamw.F.adamw
class AdamWFP32Copy(AdamW):
r"""Implements AdamW algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
\text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
\: \epsilon \text{ (epsilon)} \\
&\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
\: \textit{maximize} \\
&\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
\text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
&\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
&\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
&\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
&\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
&\hspace{5mm}\textbf{if} \: amsgrad \\
&\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
\widehat{v_t}) \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
maximize (bool, optional): maximize the params based on the objective, instead of
minimizing (default: False)
foreach (bool, optional): whether foreach implementation of optimizer
is used (default: None)
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
@torch.no_grad()
def step(self, closure=None, scale=1.0):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_sums = []
max_exp_avg_sqs = []
state_steps = []
amsgrad = group["amsgrad"]
beta1, beta2 = group["betas"]
for p in group["params"]:
if p.grad is None:
continue
pgrad = p.grad
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["float32copy"] = p.to(torch.float32, memory_format=torch.preserve_format)
p = state["float32copy"]
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
p = state["float32copy"]
params_with_grad.append(p)
# grads.append(p.grad)
if pgrad.is_sparse:
raise RuntimeError("AdamW does not support sparse gradients")
grads.append(pgrad.float() / scale)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
# update the steps for each param group update
state["step"] += 1
# record the step after step update
state_steps.append(state["step"])
adam_params = {
"params": params_with_grad,
"grads": grads,
"exp_avgs": exp_avgs,
"exp_avg_sqs": exp_avg_sqs,
"max_exp_avg_sqs": max_exp_avg_sqs,
"state_steps": state_steps,
"amsgrad": amsgrad,
"beta1": beta1,
"beta2": beta2,
"lr": group["lr"],
"weight_decay": group["weight_decay"],
"eps": group["eps"],
}
if "maximize" in group:
adam_params["maximize"] = group["maximize"]
if "foreach" in group:
adam_params["foreach"] = group["foreach"]
adamw(**adam_params)
for p in group["params"]:
if p.grad is None:
continue
state = self.state[p]
p.copy_(state["float32copy"])
return loss
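# Minimal usage sketch (illustrative, not from the repo): the model keeps bf16
# parameters while the optimizer maintains fp32 master copies internally. Assumes
# the torch version this repo pins (the functional `adamw` import above is
# version-dependent) and bf16 support on the current device.
def _demo_adamw_fp32_copy():
    model = torch.nn.Linear(4, 4).to(torch.bfloat16)
    optimizer = AdamWFP32Copy(model.parameters(), lr=1e-4, weight_decay=0.01)
    loss = model(torch.randn(2, 4, dtype=torch.bfloat16)).float().pow(2).mean()
    loss.backward()
    optimizer.step()  # updates the fp32 copies, then writes them back to the bf16 params
    optimizer.zero_grad()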
# ============================================================================
# End of file: atlas-main/src/AdamWFP32Copy.py
# ============================================================================
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import math
import os
import warnings
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
_CHECKPOINT_FOR_DOC = "t5-small"
####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
# See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of nn.Module)
####################################################
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.
Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
it will evenly distribute blocks across all devices.
Args:
device_map (`Dict[int, list]`, optional, defaults to None):
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
automatically mapped to the first device (for esoteric reasons). That means that the first device should
have fewer attention modules mapped to it than other devices. For reference, the t5 models have the
following number of attention modules:
- t5-small: 6
- t5-base: 12
- t5-large: 24
- t5-3b: 24
- t5-11b: 24
Example:
```python
# Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:
model = T5ForConditionalGeneration.from_pretrained('t5-3b')
device_map = {0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23]}
model.parallelize(device_map)
```
"""
DEPARALLELIZE_DOCSTRING = r"""
Moves the model to cpu from a model parallel state.
Example:
```python
# On a 4 GPU machine with t5-3b:
model = T5ForConditionalGeneration.from_pretrained('t5-3b')
device_map = {0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23]}
model.parallelize(device_map) # Splits the model across several devices
model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
```
"""
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
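# In symbols, T5LayerNorm is an RMS norm (no mean subtraction, no bias):
#   y = w * x / sqrt(mean(x^2, dim=-1) + eps)
# with the variance accumulated in float32 for numerical stability.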
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = nn.functional.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
# hidden_states = torch.clamp(hidden_states, -1000, 1000)
return hidden_states
class T5DenseGatedGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.gelu_act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_gelu = self.wi_0(hidden_states)
hidden_gelu = self.gelu_act(hidden_gelu.float()).type_as(hidden_states)
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
# hidden_states = torch.clamp(hidden_states, -1000, 1000)
return hidden_states
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
if config.feed_forward_proj == "relu":
self.DenseReluDense = T5DenseReluDense(config)
elif config.feed_forward_proj == "gated-gelu":
self.DenseReluDense = T5DenseGatedGeluDense(config)
else:
            raise ValueError(
                f"{config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`"
            )
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
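    # Worked example (hand-computed) with bidirectional=True, num_buckets=32,
    # max_distance=128: after halving, 16 buckets cover each direction.
    #   relative_position = -1   -> bucket 1            (exact "small" range)
    #   relative_position = +1   -> bucket 16 + 1 = 17  (positive offsets add 16)
    #   relative_position = +100 -> bucket 16 + 15 = 31 (logarithmic "large" range)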
def compute_bias(self, query_length, key_length):
"""Compute binned relative position bias"""
context_position = torch.arange(
query_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=self.relative_attention_bias.weight.device)[
None, :
]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
# if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
        if torch.isinf(hidden_states).any():
            logger.warning("inf in hidden states after self-attention, clamping")
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
# if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
            if torch.isinf(hidden_states).any():
                logger.warning("inf in hidden states after cross-attention, clamping")
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
# if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
        if torch.isinf(hidden_states).any():
            logger.warning(f"inf in hidden states after feed-forward (norm {torch.linalg.norm(hidden_states).item()}), clamping")
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class T5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5DenseGatedGeluDense):
module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (T5Attention, T5Stack)):
module.gradient_checkpointing = value
def _shift_right(self, input_ids):
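        """
        Shift `input_ids` one position to the right and prepend `decoder_start_token_id`,
        replacing any -100 label placeholders with `pad_token_id`.

        For example (assuming `decoder_start_token_id == pad_token_id == 0`, as in T5):
        [[8774, 55, 1]] -> [[0, 8774, 55]]
        """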
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only non-negative values"
return shifted_input_ids
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
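        # Only the first block learns relative attention biases; later blocks reuse the
        # bias computed by block 0 (see the position_bias handling in forward).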
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# Load onto devices
for k, v in self.device_map.items():
for layer in v:
cuda_device = "cuda:" + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
# Set embed_tokens to first layer
self.embed_tokens = self.embed_tokens.to(self.first_device)
# Set final layer norm to last device
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
for i in range(len(self.block)):
self.block[i] = self.block[i].to("cpu")
self.embed_tokens = self.embed_tokens.to("cpu")
self.final_layer_norm = self.final_layer_norm.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
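        # e.g. a cache of length 10 plus one new decoder token gives mask_seq_length == 11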
if use_cache is True:
            assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
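            # invert_attention_mask turns the 1/0 mask into an additive bias: 0 for tokens to keep,
            # a large negative value for tokens to mask out.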
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
                    logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
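                    # torch.utils.checkpoint.checkpoint only passes positional tensors through,
                    # so use_cache and output_attentions are captured by this closure instead.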
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
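            # When the cache is disabled, insert a None placeholder so the tuple layout
            # (and the indexing below) is identical in both cases.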
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer stores them.
position_bias = layer_outputs[2]
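            # When attention weights are returned, the self-attention weights sit at index 3,
            # pushing the cross-attention position bias to index 4.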
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
T5_START_DOCSTRING = r"""
The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder-decoder transformer pre-trained in a text-to-text
denoising generative setting.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config ([`T5Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model
weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
            details.
            [What are input IDs?](../glossary#input-ids)
            To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`T5Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
also be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`:
*attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a
sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds`
have to be input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds`
takes the value of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
T5_ENCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
            details.
            To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
        for layer, heads in heads_to_prune.items():
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5Model.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.decoder.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0,
            ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed
            for labels in `[0, ..., config.vocab_size - 1]`.
Returns:
Examples:
```python
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small')
>>> # training
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
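            # Flatten (batch, seq_len, vocab) logits against (batch, seq_len) labels;
            # positions labeled -100 (padding) do not contribute to the loss.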
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
            # select the correct batch index for each beam; the batch dimension of the cached
            # states is dim 0, shape (batch, num_heads, seq_len, head_dim)
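            # e.g. beam_idx = tensor([0, 0, 2]) duplicates beam 0's cache into slot 1 and keeps beam 2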
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
@add_start_docstrings(
"The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5EncoderModel(T5PreTrainedModel):
    _keys_to_ignore_on_load_missing = [
        r"encoder\.embed_tokens\.weight",
    ]
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
        for layer, heads in heads_to_prune.items():
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, T5EncoderModel
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5EncoderModel.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs
|
atlas-main
|
src/modeling_t5.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import math
import os
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
mean = hidden_states.to(torch.float32).mean(-1, keepdim=True)
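        # NOTE: "variance" below is the uncentered second moment E[x^2], matching the T5 implementation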
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states + self.bias
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # Set token_type_ids to the buffer registered in the constructor (all zeros). This usually happens
        # when token_type_ids are auto-generated; the registered buffer lets users trace the model without
        # passing token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings.float()).type_as(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
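        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, attention_head_size)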
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
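            # distance[l, r] = l - r; adding max_position_embeddings - 1 shifts it into
            # [0, 2 * max_position_embeddings - 2], a valid index into the distance embedding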
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel's forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
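        # The softmax is computed in float32 for numerical stability under fp16/bf16, then cast back.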
attention_probs = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states.float()).type_as(hidden_states)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states.float()).type_as(hidden_states)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BertAttention(config, position_embedding_type="absolute")
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
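# apply_chunking_to_forward optionally splits the sequence dimension (self.seq_len_dim)
# into chunks of chunk_size_feed_forward and applies the feed-forward to each chunk,
# reducing peak activation memory without changing the result.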
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
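# torch.utils.checkpoint.checkpoint only replays positional tensor arguments, so the
# module is wrapped in a closure that captures the non-tensor arguments
# (past_key_value, output_attentions) from the enclosing scope.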
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states.float()).type_as(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BertEncoder):
module.gradient_checkpointing = value
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` and
`add_cross_attention` arguments set to `True`; an `encoder_hidden_states` input is then expected in the forward pass.
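A minimal decoder-configuration sketch, mirroring the `BertLMHeadModel` example later in this file (the
checkpoint name is illustrative):
```python
>>> from transformers import BertConfig
>>> config = BertConfig.from_pretrained("bert-base-uncased")
>>> config.is_decoder = True  # enable causal self-attention
>>> config.add_cross_attention = True  # add cross-attention layers for Seq2Seq use
>>> model = BertModel.from_pretrained("bert-base-uncased", config=config)
```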
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
next_sentence_label: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForPreTraining.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.Tensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be
in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100`
are ignored (masked); the loss is only computed for the tokens with labels in `[0, ...,
config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained("bert-base-cased", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
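# generation with an MLM scores one extra slot per step: a PAD token is appended to
# input_ids and masked out (a zero is appended to the attention mask), so it never
# influences the other positions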
if self.config.pad_token_id is None:
raise ValueError("The PAD token should be defined for generation")
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top.""",
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
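# infer problem_type once from num_labels and the label dtype (integer labels imply
# single-label classification) and cache it on the config for later calls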
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
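# flatten (batch_size, num_choices, seq_len) inputs to (batch_size * num_choices, seq_len)
# so each choice is encoded independently; logits are reshaped back to
# (batch_size, num_choices) before the loss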
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, DataParallel splitting can add an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
atlas-main
|
src/modeling_bert.py
|
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import types
import torch
from torch import nn
from transformers.utils import logging
from src.modeling_t5 import T5ForConditionalGeneration, T5Stack
logger = logging.get_logger(__name__)
class FiDStack(T5Stack):
def __init__(self, config, embed_tokens=None):
super().__init__(config, embed_tokens=embed_tokens)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
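# Fusion-in-Decoder: the encoder encodes each of the n_context retrieved passages
# independently, so (bsz, n_context * passage_len) inputs are flattened to
# (bsz * n_context, passage_len); the decoder later attends over the re-concatenated states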
if not self.is_decoder:
input_ids = input_ids.view(input_ids.size(0) * self.config.n_context, -1)
attention_mask = attention_mask.view(attention_mask.size(0) * self.config.n_context, -1)
output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not self.is_decoder:
if not return_dict:
last_hidden_states = output[0]
last_hidden_state = last_hidden_states.view(self.config.bsz, -1, last_hidden_states.size(-1))
# note: tuple() takes a single iterable, so rebuild the output tuple by concatenation
output = (last_hidden_state,) + output[1:]
else:
last_hidden_state = output.last_hidden_state
output.last_hidden_state = last_hidden_state.view(self.config.bsz, -1, last_hidden_state.size(-1))
return output
class FiD(T5ForConditionalGeneration):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = FiDStack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = FiDStack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
def set_checkpoint(self, use_checkpoint):
"""
Enable or disable checkpointing in the encoder.
See https://pytorch.org/docs/stable/checkpoint.html
"""
for mod in self.encoder.encoder.block:
mod.use_checkpoint = use_checkpoint
def reset_score_storage(self):
"""
Reset score storage, only used when cross-attention scores are saved
to train a retriever.
"""
for mod in self.decoder.block:
mod.layer[1].EncDecAttention.score_storage = None
mod.layer[1].EncDecAttention.normalized_score_storage = None
mod.layer[1].EncDecAttention.prob_storage = None
@torch.no_grad()
def get_crossattention_scores(self, n_passages, mask, labels, ids, mode="all", mask_query=None):
"""
Cross-attention scores are aggregated to obtain a single scalar per
passage. This scalar can be seen as a similarity score between the
question and the input passage. It is obtained by averaging the
cross-attention scores obtained on the first decoded token over heads,
layers, and tokens of the input passage.
More details in Distilling Knowledge from Reader to Retriever:
https://arxiv.org/abs/2012.04584.
"""
scores, norms, probs = [], [], []
for mod in self.decoder.block:
scores.append(mod.layer[1].EncDecAttention.score_storage)
norms.append(mod.layer[1].EncDecAttention.normalized_score_storage)
probs.append(mod.layer[1].EncDecAttention.prob_storage)
scores = torch.stack(scores)
norms = torch.stack(norms)
probs = torch.stack(probs)
output = {}
if "scores" in mode or "all" in mode:
self.aggregate_value(scores, mask, labels, n_passages, ids, mask_query, output, prefix="scores")
if "probs" in mode or "all" in mode:
self.aggregate_value(probs, mask, labels, n_passages, ids, mask_query, output, prefix="probs")
if "norms" in mode or "all" in mode:
self.aggregate_value(norms, mask, labels, n_passages, ids, mask_query, output, prefix="norms")
return output
    def aggregate_value(self, scores, mask, labels, n_passages, ids, mask_query=None, output=None, prefix=""):
        if output is None:  # avoid sharing a mutable default dict across calls
            output = {}
        n_layers, bsz, n_tokens, total_tokens = scores.size()
ids = ids.view(bsz, n_passages, -1)
scores = scores.view(n_layers, bsz, n_tokens, n_passages, -1)
mask = mask.view(bsz, n_passages, -1)
scores = scores.masked_fill(~mask[None, :, None], 0.0)
ntokens_sum = 256 * n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
ntokens_wquery = mask.sum(dim=[2]) * n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
ntokens_first = mask.sum(dim=[2]) * n_layers
# Compute scores based on topk scores
scores = scores.sum(dim=[0])
for k in [5, 10, 20]:
topkscores = self.get_topk_score(k, scores, mask, labels, n_layers)
output[f"{prefix}top{k}"] = topkscores
scores = scores.masked_fill((labels == -100)[:, :, None, None], 0.0)
scores_wquery = scores.sum(dim=[1, 3])
scores_wquery_sepmask = scores.masked_fill(~(ids == 1)[:, None], 0).sum(dim=[1, 3])
output[f"{prefix}nosep"] = scores_wquery_sepmask / ntokens_sum
output[f"{prefix}first"] = scores[:, 0].sum(dim=[2]) / ntokens_first
output[f"{prefix}sum"] = scores_wquery / ntokens_sum
output[f"{prefix}avg"] = scores_wquery / ntokens_wquery
        # Compute scores based on scores without query
        if mask_query is not None:
output[f"{prefix}woquery"] = self.get_woquery_score(scores, mask_query, mask, labels, n_layers)
return output
def get_topk_score(self, topk, scores, mask, labels, n_layers):
topkscores = torch.topk(scores, k=topk, dim=-1)[0].sum(dim=[3])
topkscores = topkscores.masked_fill((labels == -100)[:, :, None], 0.0)
ntokens_top = n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
topkscores = topkscores.sum(dim=1) / (topk * ntokens_top)
return topkscores
def get_woquery_score(self, scores, mask_query, mask, labels, n_layers):
if scores.size(-1) > mask_query.size(-1):
zero_padding = torch.zeros(
[mask_query.size(0), scores.size(-1) - mask_query.size(-1)], device=mask_query.device, dtype=torch.bool
)
mask_query = torch.cat([mask_query, zero_padding], dim=-1)
mask_query = mask * (~mask_query[:, None])
scores_woquery = scores.masked_fill(~mask_query[:, None], 0.0)
# ntokens_woquery = mask_query.sum(dim=[2]) * n_layers * (~(labels==-100)).sum(dim=[1])[:, None]
ntokens_woquery = 256 * n_layers * (~(labels == -100)).sum(dim=[1])[:, None]
scores_woquery = scores_woquery.sum(dim=[1, 3])
return scores_woquery / ntokens_woquery
def overwrite_forward_crossattention(self):
"""
Replace cross-attention forward function, only used to save
cross-attention scores.
"""
for mod in self.decoder.block:
xattn = mod.layer[1].EncDecAttention
xattn.forward = types.MethodType(cross_attention_forward, xattn)
def create_crossattention_storage(self):
for mod in self.decoder.block:
xattn = mod.layer[1].EncDecAttention
xattn.score_storage = None
xattn.normalized_score_storage = None
xattn.prob_storage = None
def cross_attention_forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
        assert (
            len(past_key_value) == 2
        ), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1) # .type_as(scores)
if hasattr(self, "score_storage"):
with torch.no_grad():
self.score_storage = scores.detach().mean(dim=1)
self.prob_storage = attn_weights.detach().mean(dim=1)
self.normalized_score_storage = (
(torch.norm(value_states.float(), dim=-1)[:, :, None] * attn_weights).detach().mean(dim=1)
)
attn_weights = nn.functional.dropout(attn_weights.type_as(scores), p=self.dropout, training=self.training)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
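if __name__ == "__main__":
    # Minimal sketch (not part of the original file): reproduces the core
    # attention arithmetic of cross_attention_forward on random tensors.
    bsz, n_heads, q_len, k_len, d_head = 2, 8, 3, 5, 64
    q = torch.randn(bsz, n_heads, q_len, d_head)
    k = torch.randn(bsz, n_heads, k_len, d_head)
    v = torch.randn(bsz, n_heads, k_len, d_head)
    scores = torch.matmul(q, k.transpose(3, 2))  # (bsz, n_heads, q_len, k_len)
    attn = nn.functional.softmax(scores.float(), dim=-1)
    out = attn.matmul(v).transpose(1, 2).reshape(bsz, q_len, n_heads * d_head)
    print(out.shape)  # torch.Size([2, 3, 512])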
|
atlas-main
|
src/fid.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import math
import time
from functools import reduce
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from src import dist_utils
from src.retrievers import EMBEDDINGS_DIM
logger = logging.getLogger(__name__)
IGNORE_INDEX: int = -100
BERT_MAX_SEQ_LENGTH: int = 512
def encode_passages(batch, tokenizer, max_length):
bsz = len(batch)
n = max([len(example) for example in batch])
batch = [example + [""] * (n - len(example)) for example in batch]
batch = reduce(lambda a, b: a + b, batch)
tokens = tokenizer(
batch,
padding="max_length",
max_length=max_length,
return_tensors="pt",
truncation=True,
)
tokens = {k: v.view(bsz, n, -1) for k, v in tokens.items()}
return tokens
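# Illustrative contract (hypothetical inputs): for a batch of 2 examples with
# 3 and 2 passages respectively, encode_passages pads the second example with
# an empty passage, tokenizes the 6 flattened strings, and returns tensors of
# shape (2, 3, max_length) under "input_ids" / "attention_mask".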
class Atlas(nn.Module):
def __init__(self, opt, reader, retriever, reader_tokenizer, retriever_tokenizer):
super(Atlas, self).__init__()
self.reader = reader
self.retriever = retriever
self.reader_tokenizer = reader_tokenizer
self.retriever_tokenizer = retriever_tokenizer
self.opt = opt
self.READER_ALL_TOKENS = list(self.reader_tokenizer.vocab.values())
def _get_fp16_retriever_copy(self):
if hasattr(self.retriever, "module"):
retriever_to_copy = self.retriever.module
else:
retriever_to_copy = self.retriever
return copy.deepcopy(retriever_to_copy).half().eval()
@torch.no_grad()
def build_index(self, index, passages, gpu_embedder_batch_size, logger=None):
n_batch = math.ceil(len(passages) / gpu_embedder_batch_size)
retrieverfp16 = self._get_fp16_retriever_copy()
total = 0
for i in range(n_batch):
batch = passages[i * gpu_embedder_batch_size : (i + 1) * gpu_embedder_batch_size]
batch = [self.opt.retriever_format.format(**example) for example in batch]
batch_enc = self.retriever_tokenizer(
batch,
padding="longest",
return_tensors="pt",
                max_length=min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
truncation=True,
)
embeddings = retrieverfp16(**_to_cuda(batch_enc), is_passages=True)
index.embeddings[:, total : total + len(embeddings)] = embeddings.T
total += len(embeddings)
if i % 500 == 0 and i > 0:
logger.info(f"Number of passages encoded: {total}")
dist_utils.barrier()
logger.info(f"{total} passages encoded on process: {dist_utils.get_rank()}")
if not index.is_index_trained():
logger.info(f"Building faiss indices")
index.train_index()
@torch.no_grad()
def _retrieve(
self,
index,
topk,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata=None,
filtering_fun=None,
iter_stats={},
):
self.retriever.eval()
if len(query) > 0:
query_emb = self.retriever(query_ids_retriever, query_mask_retriever, is_passages=False)
else:
query_emb = torch.empty((0, EMBEDDINGS_DIM)).cuda() # TODO: broken
if self.training:
self.retriever.train()
search_start = time.time()
if filtering_fun is not None:
passages, scores = index.search_knn(query_emb, topk * self.opt.filtering_overretrieve_ratio)
passages, scores = filtering_fun(batch_metadata, passages, scores, topk, training=self.training)
else:
passages, scores = index.search_knn(query_emb, topk)
iter_stats["runtime/search"] = (time.time() - search_start, 1)
return passages, scores, query_emb
@torch.no_grad()
def retrieve_with_rerank(
self,
index,
topk,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata=None,
filtering_fun=None,
iter_stats={},
):
bsz = len(query)
to_rerank = self.opt.n_to_rerank_with_retrieve_with_rerank
# first, do the retrieval
passages, _, query_emb = self._retrieve(
index,
to_rerank,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata,
filtering_fun,
iter_stats,
)
retrieverfp16 = self._get_fp16_retriever_copy()
fstr = self.opt.retriever_format
flat_passage_strings = [fstr.format(**p) for ps in passages for p in ps]
encoder_batch_size = min(len(flat_passage_strings), self.opt.per_gpu_embedder_batch_size)
passage_emb, output_passages, output_scores = (
query_emb.new_zeros(len(flat_passage_strings), query_emb.shape[-1]),
[],
[],
)
for b in range(0, len(flat_passage_strings), encoder_batch_size):
batch = flat_passage_strings[b : b + encoder_batch_size]
batch_enc = self.retriever_tokenizer(
batch,
padding="longest",
return_tensors="pt",
max_length=min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
truncation=True,
)
batch_emb = retrieverfp16(**_to_cuda(batch_enc), is_passages=True).to(query_emb)
passage_emb[b : b + encoder_batch_size] = batch_emb
passage_emb = passage_emb.view(bsz, to_rerank, -1)
retriever_scores = torch.einsum("id, ijd->ij", [query_emb, passage_emb])
top_retriever_scores, top_retriever_inds = torch.topk(retriever_scores, topk, dim=1)
for i in range(bsz):
output_passages.append([passages[i][j] for j in top_retriever_inds[i]])
output_scores.append(top_retriever_scores[i].tolist())
return output_passages, output_scores
@torch.no_grad()
def retrieve(self, *args, **kwargs):
retrieve_func = self.retrieve_with_rerank if self.opt.retrieve_with_rerank else self._retrieve
passages, scores = retrieve_func(*args, **kwargs)[:2]
return passages, scores
def append_query(self, query, passages):
return [self.opt.encoder_format.format(query=query, **p) for p in passages]
def retriever_tokenize(self, query):
if self.retriever_tokenizer:
query_enc = self.retriever_tokenizer(
query,
max_length=min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
padding="max_length",
truncation=True,
return_tensors="pt",
)
            query_enc = _to_cuda(query_enc)
        else:
            query_enc = None
        return query_enc
def reader_tokenize(self, query, target, target_tokens):
if target_tokens is None:
if self.opt.decoder_prompt_format is not None:
modified_query = [self.opt.decoder_prompt_format.format_map({"query": q}) for q in query]
target = [q + t for (q, t) in zip(modified_query, target)]
query_mask = self.reader_tokenizer(
modified_query,
max_length=self.opt.target_maxlength,
padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)["attention_mask"]
if self.opt.decoder_format is not None:
target = [self.opt.decoder_format.format(target=t) for t in target]
target = [t + "</s>" if not t.endswith("</s>") else t for t in target]
target_tokens = self.reader_tokenizer(
target,
max_length=self.opt.target_maxlength,
padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)
decoder_input_ids = self.reader._shift_right(target_tokens["input_ids"])
labels = target_tokens["input_ids"].masked_fill(~target_tokens["attention_mask"].bool(), IGNORE_INDEX)
# If decoder prompt is not None mask labels such that the model is not trained to predict the prompt
if self.opt.decoder_prompt_format is not None:
query_mask = self.reader_tokenizer(
modified_query,
max_length=self.opt.target_maxlength,
padding="max_length",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)["attention_mask"]
padding = torch.zeros((query_mask.size(0), target_tokens["input_ids"].size(-1) - query_mask.size(-1)))
query_mask = torch.cat([query_mask, padding], dim=1)
labels = labels.masked_fill(query_mask.bool(), IGNORE_INDEX)
return labels.cuda(), decoder_input_ids.cuda()
def tokenize(self, query, target, target_tokens):
if query is None and target is None:
return None, None, None
assert (
target_tokens is None or self.opt.decoder_prompt_format is None
), "decoder_prompt_format not compatible with target tokenized in iterator"
query_enc = self.retriever_tokenize(query) if not self.opt.use_file_passages else None
labels, decoder_input_ids = self.reader_tokenize(query, target, target_tokens)
return query_enc, labels, decoder_input_ids
def tokenize_passages(self, query, passages):
if len(query) == 0:
return None, None
query_passages = [self.append_query(q, p) for q, p in zip(query, passages)]
fstr = self.opt.retriever_format
retriever_passages = [[fstr.format(**p) for p in example] for example in passages]
if self.retriever_tokenizer:
retriever_tok = encode_passages(
retriever_passages,
self.retriever_tokenizer,
min(self.opt.text_maxlength, BERT_MAX_SEQ_LENGTH),
)
retriever_tok = _to_cuda(retriever_tok)
else:
retriever_tok = None
reader_tok = encode_passages(query_passages, self.reader_tokenizer, self.opt.text_maxlength)
reader_tok = _to_cuda(reader_tok)
return reader_tok, retriever_tok
def perplexity_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz):
with torch.no_grad():
self.reader.eval()
total_context = reader_ids.size(1)
cfg.n_context = 1
cfg.bsz = bsz * total_context
reader_ids_score = reader_ids.view(bsz * total_context, -1)
reader_mask_score = reader_mask.view(bsz * total_context, -1)
repeated_decoder_input_ids = torch.repeat_interleave(decoder_input_ids, total_context, dim=0)
repeated_labels = torch.repeat_interleave(labels, total_context, dim=0)
reader_output = self.reader(
input_ids=reader_ids_score.cuda(),
attention_mask=reader_mask_score.cuda(),
decoder_input_ids=repeated_decoder_input_ids,
labels=repeated_labels,
use_cache=False,
)
token_loss = nn.functional.cross_entropy(
reader_output.logits.view(-1, reader_output.logits.size(-1)),
repeated_labels.flatten(),
reduction="none",
)
gold_score = token_loss.view(bsz, total_context, -1)
z = (repeated_labels.view(bsz, total_context, -1) > -1).sum(dim=-1)
gold_score = -gold_score.sum(dim=-1) / z
return gold_score
def eval_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz, mask_query):
self.reader.eval()
self.reader.reset_score_storage()
cfg.bsz = reader_ids.size(0)
cfg.n_context = reader_ids.size(1)
reader_ids_score = reader_ids.view(reader_ids.size(0), -1)
reader_mask_score = reader_mask.view(reader_mask.size(0), -1)
with torch.no_grad():
reader_output = self.reader(
input_ids=reader_ids_score,
attention_mask=reader_mask_score,
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
crossattention_scores = self.reader.get_crossattention_scores(
cfg.n_context,
reader_mask_score,
labels=labels,
ids=reader_ids,
mode=self.opt.gold_score_mode,
mask_query=mask_query,
)
gold_score = select_crossattention_scores(crossattention_scores, self.opt.gold_score_mode)
if self.training:
self.reader.train()
return gold_score
def loop_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz):
with torch.no_grad():
total_context = reader_ids.size(1)
doc_len = reader_ids.size(-1)
self.reader.eval()
cfg.bsz = bsz
cfg.n_context = total_context
reader_ids_score_eval = reader_ids.view(reader_ids.size(0), -1)
reader_mask_score_eval = reader_mask.view(reader_mask.size(0), -1)
# forward pass for calculating and caching the encoder states:
reader_output_eval = self.reader(
input_ids=reader_ids_score_eval,
attention_mask=reader_mask_score_eval,
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
eval_hidden_state = reader_output_eval.encoder_last_hidden_state
# run n_docs - 1 forward passes to calculate pp when leaving a doc out
gold_scores = []
for loo_index in range(total_context):
reader_mask_loo = reader_mask.clone()
reader_mask_loo[:, loo_index] = False # mask out this doc
loo_output_eval = self.reader(
encoder_outputs=[eval_hidden_state],
attention_mask=reader_mask_loo.view(bsz, (total_context) * doc_len),
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
token_loss = nn.functional.cross_entropy(
loo_output_eval.logits.view(-1, loo_output_eval.logits.size(-1)), labels.view(-1), reduction="none"
)
mean_loss = token_loss.view(bsz, labels.shape[-1]).sum(dim=-1) / (labels > -1).sum(-1)
gold_scores.append(mean_loss)
gold_score = torch.stack(gold_scores, dim=1)
return gold_score
@torch.no_grad()
def emdr_score(self, reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz):
self.reader.eval()
cfg.n_context = 1
cfg.bsz = bsz * self.opt.retriever_n_context
reader_ids_score = reader_ids.view(bsz * self.opt.retriever_n_context, -1)
reader_mask_score = reader_mask.view(bsz * self.opt.retriever_n_context, -1)
repeated_decoder_input_ids = torch.repeat_interleave(decoder_input_ids, self.opt.retriever_n_context, dim=0)
repeated_labels = torch.repeat_interleave(labels, self.opt.retriever_n_context, dim=0)
reader_output = self.reader(
input_ids=reader_ids_score.cuda(),
attention_mask=reader_mask_score.cuda(),
labels=repeated_labels,
use_cache=False,
)
gold_score = reader_output.logits
return gold_score
def forward(
self,
index,
query,
target,
target_tokens=None,
passages=None,
batch_metadata=None,
filtering_fun=None,
use_cache=False,
train_retriever=False,
iter_stats={},
):
forward_start = time.time()
bsz = len(query)
query_mask_reader = (
self.reader_tokenizer.batch_encode_plus(
query,
max_length=self.opt.text_maxlength,
padding="longest",
truncation=True,
return_tensors="pt",
add_special_tokens=False,
)["attention_mask"]
.bool()
.cuda()
)
query_enc, labels, decoder_input_ids = self.tokenize(query, target, target_tokens)
if not self.opt.use_file_passages:
retrieve_start = time.time()
passages, _ = self.retrieve(
index,
self.opt.retriever_n_context,
query,
query_enc["input_ids"],
query_enc["attention_mask"],
batch_metadata=batch_metadata,
filtering_fun=filtering_fun,
iter_stats=iter_stats,
)
iter_stats["runtime/retrieve"] = (time.time() - retrieve_start, 1)
reader_tokens, retriever_tokens = self.tokenize_passages(query, passages)
reader_ids = reader_tokens["input_ids"] # FIXME
reader_mask = reader_tokens["attention_mask"].bool()
n_context_training = min(self.opt.n_context, reader_ids.size(1))
cfg = self.reader.encoder.config
retriever_loss = None
if train_retriever:
if self.opt.use_gradient_checkpoint_retriever:
self.retriever.gradient_checkpointing_enable()
query_emb = self.retriever(**query_enc, is_passages=False)
if "std" in self.opt.gold_score_mode:
retriever_tokens = {k: v[:, :n_context_training] for k, v in retriever_tokens.items()}
retriever_tokens = {k: v.reshape(-1, v.size(-1)) for k, v in retriever_tokens.items()}
passage_emb = self.retriever(**retriever_tokens, is_passages=True).to(query_emb)
passage_emb = passage_emb.view(bsz, -1, passage_emb.size(-1))
retriever_score = torch.einsum("id, ijd->ij", [query_emb, passage_emb])
if self.opt.use_gradient_checkpoint_retriever:
self.retriever.gradient_checkpointing_disable()
if "eval" in self.opt.gold_score_mode:
gold_score = self.eval_score(
reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz, query_mask_reader
)
elif "loop" in self.opt.gold_score_mode:
gold_score = self.loop_score(reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz)
elif "ppmean" in self.opt.gold_score_mode:
gold_score = self.perplexity_score(reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz)
elif "emdr" in self.opt.gold_score_mode:
gold_score = self.emdr_score(reader_ids, reader_mask, decoder_input_ids, labels, cfg, bsz)
self.reader.reset_score_storage()
if self.training:
self.reader.train()
cfg.bsz = reader_ids.size(0)
cfg.n_context = n_context_training
reader_ids_training = reader_ids[:, :n_context_training].contiguous()
reader_mask_training = reader_mask[:, :n_context_training].contiguous()
reader_ids_training = reader_ids_training.view(reader_ids.size(0), -1)
reader_mask_training = reader_mask_training.view(reader_mask.size(0), -1)
if self.opt.use_gradient_checkpoint_reader:
self.reader.gradient_checkpointing_enable()
reader_output = self.reader(
input_ids=reader_ids_training,
attention_mask=reader_mask_training,
decoder_input_ids=decoder_input_ids,
labels=labels,
use_cache=False,
)
reader_loss = reader_output[0]
if self.opt.use_gradient_checkpoint_reader:
self.reader.gradient_checkpointing_disable()
if train_retriever:
if self.opt.compute_crossattention_stats or "std" in self.opt.gold_score_mode:
crossattention_scores = self.reader.get_crossattention_scores(
n_context_training,
reader_mask_training.cuda(),
ids=reader_ids_training.cuda(),
mask_query=query_mask_reader.cuda(),
labels=labels,
mode="all",
)
if "std" in self.opt.gold_score_mode:
gold_score = select_crossattention_scores(
crossattention_scores, self.opt.gold_score_mode
).detach() # TODO: is detach really useful here?
retriever_score = retriever_score / np.sqrt(query_emb.size(-1))
if self.opt.compute_crossattention_stats:
with torch.no_grad():
for k, v in crossattention_scores.items():
corr = torch.corrcoef(torch.stack([gold_score.view(-1), v.view(-1)]))
corr = corr[0, 1].item()
if np.isnan(corr):
corr = 0.0
iter_stats[f"corr/{k}"] = (corr, len(query))
if gold_score is not None:
gold_score = gold_score.float()
retriever_score = retriever_score.float()
if self.opt.gold_score_mode == "emdr":
retriever_loss = self.logprob(retriever_score, gold_score, labels)
else:
retriever_loss = self.kldivloss(retriever_score, gold_score)
self.reader.reset_score_storage()
iter_stats["loss/reader_loss"] = (reader_loss.item(), len(query))
if retriever_loss is not None:
iter_stats["loss/retriever_loss"] = (retriever_loss.item(), len(query))
iter_stats["runtime/forward"] = (time.time() - forward_start, 1)
return reader_loss, retriever_loss
def kldivloss(self, score, gold_score):
gold_score = torch.softmax(gold_score / self.opt.temperature_gold, dim=-1)
score = torch.nn.functional.log_softmax(score / self.opt.temperature_score, dim=-1)
return torch.nn.KLDivLoss()(score, gold_score)
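    # Distillation sketch (toy numbers, not from the original file): with both
    # temperatures at 1, retriever scores [2.0, 0.0] give softmax probs of
    # roughly [0.88, 0.12], and kldivloss pulls them towards the gold
    # distribution, e.g. the uniform [0.5, 0.5] for gold scores [1.0, 1.0].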
def logprob(self, score, gold_score, labels):
with torch.no_grad():
repeated_labels = torch.repeat_interleave(labels, self.opt.retriever_n_context, dim=0)
repeated_labels[repeated_labels == IGNORE_INDEX] = 0
mask_labels = labels >= 0
gold_log_prob = torch.nn.functional.log_softmax(gold_score / self.opt.temperature_gold, dim=-1)
gold_log_probs = torch.gather(gold_log_prob, dim=-1, index=repeated_labels[..., None]).view(
gold_log_prob.size(0), -1
)
gold_log_probs = gold_log_probs.view(score.size(0), score.size(1), -1)
log_score = torch.nn.functional.log_softmax(score / self.opt.temperature_score, dim=-1)
log_prob = gold_log_probs + log_score[..., None]
logsumprobs = torch.logsumexp(log_prob, dim=1)
loss = -1 * torch.sum(logsumprobs * mask_labels) / torch.sum(mask_labels)
return loss
@torch.no_grad()
def compute_reader_loss_and_logits(self, tokens, decoder_input_ids, labels):
cfg = self.reader.encoder.config
cfg.bsz = tokens["input_ids"].size(0)
cfg.n_context = min(self.opt.n_context, tokens["input_ids"].size(1))
reader_loss = self.reader(
input_ids=tokens["input_ids"].cuda().view(tokens["input_ids"].size(0), -1),
attention_mask=tokens["attention_mask"].cuda().view(tokens["attention_mask"].size(0), -1),
decoder_input_ids=decoder_input_ids.cuda(),
labels=labels.cuda(),
use_cache=False,
)
return reader_loss[0].cpu().item(), reader_loss[1]
@torch.no_grad()
def generate(self, tokens, query, choices=None):
cfg = self.reader.encoder.config
cfg.bsz = tokens["input_ids"].size(0)
cfg.n_context = min(self.opt.n_context, tokens["input_ids"].size(1))
tokens = {k: v.view(v.size(0), -1) for k, v in tokens.items()}
bos_token_id = None
prefix_allowed_tokens_fn = None
if self.opt.decoder_prompt_format is not None:
prefix_str = [self.opt.decoder_prompt_format.format_map({"query": q}) for q in query]
prefix_allowed_tokens_fn = self.get_prefix_allowed_tokens_fn(prefix_str)
outputs = self.reader.generate(
input_ids=tokens["input_ids"].cuda(),
attention_mask=tokens["attention_mask"].cuda(),
num_return_sequences=1,
max_length=self.opt.generation_max_length,
min_length=self.opt.generation_min_length,
num_beams=self.opt.generation_num_beams,
length_penalty=self.opt.generation_length_penalty,
forced_bos_token_id=bos_token_id,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
)
return outputs
def get_prefix_allowed_tokens_fn(self, prefix_str: Optional[str] = None):
if prefix_str:
prefix_tokens_ids = self.reader_tokenizer.batch_encode_plus(prefix_str, add_special_tokens=False)[
"input_ids"
]
def prefix_allowed_tokens_fn(batch_id: int, input_ids: torch.Tensor) -> List[int]:
if input_ids.shape[-1] > len(prefix_tokens_ids[batch_id]):
return self.READER_ALL_TOKENS
                return [prefix_tokens_ids[batch_id][input_ids.shape[-1] - 1]]  # generate expects a list of token ids
else:
prefix_allowed_tokens_fn = None
return prefix_allowed_tokens_fn
def select_crossattention_scores(scores, mode):
if "eval" in mode:
return scores[mode[len("eval") :]]
elif "std" in mode:
return scores[mode[len("std") :]]
def _to_cuda(tok_dict):
return {k: v.cuda() for k, v in tok_dict.items()}
|
atlas-main
|
src/atlas.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import os
import subprocess
from logging import getLogger
import torch
logger = getLogger()
def init_distributed_mode_torchrun(params):
"""
    Handle single and multi-GPU for single-node jobs with torchrun.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
For NCCL verbose mode, use:
os.environ["NCCL_DEBUG"] = "INFO"
"""
params.local_rank = int(os.environ["LOCAL_RANK"])
params.node_id = 0
params.n_nodes = 1
params.global_rank = int(os.environ["RANK"])
params.world_size = int(os.environ["WORLD_SIZE"])
# define whether this is the master process / if we are in distributed mode
params.is_main = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
params.is_distributed = True
# summary
PREFIX = "%i - " % params.global_rank
# set GPU device
if params.is_distributed:
torch.cuda.set_device(params.local_rank)
device = torch.device("cuda", params.local_rank)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
params.device = device
# initialize multi-GPU
if params.is_distributed:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
# print("Initializing PyTorch distributed ...")
# Fix for if gloo sockets are inconsistent
p1 = subprocess.Popen(["ip", "r"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "default"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
gloo_socket_ifname = subprocess.check_output(["awk", "{print $5}"], stdin=p2.stdout).decode("utf-8").strip()
p2.stdout.close()
os.environ["GLOO_SOCKET_IFNAME"] = gloo_socket_ifname
torch.distributed.init_process_group(
init_method="env://",
backend="nccl",
)
global GLOO_GROUP
GLOO_GROUP = torch.distributed.new_group(
list(range(params.world_size)),
backend="gloo",
timeout=datetime.timedelta(0, 600),
)
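# Usage sketch (assumes a launcher script that calls this helper), e.g.:
#   torchrun --nproc_per_node=2 train.py ...
# torchrun exports the LOCAL_RANK / RANK / WORLD_SIZE variables read above.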
|
atlas-main
|
src/torchrun_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributed as dist
from src import slurm
class Gather(torch.autograd.Function):
@staticmethod
    def forward(ctx, x: torch.Tensor):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def gather_wgrad(x: torch.Tensor, dim: int = 0):
if not dist.is_initialized():
return x
x_gather = Gather.apply(x)
x_gather = torch.cat(x_gather, dim=dim)
return x_gather
@torch.no_grad()
def all_gather(x: torch.Tensor, dim: int = 0):
if not dist.is_initialized():
return x
x_gather = [torch.ones_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(x_gather, x)
x_gather = torch.cat(x_gather, dim=dim)
return x_gather
@torch.no_grad()
def varsize_all_gather(x: torch.Tensor, dim: int = 0):
"""all_gather tensors of different sizes along the specified dimension with concatenation"""
if not dist.is_initialized():
return x
size = x.size(dim)
tensor_size = torch.tensor(size, device=x.device, dtype=torch.int64)
all_sizes = [torch.zeros_like(tensor_size) for _ in range(dist.get_world_size())]
dist.all_gather(all_sizes, tensor_size)
max_size = max([s.item() for s in all_sizes])
padding_tuple_size = [max_size - size if k == dim else x.size(k) for k in range(x.ndim)]
tensor_tuple_size = [max_size if k == dim else x.size(k) for k in range(x.ndim)]
if size != max_size:
padding = torch.empty(size=padding_tuple_size, dtype=x.dtype, device=x.device)
x = torch.cat((x, padding), dim=dim)
tensor_list = [torch.empty(tensor_tuple_size, device=x.device, dtype=x.dtype) for s in all_sizes]
dist.all_gather(tensor_list=tensor_list, tensor=x)
tensor_list = [torch.narrow(tensor, dim, start=0, length=all_sizes[k]) for k, tensor in enumerate(tensor_list)]
output = torch.cat(tensor_list, dim=dim)
return output
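# Size sketch (illustrative): if rank 0 holds a (3, d) tensor and rank 1 a
# (5, d) tensor, both are padded to (5, d) for the fixed-size all_gather and
# narrowed back afterwards, so the concatenated result is (8, d).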
@torch.no_grad()
def varsize_gather(x: torch.Tensor, dst: int = 0, dim: int = 0):
"""gather tensors of different sizes along the specified dimension"""
if not dist.is_initialized():
return x
size = x.size(dim)
tensor_size = torch.tensor(size, device=x.device, dtype=torch.int64)
all_sizes = [torch.zeros_like(tensor_size) for _ in range(dist.get_world_size())]
dist.all_gather(all_sizes, tensor_size)
max_size = max([s.item() for s in all_sizes])
padding_tuple_size = [max_size - size if k == dim else x.size(k) for k in range(x.ndim)]
tensor_tuple_size = [max_size if k == dim else x.size(k) for k in range(x.ndim)]
if size != max_size:
padding = torch.empty(size=padding_tuple_size, dtype=x.dtype, device=x.device)
x = torch.cat((x, padding), dim=dim)
if get_rank() == dst:
tensor_list = [torch.empty(tensor_tuple_size, device=x.device, dtype=x.dtype) for s in all_sizes]
else:
tensor_list = None
dist.gather(x, gather_list=tensor_list, dst=dst)
if get_rank() == dst:
tensor_list = [torch.narrow(tensor, dim, start=0, length=all_sizes[k]) for k, tensor in enumerate(tensor_list)]
return tensor_list
@torch.no_grad()
def get_varsize(x: torch.Tensor, dim: int = 0):
"""gather tensors of different sizes along the first dimension"""
if not dist.is_initialized():
return torch.tensor([x.size(dim)])
# determine max size
size = torch.tensor([x.size(dim)], device=x.device, dtype=torch.int)
allsizes = [torch.zeros_like(size) for _ in range(dist.get_world_size())]
dist.all_gather(allsizes, size)
allsizes = torch.cat(allsizes)
return allsizes
@torch.no_grad()
def gather_number(x):
if not dist.is_initialized():
return [x]
output = [None for _ in range(get_world_size())]
dist.all_gather_object(output, x, group=slurm.get_gloo_group())
return output
def barrier():
if dist.is_initialized():
torch.distributed.barrier()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main():
return get_rank() == 0
def get_world_size():
if not dist.is_initialized():
return 1
else:
return dist.get_world_size()
def average_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
if is_main():
x = x / dist.get_world_size()
return x
def sum_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
return x
def weighted_average(x, count):
if not dist.is_initialized():
if isinstance(x, torch.Tensor):
x = x.item()
return x, count
t_loss = torch.tensor([x * count]).cuda()
t_total = torch.tensor([count]).cuda()
t_loss = sum_main(t_loss)
t_total = sum_main(t_total)
return (t_loss / t_total).item(), t_total.item()
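if __name__ == "__main__":
    # Single-process sketch (not part of the original file): without
    # torch.distributed initialized, these helpers degrade to local no-ops.
    print(get_rank(), get_world_size())   # 0 1
    print(weighted_average(0.5, 10))      # (0.5, 10)
    print(get_varsize(torch.ones(4, 2)))  # tensor([4])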
|
atlas-main
|
src/dist_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import string
import torch
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from src.evaluation import exact_match_score
from src.options import Options
from src.tasks.base import BaseTask
def _get_permutation_orderings(N, permutations_type):
li = list(range(N))
if permutations_type == "cyclic":
orderings = [li[N - i :] + li[: N - i] for i in range(N)]
elif permutations_type == "all":
orderings = list(itertools.permutations(li))
else:
orderings = [li]
return orderings
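# e.g. _get_permutation_orderings(3, "cyclic") -> [[0, 1, 2], [2, 0, 1], [1, 2, 0]]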
class Task(BaseTask):
metrics = ["debiased_accuracy", "accuracy", "eval_loss"]
def __init__(self, opt: Options, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
super().__init__()
self.tokenizer = tokenizer
self.maximum_question_length = 356
self.choices = string.ascii_uppercase[: opt.multiple_choice_num_options]
self.choice2index = {o: self.tokenizer(o)["input_ids"][0] for o in self.choices}
@staticmethod
def get_multiple_choice_question_prompt(tokenizer, question, choices, maximum_length=356):
def _length_in_tokens(string):
return len(tokenizer(string)["input_ids"])
def _get_prompt(question, choices_wseparator):
preprocessed_question = f"question: {question.strip()} options: {choices_wseparator} answer: <extra_id_0>"
return preprocessed_question
choices_wseparator = " ".join([f"({L}) {T}" for L, T in choices.items()]).strip()
question_with_options = _get_prompt(question, choices_wseparator)
if _length_in_tokens(question_with_options) > maximum_length:
max_qlen = maximum_length - _length_in_tokens(_get_prompt("", choices_wseparator))
truncated_question = tokenizer.decode(
tokenizer(question)["input_ids"][-max_qlen:], skip_special_tokens=True
)
question_with_options = _get_prompt(truncated_question, choices_wseparator)
return question_with_options
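    # Prompt sketch (illustrative): question "What color is the sky?" with
    # options {"A": "blue", "B": "red"} is rendered as
    # "question: What color is the sky? options: (A) blue (B) red answer: <extra_id_0>"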
def process(self, example, *args, **kwargs):
preprocessed_question = self.get_multiple_choice_question_prompt(
self.tokenizer, example["question"], example["options"], maximum_length=self.maximum_question_length
)
target = f'<extra_id_0> {example["answer"]}'
return {
"query": preprocessed_question,
"target": target,
"choices": self.choices,
"passages": [{"title": "", "text": ""}],
"answers": [example["answer"]],
"metadata": example,
}
@staticmethod
def get_permutations(example, permutations_type):
"""clones example according to permutations_type (either "none", 'cyclic' or 'full'"""
options, answer = example["options"], example["answer"]
uid = example["question"] + " ".join(options.values())
choice_keys = list(sorted(options.keys()))
choice_values = [options[l] for l in choice_keys]
orderings = _get_permutation_orderings(len(choice_keys), permutations_type)
permuted_examples = []
for ordering in orderings:
permuted_options = {l: choice_values[o] for l, o in zip(choice_keys, ordering)}
permuted_answer = [k for k, ans in permuted_options.items() if ans == options[answer]][0]
permed_example = copy.deepcopy(example)
permed_example["options"] = permuted_options
permed_example["answer"] = permuted_answer
permed_example["is_original"] = permuted_options == example["options"]
permed_example["uid"] = uid
permuted_examples.append(permed_example)
return permuted_examples
@staticmethod
def data_iterator(*args, **kwargs):
# wrap base data iterator in the case of permuting examples
super_iterator = super(Task, Task).data_iterator(*args, **kwargs)
perms_type = (
kwargs["opt"].multiple_choice_eval_permutations
if kwargs.get("is_eval", False)
else kwargs["opt"].multiple_choice_train_permutations
)
for example in super_iterator:
for permed_item in Task.get_permutations(example, perms_type):
yield permed_item
def evaluation(self, prediction, ground_truths):
sample_metrics = {"accuracy": exact_match_score(prediction, ground_truths)}
return sample_metrics
def get_choice_logits(self, logits):
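        # logits are per decoder position: position 0 scores the <extra_id_0>
        # sentinel, so the answer letter is read off at position 1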
prediction_logits = {
letter: logits[1, letter_index].cpu().item() for letter, letter_index in self.choice2index.items()
}
return prediction_logits
def _get_original_instance(self, permutations):
return [p for p in permutations if p["metadata"]["is_original"]][0]
def _marginalize_across_permutations(self, permutations):
original_instance = self._get_original_instance(permutations)
text_answer_2_letter = {v: k for k, v in original_instance["metadata"]["options"].items()}
aggregate_probs = {}
for perm in permutations:
logits = torch.tensor([perm["choice_logits"][c] for c in self.choices])
probs = torch.softmax(logits, dim=0).tolist()
perm_text_options = [perm["metadata"]["options"][c] for c in self.choices]
for t, p in zip(perm_text_options, probs):
aggregate_probs.setdefault(t, []).append(p)
marginalized = {text_answer_2_letter[t]: torch.tensor(v).mean().item() for t, v in aggregate_probs.items()}
return marginalized, aggregate_probs
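    # Debiasing sketch (toy numbers): if "blue" receives probability 0.7 when
    # listed as (A) and 0.5 when listed as (B), its marginalized score is 0.6
    # under its original letter, which removes answer-position bias.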
def _reduce_permutations(self, dataset_wpred):
to_agg = {}
for output in dataset_wpred:
to_agg.setdefault(output["metadata"]["uid"], []).append(output)
output_dataset_wpred = []
for _, perms in to_agg.items():
original_instance = copy.deepcopy(self._get_original_instance(perms))
scores, all_scores = self._marginalize_across_permutations(perms)
del original_instance["choice_logits"]
original_instance["choice_probs"] = scores
original_instance["generation"] = max(scores.items(), key=lambda x: x[1])[0]
original_instance["choice_probs"] = scores
original_instance["all_probs"] = all_scores
original_instance["permutations"] = perms
output_dataset_wpred.append(original_instance)
return output_dataset_wpred
def evaluation_postprocessing(self, metrics, dataset_with_predictions):
dataset_with_predictions = self._reduce_permutations(dataset_with_predictions)
metrics["debiased_accuracy"] = [
float(d["generation"] == d["metadata"]["answer"]) for d in dataset_with_predictions
]
return metrics, dataset_with_predictions
|
atlas-main
|
src/tasks/multiple_choice.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from src.evaluation import exact_match_score
from src.tasks.base import BaseTask
class Task(BaseTask):
metrics = ["accuracy"]
def process(self, example, *args, **kwargs):
clean_input = example["claim"]
clean_target = ""
if "label" in example:
target = example["label"]
if target == "NOT ENOUGH INFO":
clean_target = "maybe"
elif target == "REFUTES":
clean_target = "false"
elif target == "SUPPORTS":
clean_target = "true"
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["metadata"] = example.get("metadata", {})
example["query"] = f"question: {clean_input} answer: <extra_id_0>"
if clean_target is not None:
example["target"] = f"<extra_id_0> {clean_target}"
example["passages"] = [{"title": "", "text": ""}]
example["metadata"]["clean_target"] = clean_target
example["answers"] = [clean_target]
return example
def evaluation(self, prediction, ground_truths):
sample_metrics = {"accuracy": exact_match_score(prediction, ground_truths)}
return sample_metrics
|
atlas-main
|
src/tasks/fever.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from src.evaluation import exact_match_score, f1_score, rouge_score
from src.options import Options
from src.tasks.base import BaseTask, filter_results_by_id
class Task(BaseTask):
metrics = ["eval_loss", "accuracy", "f1", "rouge_1", "rouge_2", "rouge_L"]
def __init__(self, opt: Options, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
self.tokenizer = tokenizer
self.min_words = opt.min_words_per_lm_instance
self.mlm_noise_density = opt.mlm_noise_density
self.mlm_mean_noise_span_length = opt.mlm_mean_noise_span_length
self.text_maxlength = opt.text_maxlength
def filter(self, *args, **kwargs):
"""Remove the passage we are trying to denoise from retrieved results"""
return filter_results_by_id(*args, **kwargs)
def process(self, example, *args, **kwargs):
"""Noises the target field using T5 MLM masking, saves the orginal target in metadata,"""
clean_target = example["text"]
if len(clean_target.strip()) == 0:
return None
if self.min_words is not None and len(clean_target.split()) < self.min_words:
return None
output_example = {}
inp, out = self.apply_mlm_noise(
self.tokenizer,
clean_target,
self.mlm_noise_density,
self.mlm_mean_noise_span_length,
self.text_maxlength,
)
if not "passages" in example:
output_example["passages"] = [{"title": "", "text": ""}]
output_example["query"] = inp
output_example["target"] = out
output_example["metadata"] = example
output_example["metadata"]["clean_target"] = clean_target
return output_example
def evaluation(self, prediction, ground_truths):
sample_metrics = {}
sample_metrics["accuracy"] = exact_match_score(prediction, ground_truths)
sample_metrics["f1"] = f1_score(prediction, ground_truths)
rouge_1, rouge_2, rouge_L = rouge_score(prediction, ground_truths)
sample_metrics["rouge_1"] = rouge_1
sample_metrics["rouge_2"] = rouge_2
sample_metrics["rouge_L"] = rouge_L
return sample_metrics
@staticmethod
def apply_mlm_noise(
tokenizer,
text,
mlm_noise_density,
mlm_mean_noise_span_length,
max_input_length,
):
tokens = tokenizer(text, add_special_tokens=False, max_length=max_input_length, truncation=True)["input_ids"]
length = len(tokens)
num_noise_tokens = max(round(length * mlm_noise_density), 1)
num_noise_spans = max(round(num_noise_tokens / mlm_mean_noise_span_length), 1)
num_nonnoise_tokens = length - num_noise_tokens
def _get_span_lengths(num_items, num_segments):
positions = [i < (num_segments - 1) for i in range(num_items - 1)]
random.shuffle(positions)
positions.append(True)
output, prev_span_start = [], -1
for i, n in enumerate(positions):
if n:
output.append(i - prev_span_start)
prev_span_start = i
return output
noise_span_lengths = _get_span_lengths(num_noise_tokens, num_noise_spans)
nonnoise_span_lengths = _get_span_lengths(num_nonnoise_tokens, num_noise_spans)
inputs, outputs, offset = [], [], 0
for i, (inp_length, out_length) in enumerate(zip(nonnoise_span_lengths, noise_span_lengths)):
sentinel_id = tokenizer.additional_special_tokens_ids[i]
inputs += tokens[offset : offset + inp_length] + [sentinel_id]
offset += inp_length
outputs += [sentinel_id] + tokens[offset : offset + out_length]
offset += out_length
return tokenizer.decode(inputs), tokenizer.decode(outputs)
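    # Masking sketch (the canonical T5 example; assumes a T5-style tokenizer):
    #   text   : "Thank you for inviting me to your party last week ."
    #   inputs : "Thank you <extra_id_0> to <extra_id_1> week ."
    #   outputs: "<extra_id_0> for inviting me <extra_id_1> your party last"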
|
atlas-main
|
src/tasks/mlm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from . import base, fever, kilt, lm, mlm, multiple_choice, qa, section
AVAILABLE_TASKS = {m.__name__.split(".")[-1]: m for m in [base, mlm, lm, multiple_choice, kilt, section, fever, qa]}
def get_task(opt, tokenizer):
if opt.task not in AVAILABLE_TASKS:
raise ValueError(f"{opt.task} not recognised")
task_module = AVAILABLE_TASKS[opt.task]
return task_module.Task(opt, tokenizer)
|
atlas-main
|
src/tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from src.evaluation import exact_match_score, f1_score, rouge_score
from src.options import Options
from src.tasks.base import BaseTask, filter_results_by_id
logger = logging.getLogger(__name__)
class Task(BaseTask):
metrics = ["eval_loss", "accuracy", "f1", "rouge_1", "rouge_2", "rouge_L"]
def __init__(self, opt: Options, *args, **kwargs):
self.min_words = opt.min_words_per_lm_instance
def process(self, example, *args, **kwargs):
if not "section" in example or len(example["section"].strip()) == 0:
return
query = ", ".join([example["title"], example["section"]])
text = example["text"]
if len(text.strip()) == 0:
return
if self.min_words is not None and len(text.split()) < self.min_words:
return
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["query"] = query
example["target"] = text
example["metadata"] = {}
example["metadata"]["id"] = example["id"]
return example
def evaluation(self, prediction, ground_truths):
sample_metrics = {}
sample_metrics["accuracy"] = exact_match_score(prediction, ground_truths)
sample_metrics["f1"] = f1_score(prediction, ground_truths)
rouge_1, rouge_2, rouge_L = rouge_score(prediction, ground_truths)
sample_metrics["rouge_1"] = rouge_1
sample_metrics["rouge_2"] = rouge_2
sample_metrics["rouge_L"] = rouge_L
return sample_metrics
def filter(self, *args, **kwargs):
"""Remove the passage we are trying to generate from retrieved results"""
return filter_results_by_id(*args, **kwargs)
|
atlas-main
|
src/tasks/section.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List
from src.evaluation import exact_match_score, f1_score, normalize_answer
from src.tasks.base import BaseTask
class Task(BaseTask):
metrics = ["accuracy", "exact_match", "f1"]
def process(self, example, *args, **kwargs):
clean_input = example["input"]
answers = list(self.get_gold_answers(example))
if "filename" in example and "fever" in example["filename"]:
answers = ["true" if a == "SUPPORTS" else "false" for a in answers]
clean_target = random.choice(answers)
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["metadata"] = example.get("metadata", {})
example["query"] = f"question: {clean_input} answer: <extra_id_0>"
example["target"] = f"<extra_id_0> {clean_target}"
example["answers"] = answers
example["passages"] = [{"title": "", "text": ""}]
example["metadata"]["clean_target"] = clean_target
return example
def get_gold_answers(self, gold):
ground_truths = set()
for item in gold["output"]:
if "answer" in item and item["answer"] and len(item["answer"].strip()) > 0:
ground_truths.add(item["answer"].strip())
return ground_truths
def evaluation(self, prediction: str, ground_truths: List[str]):
sample_metrics = {
"accuracy": exact_match_score(prediction, ground_truths),
"exact_match": exact_match_score(prediction, ground_truths, normalize_answer),
"f1": f1_score(prediction, ground_truths, normalize_answer),
}
return sample_metrics
|
atlas-main
|
src/tasks/kilt.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
import re
from src.evaluation import exact_match_score, f1_score, rouge_score
from src.options import Options
from src.tasks.base import BaseTask, filter_results_by_id
logger = logging.getLogger(__name__)
class Task(BaseTask):
metrics = ["eval_loss", "accuracy", "f1", "rouge_1", "rouge_2", "rouge_L"]
def __init__(self, opt: Options, *args, **kwargs):
self.min_words = opt.min_words_per_lm_instance
self.min_context_ratio = opt.min_lm_context_ratio
self.max_context_ratio = opt.max_lm_context_ratio
def filter(self, *args, **kwargs):
"""Remove the passage we are trying to generate from retrieved results"""
return filter_results_by_id(*args, **kwargs)
def process(self, example, *args, **kwargs):
text = example["text"]
if len(text.strip()) == 0:
return
if self.min_words is not None and len(text.split()) < self.min_words:
return
inp, out = self.split(text, self.min_context_ratio, self.max_context_ratio)
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["query"] = inp
example["target"] = out
example["metadata"] = {}
example["metadata"]["id"] = example["id"]
return example
@staticmethod
def split(text, min_context_ratio, max_context_ratio):
"""Splits text into two segments for langauge modelling.
Left segment is conditioning context, right segment is for generating.
The left segment must be between min_context_ratio and max_context_ratio of right segement in terms of length.
"""
words = re.split(r"(\S+)", text)
min_length = int(max(2, len(words) * min_context_ratio))
max_length = int(max(min(len(words) - 2, len(words) * max_context_ratio), min_length + 1))
split_idx = random.randint(min_length, max_length)
inp = "".join(words[:split_idx])
out = "".join(words[split_idx:])
return inp, out
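    # Split sketch (illustrative; the cut point is sampled uniformly): with
    # ratios (0.25, 0.75), split("a b c d e f g h", 0.25, 0.75) could return
    # ("a b c ", "d e f g h") -- prefix as query, suffix as generation target.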
def evaluation(self, prediction, ground_truths):
sample_metrics = {}
sample_metrics["accuracy"] = exact_match_score(prediction, ground_truths)
sample_metrics["f1"] = f1_score(prediction, ground_truths)
rouge_1, rouge_2, rouge_L = rouge_score(prediction, ground_truths)
sample_metrics["rouge_1"] = rouge_1
sample_metrics["rouge_2"] = rouge_2
sample_metrics["rouge_L"] = rouge_L
return sample_metrics
|
atlas-main
|
src/tasks/lm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import random
from collections import defaultdict
from src.evaluation import exact_match_score
logger = logging.getLogger(__name__)
class BaseTask(object):
metrics = ["accuracy", "eval_loss"]
def __init__(self, *args, **kwargs):
self.filter = None
@staticmethod
def data_iterator(filenames, world_rank=-1, world_size=-1, repeat_if_less_than_world_size=False, *args, **kwargs):
if isinstance(filenames, str):
filenames = [filenames]
def _iter():
# iterate over files
return (line for filename in filenames for line in open(filename, encoding="utf-8"))
def _stop():
# stop iterating over data when at least one example has been fed to each worker
return (total_yielded >= world_size) if repeat_if_less_than_world_size else (total_yielded > 0)
total_yielded = 0
while not _stop():
for line in _iter():
total_yielded += 1
if world_rank > -1 and total_yielded % world_size != world_rank:
continue
example = json.loads(line)
yield example
@staticmethod
def batch_iterator(data_iterator, batch_size, drop_last=False, shuffle=False):
if shuffle:
data_iterator = BaseTask.shuffle_iterator(data_iterator)
batch = defaultdict(lambda: [])
batch["__size__"] = 0
batch_counter = 0
for example in data_iterator:
for k, v in example.items():
batch[k].append(v)
batch["__size__"] += 1
if batch["__size__"] == batch_size:
batch_counter += 1
yield batch
batch = defaultdict(lambda: [])
batch["__size__"] = 0
if batch["__size__"] > 0 and not drop_last:
yield batch
def evaluation(self, prediction, ground_truths):
"""most basic evaluation: checks if prediction matches ground truth"""
sample_metrics = {"accuracy": exact_match_score(prediction, ground_truths)}
return sample_metrics
@staticmethod
def shuffle_iterator(dataset):
d = list(dataset)
random.shuffle(d)
for x in d:
yield x
def process(self, example, *args, **kwargs):
"""most basic example processing, should be overwritten in subclasses"""
assert "target" in example, "base task requires a `target` field string to be defined"
assert "query" in example, "base task requires a `query` field string to be defined"
assert type(example["target"]) == str, "base task requires a `target` field string to be defined"
assert type(example["query"]) == str, "base task requires a `query` field string to be defined"
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
return example
def evaluation_postprocessing(self, metrics, dataset_with_predictions):
"""do any necessary postprocessing of generated predictions or metrics after the evaluation loop"""
return metrics, dataset_with_predictions
def filter_results_by_id(batch_metadata, passages, scores, topk, training=False):
"""
    Removes retrieved passages from the retrieved set if their id matches that of the instance in the batch metadata.
    Useful for MLM or LM, where we don't want the model to "cheat" by retrieving the passage it is denoising/generating.
    If fewer than topk results remain once violating passages are removed, the violating
    passages are added back, with a warning.
"""
if batch_metadata is None:
logger.warning("Trying to filter a batch with no metadata - probably a padding instance - just return the topk")
return [ps[:topk] for ps in passages], [ss[:topk] for ss in scores]
def _same_passage_chunk(source_metadata, passage):
return passage["id"] == source_metadata["id"]
output_passages, output_scores = [], []
for metadata, passage_li, scores_li in zip(batch_metadata, passages, scores):
filtered_passages_and_scores, violating_passages_and_scores = [], []
for (p, s) in zip(passage_li, scores_li):
if not _same_passage_chunk(metadata, p):
filtered_passages_and_scores.append((p, s))
else:
violating_passages_and_scores.append((p, s))
        if topk > len(filtered_passages_and_scores):
            logger.warning(
                f"only {len(filtered_passages_and_scores)} passages remain after filtering, but topk = {topk}; adding filtered-out passages back"
            )
            filtered_passages_and_scores += violating_passages_and_scores
filtered_passages, filtered_scores = zip(*filtered_passages_and_scores)
output_passages.append(filtered_passages)
output_scores.append(filtered_scores)
return [ps[:topk] for ps in output_passages], [ss[:topk] for ss in output_scores]
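
# Illustrative sketch (not part of the original file): one batch instance whose
# own passage (id "a") appears in the retrieved set and gets filtered out.
_passages = [[{"id": "a", "text": "self"}, {"id": "b", "text": "other"}]]
_scores = [[0.9, 0.8]]
_kept, _kept_scores = filter_results_by_id([{"id": "a"}], _passages, _scores, topk=1)
assert _kept[0][0]["id"] == "b" and _kept_scores[0] == (0.8,)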
|
atlas-main
|
src/tasks/base.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from src.evaluation import exact_match_score, f1_score, normalize_answer
from src.options import Options
from src.tasks.base import BaseTask
class Task(BaseTask):
metrics = ["exact_match", "f1", "eval_loss"]
def __init__(self, opt: Options, *args, **kwargs):
super().__init__()
self.qa_prompt_format_str = opt.qa_prompt_format
def get_qa_prompt(self, question: str) -> str:
return self.qa_prompt_format_str.format(question=question)
def process(self, example, *args, **kwargs):
if "target" in example:
target = example["target"]
elif "answers" in example:
target = random.choice(example["answers"])
else:
target = None
if not "passages" in example:
example["passages"] = [{"title": "", "text": ""}]
example["metadata"] = example.get("metadata", {})
example["query"] = self.get_qa_prompt(example["question"])
if target is not None:
example["target"] = f"<extra_id_0> {target}"
return example
def evaluation(self, prediction, ground_truths):
sample_metrics = {
"exact_match": exact_match_score(prediction, ground_truths, normalize_answer),
"f1": f1_score(prediction, ground_truths, normalize_answer),
}
return sample_metrics
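
# Illustrative sketch (not part of the original file): how Task.process shapes
# a raw QA example; the prompt format string below is an assumption, not the
# repository default.
class _StubOptions:
    qa_prompt_format = "question: {question} answer:"
_task = Task(_StubOptions())
_ex = _task.process({"question": "who wrote Hamlet?", "answers": ["Shakespeare"]})
assert _ex["query"] == "question: who wrote Hamlet? answer:"
assert _ex["target"] == "<extra_id_0> Shakespeare"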
|
atlas-main
|
src/tasks/qa.py
|
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import os
import glob
import shutil
parser = argparse.ArgumentParser(
description='Generates XML RTL descriptor file for OpenCL compilation',
epilog='', formatter_class=argparse.RawTextHelpFormatter
)
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--input', '-i', metavar='<input file>', type=str,
nargs=1, required=True, help='input file')
requiredNamed.add_argument('--output_xml', '-x', metavar='<output file>', type=str,
nargs=1, required=True, help='output file')
requiredNamed.add_argument('--output_rtl', '-t', metavar='<output file>', type=str,
nargs=1, required=True, help='output file')
requiredNamed.add_argument('--rtl_root', '-r', metavar='<rtl root>', type=str,
nargs=1, required=True, help='rtl root location')
requiredNamed.add_argument('--output_stub', '-s', metavar='<output file>', type=str,
nargs=1, required=True, help='output file')
args = parser.parse_args()
input_file = open(args.input[0], 'r')
output_xml = open(args.output_xml[0], 'w')
output_rtl = open(args.output_rtl[0], 'w')
output_stub = open(args.output_stub[0], 'w')
type_width = None
acc_width = None
product_width = None
acc_divide_cycles = None
type_divide_cycles = None
rtl_files = []
stub_files = []
def include_sv_files(file_list, cur_dir=False):
for filename in file_list:
if (not cur_dir):
filename = os.path.join(args.rtl_root[0], filename)
rtl_files.append(filename)
def include_files(file_list, cur_dir=False):
for filename in file_list:
if (not cur_dir):
filename = os.path.join(args.rtl_root[0], filename)
shutil.copyfile(filename, os.path.basename(filename))
def include_stub_files(file_list):
for filename in file_list:
stub_files.append(filename)
def set_type_width(w):
    # FIXME: huh? -- globals() lets code exec'd from START_PY blocks set
    # these module-level parameters
    globals()['type_width'] = w
def set_acc_width(w):
globals()['acc_width'] = w
def set_product_width(w):
globals()['product_width'] = w
def set_acc_divide_cycles(c):
globals()['acc_divide_cycles'] = c
def set_type_divide_cycles(c):
globals()['type_divide_cycles'] = c
lines = []
doing_python = 0
code_block = ''
comment_indent = ''
RE_PYTHON_BLOCK_BEGIN = re.compile(r"^(\s*)START_PY(\s*)$")
RE_PYTHON_BLOCK_END = re.compile(r'^(\s*)END_PY(\s*)$')
for line in input_file:
reg0 = re.search(RE_PYTHON_BLOCK_BEGIN, line)
reg1 = re.search(RE_PYTHON_BLOCK_END, line)
if doing_python == 0 and reg0:
doing_python = 1
code_block = ''
lines.append(reg0.group(1) + '\n<!-- python -->\n')
comment_indent = reg0.group(1)
elif doing_python == 1 and reg1:
doing_python = 0
try:
exec(code_block)
except Exception:
print("Error in code:\n" + code_block + "\n")
raise
lines.append(reg1.group(1) + '\n<!-- end python -->\n')
elif doing_python == 1:
dum = re.sub(r"^(" + comment_indent + r")", r'', line)
code_block += dum
else:
# Main XML block
line = re.sub('(TYPE_WIDTH)', '{}'.format(type_width), line)
line = re.sub('(ACC_WIDTH)', '{}'.format(acc_width), line)
line = re.sub('(PRODUCT_WIDTH)', '{}'.format(product_width), line)
line = re.sub('(ACC_DIVIDE_CYCLES)', '{}'.format(acc_divide_cycles), line)
line = re.sub('(TYPE_DIVIDE_CYCLES)', '{}'.format(type_divide_cycles), line)
lines.append(line)
for line in lines:
output_xml.write(line)
input_file.close()
output_xml.close()
# write the single RTL file
for filename in rtl_files:
f = open(filename, 'r')
output_rtl.write("// ***\n// *** RTL from source file {}\n// ***\n\n".format(filename))
for line in f:
output_rtl.write(line)
f.close()
    output_rtl.write("\n\n")
output_rtl.close()
# write the single stub OpenCL file
for filename in stub_files:
f = open(filename, 'r')
output_stub.write("// ***\n// *** OpenCL from source file {}\n// ***\n\n".format(filename))
for line in f:
output_stub.write(line)
f.close()
    output_stub.write("\n\n")
output_stub.close()
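
# Illustrative input fragment (an assumption about the template format, not
# taken from the repository): code between START_PY/END_PY markers is exec'd,
# and the uppercase placeholder tokens in the surrounding XML are substituted
# with the values it sets, e.g.:
#
#   START_PY
#   set_type_width(8)
#   set_acc_width(32)
#   include_sv_files(['adder.sv'])
#   END_PY
#   <width>TYPE_WIDTH</width>   ->   <width>8</width>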
|
deepfloat-main
|
bitstream/build_xml.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import fpga
import fpga_resnet
import torch
from torch.utils.cpp_extension import CppExtension, BuildExtension
import torchvision.models as models
import validate
aocx_file = 'loglib'
ext, dev = fpga.init_fpga(aocx_file)
class FpgaNN():
def __init__(self, model, mul_factor=1.0):
self.model = model
self.output_p = None
self.mul_factor = mul_factor
def forward(self, input):
input_p = ext.to_posit(*dev, input)
self.output_p = self.model.forward(*dev, input_p)
# FIXME: attempt to fix d2h copy assert
dev[2].blockingWait()
return ext.to_float(*dev, self.output_p).mul_(self.mul_factor)
def forward_p(self, input):
input_p = ext.to_posit(*dev, input)
self.output_p = self.model.forward(*dev, input_p)
def forward_f(self):
return ext.to_float(*dev, self.output_p).mul_(self.mul_factor)
def get_fpga_mods(model):
def append_mod(mods, m, name):
mods.append([name, m])
mods = []
for m, name in zip([model.conv1, model.maxpool],
['conv1', 'maxpool']):
append_mod(mods, m, name)
for layer, layer_name in zip([model.layer1, model.layer2, model.layer3, model.layer4],
['layer1', 'layer2', 'layer3', 'layer4']):
for idx, seq in enumerate(layer):
for m, name in zip([seq.conv1, seq.conv2],
['conv1', 'conv2']):
append_mod(mods, m,
'{}.{}.{}'.format(layer_name, idx, name))
if (hasattr(seq, 'conv3')):
append_mod(mods, seq.conv3,
'{}.{}.{}'.format(layer_name, idx, 'conv3'))
if (seq.downsample):
append_mod(mods, seq.downsample,
'{}.{}.{}.0'.format(layer_name, idx, 'downsample'))
append_mod(mods, seq.add,
'{}.{}.{}'.format(layer_name, idx, 'add'))
for m, name in zip([model.avgpool, model.fc], ['avgpool', 'fc']):
append_mod(mods, m, name)
return mods
cpu_model = models.resnet50(True)
cpu_model.eval()
fc_n_scale = -4
fpga_model = fpga_resnet.resnet50(ext, *dev)
fpga_model.fc.setOutputScale(fc_n_scale)
fpga_resnet.fuse_resnet_params(ext, dev, cpu_model, fpga_model, fc_mul=1.0)
loader = validate.make_loader(batch_size=16, random=False)
scale = 2.0 ** fc_n_scale
mod = FpgaNN(fpga_model, 1.0 / scale)
print('ResNet-50 {}:'.format(aocx_file))
validate.validate(loader,
limit=None,
fpga_h=mod,
# reference_model=cpu_model)
reference_model=None)
|
deepfloat-main
|
py/run_fpga_resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import fpga
import validate
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.models.resnet as resnet
# def fuse_bn(conv, bn):
# conv_w = conv.weight.clone()
# conv_b = None
# if (conv.bias):
# conv_b = conv.bias.clone()
# else:
# conv_b = torch.FloatTensor(conv_w.size(0)).zero_()
# for c in range(conv_w.size(0)):
# bn_mean = bn.running_mean[c]
# bn_var = bn.running_var[c]
# bn_weight = bn.weight[c]
# bn_bias = bn.bias[c]
# inv_var = 1.0 / math.sqrt(bn_var + 1e-5)
# conv_w[c].mul_(bn_weight * inv_var)
# conv_b[c].add_(-bn_mean * inv_var * bn_weight + bn_bias)
# return conv_w, conv_b
# def fuse_resnet_params(m):
# convs = []
# convs.append([m.conv1, m.bn1])
# for seq in [m.layer1, m.layer2, m.layer3, m.layer4]:
# for bb in seq:
# convs.append([bb.conv1, bb.bn1])
# convs.append([bb.conv2, bb.bn2])
# if (bb.conv3):
# convs.append([bb.conv3, bb.bn3])
# if (bb.downsample):
# convs.append([bb.downsample[0], bb.downsample[1]])
# params = []
# for c in convs:
# w, b = fuse_bn(c[0], c[1])
# params.append(['conv', [w, b]])
# params.append(['fc', [m.fc.weight, m.fc.bias]])
# return params
# def orig_resnet_params(m):
# modules = []
# modules.extend([['conv', m.conv1], ['bn', m.bn1]])
# for seq in [m.layer1, m.layer2, m.layer3, m.layer4]:
# for bb in seq:
# modules.extend([['conv', bb.conv1], ['bn', bb.bn1]])
# modules.extend([['conv', bb.conv2], ['bn', bb.bn2]])
# if (bb.conv3):
# modules.extend([['conv', bb.conv3], ['bn', bb.bn3]])
# if (bb.downsample):
# modules.extend([['conv', bb.downsample[0]], ['bn', bb.downsample[1]]])
# modules.append(['fc', m.fc])
# params = []
# for m in modules:
# if (m[0] == 'conv'):
# if (m[1].bias != None):
# params.append([m[0], [m[1].weight,
# m[1].bias]])
# else:
# params.append([m[0], [m[1].weight]])
# elif (m[0] == 'bn'):
# params.append([m[0], [m[1].running_mean,
# m[1].running_var,
# m[1].weight,
# m[1].bias]])
# elif (m[0] == 'fc'):
# params.append([m[0], [m[1].weight,
# m[1].bias]])
# return params
# destructively updates conv
def fuse_bn(conv, bn):
conv_w = conv.weight
conv_b = None
    # `is not None` avoids the ambiguous truth value of a multi-element tensor
    if conv.bias is not None:
        conv_b = conv.bias
    else:
        conv_b = torch.FloatTensor(conv_w.size(0)).zero_()
        conv.bias = torch.nn.Parameter(conv_b)
for c in range(conv_w.size(0)):
bn_mean = bn.running_mean[c]
bn_var = bn.running_var[c]
bn_weight = bn.weight[c]
bn_bias = bn.bias[c]
inv_var = 1.0 / math.sqrt(bn_var + 1e-5)
conv_w[c].mul_(bn_weight * inv_var)
conv_b[c].add_(-bn_mean * inv_var * bn_weight + bn_bias)
# param_stats = []
# act_stats = []
# def get_stats(t):
# t_abs = t.abs()
# t_sort = t_abs.view(t_abs.nelement()).sort()[0]
# num = t_sort.nelement()
# return [t_sort[int(0.5 * num)].item(),
# t_sort[int(0.9 * num)].item(),
# t_sort[int(0.95 * num)].item(),
# t_sort[int(0.99 * num)].item(),
# t_sort[int(0.995 * num)].item(),
# t_sort[int(0.999 * num)].item(),
# t_sort[-1].item()]
# def print_act(name, t):
# act_stats.append([name, get_stats(t)])
# def print_params(name, m):
# w = get_stats(m.weight)
# b = None
# if m.bias is not None:
# b = get_stats(m.bias)
# param_stats.append([name, w, b])
def new_forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
if (hasattr(self, 'conv3')):
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample[0](x)
out += residual
out = self.relu(out)
return out
def new_resnet_forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def fuse_resnet_params(m):
    # patch forwards to skip the (now fused) batch-norm layers, then fold each
    # bn into its preceding conv and delete it
    resnet.Bottleneck.forward = new_forward
    resnet.ResNet.forward = new_resnet_forward
    m.fused = True
fuse_bn(m.conv1, m.bn1)
del m.bn1
for seq in [m.layer1, m.layer2, m.layer3, m.layer4]:
seq.fused = True
for bb in seq:
bb.fused = True
fuse_bn(bb.conv1, bb.bn1)
del bb.bn1
fuse_bn(bb.conv2, bb.bn2)
del bb.bn2
if (hasattr(bb, 'conv3')):
fuse_bn(bb.conv3, bb.bn3)
del bb.bn3
if (bb.downsample):
fuse_bn(bb.downsample[0], bb.downsample[1])
del bb.downsample[1]
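
# Illustrative usage (not part of the original file): fold every batch-norm of
# a torchvision ResNet into its preceding conv, then run inference bn-free.
#   model = models.resnet50(True)
#   model.eval()
#   with torch.no_grad():          # fuse_bn updates Parameters in place
#       fuse_resnet_params(model)
#   out = model(torch.randn(1, 3, 224, 224))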
|
deepfloat-main
|
py/examine_resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import torch
from torch.utils.cpp_extension import CppExtension, BuildExtension
def init_fpga(aocx_file, dir='../bitstream'):
files = []
files.extend(glob.glob('../cpp/utils/*.cpp'))
files.extend(glob.glob('../cpp/ops/*.cpp'))
files.extend(glob.glob('../cpp/layers/*.cpp'))
files.append('../cpp/PythonInterface.cpp')
aocl_compile_conf = subprocess.check_output(
['aocl', 'compile-config']).decode('utf-8').strip()
aocl_link_conf = subprocess.check_output(
['aocl', 'link-config']).decode('utf-8').strip()
ext = torch.utils.cpp_extension.load(
name='fpga_extension',
sources=files,
extra_cflags=[aocl_compile_conf, '-g'],
extra_ldflags=[aocl_link_conf],
extra_include_paths=['../cpp/'],
verbose=False)
dev = ext.fpga_init(dir, aocx_file)
return ext, dev
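
# Illustrative usage (not part of the original file, requires the FPGA
# toolchain): JIT-build the extension against a bitstream and round-trip a
# tensor through the posit representation.
#   ext, dev = init_fpga('loglib')
#   x_p = ext.to_posit(*dev, torch.randn(4, 8))
#   x = ext.to_float(*dev, x_p)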
|
deepfloat-main
|
py/fpga.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
from torch.utils.cpp_extension import CppExtension, BuildExtension
def inspect(name, ext, context, program, queue, x):
    # debugging hook, disabled by default; uncomment below to print activation stats
    return
# f = ext.to_float(context, program, queue, x).abs_()
# print('{}: mean {} max {}'.format(name, f.mean(), f.max()))
class Sequential():
def __init__(self, *args):
self.modules = [*args]
def __len__(self):
return len(self.modules)
def __getitem__(self, idx):
return self.modules[idx]
    def add(self, *args):
        for a in args:
            self.modules.append(a)
def forward(self, context, program, queue, x):
for m in self.modules:
x = m.forward(context, program, queue, x)
return x
class BasicBlock():
expansion = 1
def __init__(self, ext, context, program, queue,
inplanes, planes, stride=1, downsample=None):
self.ext = ext
self.conv1 = ext.Conv2d(context, program, queue,
inplanes, planes,
3, stride,
1, 1,
False, 0, 0)
self.relu1 = ext.ReLU(context, program, queue)
self.conv2 = ext.Conv2d(context, program, queue,
planes, planes,
3, 1,
1, 1,
False, 0, 0)
self.relu2 = ext.ReLU(context, program, queue)
self.downsample = downsample
self.stride = stride
self.add = ext.Add(context, program, queue, 0, 0, 0)
def forward(self, context, program, queue, x):
residual = x
ext = self.ext
out = self.conv1.forward(context, program, queue, x)
inspect("conv1", ext, context, program, queue, out)
out = self.relu1.forward(context, program, queue, out)
inspect("relu1", ext, context, program, queue, out)
out = self.conv2.forward(context, program, queue, out)
inspect("conv2", ext, context, program, queue, out)
if self.downsample is not None:
residual = self.downsample.forward(context, program, queue, x)
inspect("residual downsample", ext, context, program, queue, residual)
self.add.setAdd(residual)
# inspect("residual", ext, context, program, queue, residual)
out = self.add.forward(context, program, queue, out)
# inspect("add", ext, context, program, queue, out)
out = self.relu2.forward(context, program, queue, out)
inspect("relu2", ext, context, program, queue, out)
return out
class Bottleneck():
expansion = 4
def __init__(self, ext, context, program, queue,
inplanes, planes, stride=1, downsample=None):
self.ext = ext
self.conv1 = ext.Conv2d(context, program, queue,
inplanes, planes,
1, 1,
0, 0,
False, 0, 0)
self.relu1 = ext.ReLU(context, program, queue)
self.conv2 = ext.Conv2d(context, program, queue,
planes, planes,
3, stride,
1, 1,
False, 0, 0)
self.relu2 = ext.ReLU(context, program, queue)
self.conv3 = ext.Conv2d(context, program, queue,
planes, planes * self.expansion,
1, 1,
0, 0,
False, 0, 0)
self.relu3 = ext.ReLU(context, program, queue)
self.downsample = downsample
self.stride = stride
self.add = ext.Add(context, program, queue, 0, 0, 0)
def forward(self, context, program, queue, x):
residual = x
ext = self.ext
out = self.conv1.forward(context, program, queue, x)
inspect("bottleneck conv1", ext, context, program, queue, out)
out = self.relu1.forward(context, program, queue, out)
inspect("bottleneck relu1", ext, context, program, queue, out)
out = self.conv2.forward(context, program, queue, out)
inspect("bottleneck conv2", ext, context, program, queue, out)
out = self.relu2.forward(context, program, queue, out)
inspect("bottleneck relu2", ext, context, program, queue, out)
out = self.conv3.forward(context, program, queue, out)
inspect("bottleneck conv3", ext, context, program, queue, out)
if self.downsample is not None:
residual = self.downsample.forward(context, program, queue, x)
inspect("residual downsample", ext, context, program, queue, residual)
self.add.setAdd(residual)
out = self.add.forward(context, program, queue, out)
inspect("bottleneck add", ext, context, program, queue, out)
out = self.relu3.forward(context, program, queue, out)
inspect("bottleneck relu3", ext, context, program, queue, out)
return out
class ResNet():
def __init__(self, ext, context, program, queue,
block, layers, num_classes=1000):
self.inplanes = 64
self.ext = ext
self.conv1 = ext.Conv2d(context, program, queue,
3, 64,
7, 2,
3, 3, False, 0, 0)
self.relu = ext.ReLU(context, program, queue)
self.maxpool = ext.Pool2d(context, program, queue,
3, 2, 1, 1, ext.PoolOp.Max, 0, 0)
self.layer1 = self._make_layer(ext, context, program, queue,
block, 64, layers[0])
self.layer2 = self._make_layer(ext, context, program, queue,
block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(ext, context, program, queue,
block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(ext, context, program, queue,
block, 512, layers[3], stride=2)
self.avgpool = ext.Pool2d(context, program, queue,
7, 1, 0, 0, ext.PoolOp.Avg, 0, 0)
self.view = ext.View(context, program, queue,
[[0], [1, 2, 3]])
self.fc = ext.Linear(context, program, queue,
512 * block.expansion, num_classes, True, 0, 0)
def _make_layer(self, ext, context, program, queue,
block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = ext.Conv2d(context, program, queue,
self.inplanes, planes * block.expansion,
1, stride, 0, 0,
False, 0, 0)
layers = []
layers.append(block(ext, context, program, queue,
self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(ext, context, program, queue,
self.inplanes, planes))
return Sequential(*layers)
def forward(self, context, program, queue, x):
ext = self.ext
inspect("input", ext, context, program, queue, x)
x = self.conv1.forward(context, program, queue, x)
inspect("conv1", ext, context, program, queue, x)
x = self.relu.forward(context, program, queue, x)
inspect("relu1", ext, context, program, queue, x)
x = self.maxpool.forward(context, program, queue, x)
inspect("maxpool", ext, context, program, queue, x)
x = self.layer1.forward(context, program, queue, x)
inspect("layer1 out", ext, context, program, queue, x)
x = self.layer2.forward(context, program, queue, x)
inspect("layer2 out", ext, context, program, queue, x)
x = self.layer3.forward(context, program, queue, x)
inspect("layer3 out", ext, context, program, queue, x)
x = self.layer4.forward(context, program, queue, x)
inspect("layer4 out", ext, context, program, queue, x)
x = self.avgpool.forward(context, program, queue, x)
inspect("avgpool out", ext, context, program, queue, x)
x = self.view.forward(context, program, queue, x)
inspect("view out", ext, context, program, queue, x)
x = self.fc.forward(context, program, queue, x)
inspect("fc out", ext, context, program, queue, x)
return x
def resnet18(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(ext, context, program, queue, pretrained=False, **kwargs):
model = ResNet(ext, context, program, queue, Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def fuse_bn(conv, bn):
conv_w = conv.weight.clone()
conv_b = None
    if conv.bias is not None:
conv_b = conv.bias.clone()
else:
conv_b = torch.FloatTensor(conv_w.size(0)).zero_()
for c in range(conv_w.size(0)):
bn_mean = bn.running_mean[c]
bn_var = bn.running_var[c]
bn_weight = bn.weight[c]
bn_bias = bn.bias[c]
inv_var = 1.0 / math.sqrt(bn_var + 1e-5)
conv_w[c].mul_(bn_weight * inv_var)
conv_b[c].add_(-bn_mean * inv_var * bn_weight + bn_bias)
return conv_w, conv_b
def apply_params(ext, dev, w, b, m):
w_p = ext.to_posit(*dev, w)
b_p = ext.to_posit(*dev, b)
m.setWeight(*dev, w_p)
m.setBias(*dev, b_p)
def fuse_apply_params(ext, dev, conv, bn, out_conv, w_scale=1.0, b_scale=1.0):
w, b = fuse_bn(conv, bn)
# w.mul_(w_scale)
# b.mul_(b_scale)
apply_params(ext, dev, w, b, out_conv)
def fuse_resnet_params(ext, dev, m_in, m_out, fc_mul=1.0):
fuse_apply_params(ext, dev, m_in.conv1, m_in.bn1, m_out.conv1)
for seq_in, seq_out in zip([m_in.layer1, m_in.layer2, m_in.layer3, m_in.layer4],
[m_out.layer1, m_out.layer2, m_out.layer3, m_out.layer4]):
for bb_in, bb_out in zip(seq_in, seq_out):
fuse_apply_params(ext, dev, bb_in.conv1, bb_in.bn1, bb_out.conv1)
fuse_apply_params(ext, dev, bb_in.conv2, bb_in.bn2, bb_out.conv2)
if (hasattr(bb_in, 'conv3')):
fuse_apply_params(ext, dev, bb_in.conv3, bb_in.bn3, bb_out.conv3)
if (bb_in.downsample):
fuse_apply_params(ext, dev, bb_in.downsample[0],
bb_in.downsample[1],
bb_out.downsample)
apply_params(ext, dev,
m_in.fc.weight.mul(fc_mul),
m_in.fc.bias.mul(fc_mul), m_out.fc)
def gather_act(ext, dev, model):
def append_act(ext, dev, acts, m):
acts.append(m.getInput())
acts = []
for m in [model.conv1, model.relu, model.maxpool]:
append_act(ext, dev, acts, m)
for l in [model.layer1, model.layer2, model.layer3, model.layer4]:
for s in l:
for m in [s.conv1, s.relu1, s.conv2, s.relu2]:
append_act(ext, dev, acts, m)
            if (hasattr(s, 'conv3')):
                append_act(ext, dev, acts, s.conv3)
            if (s.downsample):
                append_act(ext, dev, acts, s.downsample)
            append_act(ext, dev, acts, s.add)
for m in [model.avgpool, model.fc]:
append_act(ext, dev, acts, m)
return acts
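
# Illustrative sketch (not part of the original file, requires FPGA hardware):
# build the posit ResNet-50 against an initialised extension and load fused
# float weights into it, mirroring run_fpga_resnet.py.
#   ext, dev = fpga.init_fpga('loglib')
#   fpga_model = resnet50(ext, *dev)
#   cpu_model = models.resnet50(True).eval()
#   fuse_resnet_params(ext, dev, cpu_model, fpga_model)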
|
deepfloat-main
|
py/fpga_resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import time
import glob
import subprocess
import sys
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def validate(val_loader, limit, fpga_h=None, reference_model=None):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
ref_batch_time = AverageMeter()
ref_losses = AverageMeter()
ref_top1 = AverageMeter()
ref_top5 = AverageMeter()
ref_end = time.time()
limit = limit or -1
criterion = nn.CrossEntropyLoss()
count = 0
for i, (input, target) in enumerate(val_loader):
count = count + 1
        if limit != -1 and count > limit:
            break
if (fpga_h):
end = time.time()
# fpga_h.forward_p(input)
if (reference_model):
ref_end = time.time()
ref_output = reference_model.forward(input)
# ref_target_var = torch.autograd.Variable(target, volatile=True)
ref_target_var = torch.autograd.Variable(target)
ref_loss = criterion(ref_output, ref_target_var)
prec1, prec5 = accuracy(ref_output, target, topk=(1, 5))
ref_losses.update(ref_loss.item(), input.size(0))
ref_top1.update(prec1[0], input.size(0))
ref_top5.update(prec5[0], input.size(0))
# measure elapsed time
ref_batch_time.update(time.time() - ref_end)
print('CPU float32: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
(i + 1) * val_loader.batch_size,
len(val_loader) * val_loader.batch_size,
batch_time=ref_batch_time, loss=ref_losses,
top1=ref_top1, top5=ref_top5))
sys.stdout.flush()
if (fpga_h):
# output = fpga_h.forward_f()
output = fpga_h.forward(input)
# target_var = torch.autograd.Variable(target, volatile=True)
target_var = torch.autograd.Variable(target)
loss = criterion(output, target_var)
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
print('FPGA: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
(i + 1) * val_loader.batch_size,
len(val_loader) * val_loader.batch_size,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
# return top1.avg.item(), top5.avg.item()
def make_loader(batch_size, random=False, seed=1):
valdir = '/home/jhj/imagenet/data/local/packages/ai-group.imagenet-full-size/prod/imagenet_full_size/val'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
sampler = None
if random:
sampler = torch.utils.data.RandomSampler(dataset)
torch.manual_seed(seed)
return torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=batch_size,
shuffle=False,
num_workers=0)
def sample_loader(loader):
for (input, target) in loader:
return input, target
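
# Illustrative usage (not part of the original file): pull a single batch from
# the ImageNet validation loader as a quick smoke test.
#   loader = make_loader(batch_size=8, random=True, seed=0)
#   images, labels = sample_loader(loader)
#   assert images.shape == (8, 3, 224, 224)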
|
deepfloat-main
|
py/validate.py
|
#!/usr/bin/python3
# Simple Python Fixed-Point Module (SPFPM)
# (C)Copyright 2006-2018, RW Penney
# This file is (C)Copyright 2006-2018, RW Penney
# and is released under the Python-2.4.2 license
# (see http://www.python.org/psf/license),
# it therefore comes with NO WARRANTY, and NO CLAIMS OF FITNESS FOR ANY PURPOSE.
# However, the author welcomes *constructive* feedback
# and bug-fixes via: rwpenney 'AT' users 'DOT' sourceforge 'DOT' net
"""
The Simple Python Fixed-Point Module (SPFPM) provides objects of types
FXnum and FXfamily which implement basic mathematical operations
on fixed-point binary numbers (i.e. having a fixed number of
fractional binary digits, with the number of integer digits being either
arbitrary or subject to a user-defined limit).
FXnum objects exist within a user-controllable collection of families
managed by the FXfamily class, which sets the number of fractional
& integer digits for each family. This can be used, for example,
to ensure that a set of 8-bit quantities can be manipulated consistently
and kept separate from a set of 200-bit quantities in the same program.
Conversion between FXnum objects in different families is supported,
but solely through an explicit cast.
>>> x = FXnum(2.1) # default FXfamily, with 64-bits
>>> print(x)
2.10000000000000008881
>>> x = FXnum(21) / 10 # fractional error ~1/2^64 or ~5e-20
>>> print(x)
2.09999999999999999996
>>> rx = x.sqrt() # rx created in same family as x
>>> print(rx)
1.44913767461894385735
>>> v = x + 2 * rx
>>> print(v)
4.99827534923788771467
>>> y = FXnum(3.2, FXfamily(12)) # lower-precision 12-bit number
>>> ly = y.log() # ly created in same family as y
>>> print(ly) # fractional error ~1/2^12 or ~2e-4
1.1628
>>> print(ly.exp())
3.1987
>>> fy = float(y)
>>> print(fy)
3.199951171875
>>> # a = x + y # throws exception - different families
>>> a = x + FXnum(y, _defaultFamily)
>>> print(a)
5.30007324218749999996
>>> b = rx + x # ok - same families
>>> # c = rx + ly # throws exception - different families
>>> d = ly + y # ok - same families
>>> a = FXnum(1.4, FXfamily(12, 4)) # limit magnitude to 2^(4-1)
>>> print(a)
1.3999
>>> print(a * 5, a * -5)
6.9995 -6.9995
>>> #print(a * 6, a * -6) # throws exception indicating overflow
>>> fam = FXfamily(200)
>>> print(fam.pi)
3.1415926535897932384626433832795028841971693993751058209749444
>>> # Accurate to 60 decimal places ^- first error
Note:
Be careful not to assume that a large number of fractional bits within
a number will necessarily mean large accuracy. For example, computations
involving exponentiation and logarithms are intrinsically vulnerable to
magnifying mere rounding errors in their inputs into significant errors
in their outputs. This is a fact of life with any approximation to
real arithmetic using finite-precision quantities.
SPFPM is provided as-is, with no warranty of any form.
"""
SPFPM_VERSION = '1.4.4'
class FXfamily(object):
"""Descriptor of the accuracy of a set of fixed-point numbers.
This class defines the fixed-point resolution of a set of FXnum objects.
All arithmetic operations between FXnum objects that are
not explicitly cast into a different FXfamily
must share the same FXfamily.
Multiple FXfamily objects can exist within the same application so that,
for example, sets of 12-bit, 32-bit & 200-bit quantities
can be manipulated concurrently.
"""
def __init__(self, n_bits=64, n_intbits=None):
self.fraction_bits = n_bits # Bits to right of binary point
self.integer_bits = n_intbits # Bits to left of binary point (including sign)
self.scale = 1 << n_bits
self._roundup = 1 << (n_bits - 1)
try:
thresh = 1 << (n_bits + n_intbits - 1)
def validate(scaledval):
if scaledval >= thresh or scaledval < -thresh:
raise FXoverflowError
        except TypeError:
            # integer_bits is None, so values are unbounded and never overflow:
            def validate(scaledval): return
self.validate = validate
# Cached values of various mathematical constants:
self._exp1, self._log2, self._pi, self._sqrt2 = (None,) * 4
@property
def resolution(self):
"""The number of fractional binary digits"""
return self.fraction_bits
@property
def exp1(self):
"""Inverse natural logarithm of unity."""
if self._exp1 is None:
# Brute-force calculation of exp(1) using augmented accuracy:
augfamily = self.augment()
            # exp(1) = (exp(1/4))**4, computed term-by-term at higher precision:
            arg = 1 / FXnum(4, augfamily)
            q0 = arg._rawexp()
            q1 = q0 * q0
            augexp = q1 * q1
self._exp1 = FXnum(augexp, self)
return self._exp1
@property
def log2(self):
"""Natural logarithm of two."""
if self._log2 is None:
# Brute-force calculation of log(2) using augmented accuracy
# via log(2) = 5log(3^12 / 2^19) - 12log(3^5 / 2^8)
augfamily = self.augment()
q0 = FXnum((3 ** 12) - (1 << 19), augfamily) >> 19
q1 = FXnum((3 ** 5) - (1 << 8), augfamily) >> 8
auglog2 = (5 * q0._rawlog(isDelta=True)
- 12 * q1._rawlog(isDelta=True))
self._log2 = FXnum(auglog2, self)
return self._log2
@property
def pi(self):
"""Ratio of circle's perimeter to its diameter."""
if self._pi is None:
# Use Bailey–Borwein–Plouffe representation of Pi,
# involving powers of 1/16 and simple rational terms:
augfamily = self.augment()
augpi = augfamily(0)
k4 = 0
while True:
k8 = k4 * 2
term = (4 / augfamily(k8 + 1)
- 2 / augfamily(k8 + 4)
- 1 / augfamily(k8 + 5)
- 1 / augfamily(k8 + 6)) >> k4
if term.scaledval == 0: break
augpi += term
k4 += 4
self._pi = FXnum(augpi, self)
return self._pi
@property
def sqrt2(self):
"""Square-root of two."""
if self._sqrt2 is None:
augfamily = self.augment()
x = FXnum(3, augfamily) >> 1
while True:
# Apply Newton-Raphson iteration to f(x)=2/(x*x)-1:
delta = (x * (2 - x * x)) >> 2
x += delta
if abs(delta.scaledval) <= 1:
break
self._sqrt2 = FXnum(x, self)
return self._sqrt2
@property
def unity(self):
"""The multiplicative identity."""
return FXnum(1, self)
@property
def zero(self):
"""The additive identity."""
return FXnum(0, self)
def __hash__(self):
return hash(self.fraction_bits)
def __repr__(self):
return 'FXfamily(n_bits={}, n_intbits={})'.format(self.fraction_bits,
self.integer_bits)
def __eq__(self, other):
try:
return (self.fraction_bits == other.fraction_bits
and self.integer_bits == other.integer_bits)
except AttributeError:
            return False
def __ne__(self, other):
try:
return (self.fraction_bits != other.fraction_bits
or self.integer_bits != other.integer_bits)
except AttributeError:
            return True
def __call__(self, val):
"""Create a fixed-point number within this family."""
return FXnum(val, family=self)
def convert(self, other, other_val):
"""Convert number from different number of fraction-bits"""
bit_inc = self.fraction_bits - other.fraction_bits
if bit_inc == 0:
return other_val
elif bit_inc > 0:
new_val = other_val << bit_inc
if other_val > 0:
new_val |= 1 << (bit_inc - 1)
else:
                new_val |= ((1 << (bit_inc - 1)) - 1)
return new_val
else:
# Safest approach is to truncate bits, rather than rounding:
return (other_val >> -bit_inc)
def augment(self, opcount=None):
"""Construct new FXfamily with enhanced resolution.
The returned FXfamily will have an increased number of fractional bits,
sufficient to accommodate the worst-case accumulation of 1-LSB errors
over the specified number of operations. If the supplied
operation-count is None, then this defaults to
the existing number of fractional digits.
"""
nb = opcount if opcount is not None else self.fraction_bits
augbits = 4
while nb > 0:
augbits += 1
nb >>= 1
return FXfamily(self.fraction_bits + augbits)
# ^^^ class FXfamily ^^^
_defaultFamily = FXfamily()
####
# Exceptions
#
class FXexception(ArithmeticError):
"""Base-class of exceptions generated by SPFPM operations"""
class FXdomainError(FXexception):
"""Signal that input argument of mathematical function is unsuitable"""
class FXoverflowError(FXexception):
"""Signal that value has overflowed its most-significant bit"""
class FXfamilyError(FXexception, TypeError):
"""Signal that family-types of FXnums in binary operation are mismatched"""
class FXbrokenError(FXexception):
"""Signal some form of internal error, e.g. broken logic"""
class FXnum(object):
"""Representation of a binary fixed-point real number."""
__slots__ = ('family', 'scaledval')
def __init__(self, val=0, family=_defaultFamily, **kwargs):
self.family = family
converter = family.convert
try:
# Assume that val is similar to FXnum:
self.scaledval = converter(val.family, val.scaledval)
except AttributeError:
self.scaledval = kwargs.get('scaled_value',
int(val * family.scale))
self.family.validate(self.scaledval)
@classmethod
def _rawbuild(cls, fam, sv):
"""Shortcut for creating new FXnum instance, for internal use only."""
num = object.__new__(cls)
fam.validate(sv)
num.family = fam
num.scaledval = sv
return num
def __hash__(self):
return hash(self.scaledval) ^ hash(self.family)
def __repr__(self):
"""Create unambiguous string representation of self"""
return 'FXnum(family={}, scaled_value={})'.format(self.family,
self.scaledval)
# Conversion operations:
def __int__(self):
"""Cast to integer"""
if self.scaledval >= 0:
return int(self.scaledval // self.family.scale)
else:
return int((self.scaledval + self.family.scale - 1) // self.family.scale)
def __float__(self):
"""Cast to floating-point"""
return float(self.scaledval) / float(self.family.scale)
def _CastOrFail_(self, other):
"""Turn number into FXnum or check that it is in same family"""
try:
# Binary operations must involve members of same family
if self.family != other.family:
raise FXfamilyError(1)
except AttributeError:
# Automatic casting from types other than FXnum is allowed:
other = FXnum(other, self.family)
return other
# Unary arithmetic operations:
def __abs__(self):
"""Modulus"""
if self.scaledval < 0:
return -self
else:
return self
def __neg__(self):
"""Change sign"""
return FXnum._rawbuild(self.family, -self.scaledval)
def __pos__(self):
"""Identity operation"""
return self
# Arithmetic comparison tests:
def __eq__(self, other):
"""Equality test"""
other = self._CastOrFail_(other)
return self.scaledval == other.scaledval and self.family == other.family
def __ne__(self, other):
"""Inequality test"""
other = self._CastOrFail_(other)
return self.scaledval != other.scaledval
def __ge__(self, other):
"""Greater-or-equal test"""
other = self._CastOrFail_(other)
return self.scaledval >= other.scaledval
def __gt__(self, other):
"""Greater-than test"""
other = self._CastOrFail_(other)
return self.scaledval > other.scaledval
def __le__(self, other):
"""Less-or-equal test"""
other = self._CastOrFail_(other)
return self.scaledval <= other.scaledval
def __lt__(self, other):
"""Greater-than test"""
other = self._CastOrFail_(other)
return self.scaledval < other.scaledval
def __bool__(self):
"""Test for truth/falsehood"""
return (self.scaledval != 0)
def __nonzero__(self):
"""Test for non-zero"""
return (self.scaledval != 0)
# Arithmetic combinations:
def __add__(self, other):
"""Add another number"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
(self.scaledval + other.scaledval))
def __radd__(self, other):
return FXnum(other, self.family) + self
def __sub__(self, other):
"""Subtract another number"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
(self.scaledval - other.scaledval))
def __rsub__(self, other):
return FXnum(other, self.family) - self
def __mul__(self, other):
"""Multiply by another number"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
((self.scaledval * other.scaledval
+ self.family._roundup)
// self.family.scale))
def __rmul__(self, other):
return FXnum(other, self.family) * self
def __lshift__(self, shift):
return FXnum._rawbuild(self.family,
(self.scaledval << shift))
def __rshift__(self, shift):
return FXnum._rawbuild(self.family,
(self.scaledval >> shift))
def __truediv__(self, other):
"""Divide by another number (without truncation)"""
other = self._CastOrFail_(other)
return FXnum._rawbuild(self.family,
((self.scaledval * self.family.scale
+ self.family._roundup)
// other.scaledval))
__div__ = __truediv__
def __rtruediv__(self, other):
return FXnum(other, self.family) / self
__rdiv__ = __rtruediv__
# Printing/converstion routines:
def __str__(self):
"""Convert number (as decimal) into string"""
return self.toDecimalString()
def toDecimalString(self, precision=None, round10=False):
"""Convert number (as decimal) into string
precision - The maximum number of digits after the decimal point.
round10 - Round last decimal digit of fractional part,
by adding 0.5/10^precision.
"""
# Despite rebinding costs, list+join idiom appears slower here
# than string concatenation building 'rep' from successive digits
famScale = self.family.scale
if precision is None or not isinstance(precision, int):
precision = int((3 + self.family.fraction_bits) / 3.32)
# Each fractional bit adds about log_10(2) decimal digits
val = self.scaledval
rep = ''
if self.scaledval < 0:
rep = '-'
val *= -1
if round10:
# Round (decimal) fractional part by adding half of last-digit:
decimalScale = 10 ** precision
val = (val * decimalScale + famScale // 2) // decimalScale
whole = val // famScale
frac = val - whole * famScale
rep += str(whole)
if frac != 0 and precision > 0:
rep += '.'
idx = 0
while idx < precision and frac != 0:
frac *= 10
q = frac // famScale
rep += str(q)
frac -= q * famScale
idx += 1
return rep
def toBinaryString(self, logBase=1, twosComp=True):
"""Convert number into string in base 2/4/8/16
logBase - log_2 of the number base for printing.
(e.g. 1 for binary, 3 for octal, 4 for hexadecimal).
This must be no greater than 4.
twosComp - Whether to convert negative numbers into
twos-complement form. If this is False,
then negative numbers are simply prefixed
by a minus sign.
Note that when negative numbers are converted to twos-complement form,
this may involve estimating how many bits are needed
to contain the integer part if this is not specified by the FXfamily.
"""
        if not isinstance(logBase, int) or logBase > 4 or logBase < 1:
            raise ValueError('logBase must be an integer between 1 and 4 (i.e. base 2..16)')
sign, prefix = 1, ''
if self.scaledval < 0 and not twosComp:
sign, prefix = -1, '-'
(bits, intDigits, fracDigits) = \
(sign * self)._toTwosComplement(logBase)
digits = []
mask = (1 << logBase) - 1
for dig in range(intDigits+fracDigits):
digits.append('{:1x}'.format(bits & mask))
bits >>= logBase
digits = ''.join(reversed(digits))
return prefix + digits[:-fracDigits] + '.' + digits[-fracDigits:]
def _toTwosComplement(self, logBase=1):
"""Convert binary representation to twos-complement for printing.
This will convert negative numbers into their twos-complement form,
and automatically guess the number of digits required to represent
the integer part of the invoking number. The returned bit-pattern
is aligned so that it has a whole number of digits (in base 1<<logBase)
both before and after the binary/octal/hexadecimal-point.
"""
fracDigits = (self.family.resolution + logBase - 1) // logBase
bitPattern = self.scaledval
if self.family.integer_bits is not None:
intDigits = (self.family.integer_bits + logBase - 1) // logBase
else:
intDigits = 1
intPart = self.scaledval >> self.family.resolution
if intPart >= 0:
while intPart >= (1 << (intDigits * logBase)):
intDigits += 1
else:
while (1 << (intDigits * logBase - 1)) + intPart < 0:
intDigits += 1
if bitPattern < 0:
bitPattern += 1 << (intDigits * logBase + self.family.resolution)
bitPattern <<= (fracDigits * logBase - self.family.resolution)
return (bitPattern, intDigits, fracDigits)
# Mathematical functions:
def __pow__(self, other, modulus=None):
"""Evaluate self ^ other"""
assert modulus is None
if self == 0:
return self.family.unity
ipwr = int(other)
        rmdr = other - ipwr
if rmdr == 0:
frac = self.family.unity
else:
frac = (rmdr * self.log()).exp()
return self.intpower(ipwr) * frac
def __rpow__(self, other):
return FXnum(other, self.family) ** self
def intpower(self, pwr):
"""Compute integer power by repeated squaring"""
assert isinstance(pwr, int)
invert = False
if pwr < 0:
pwr *= -1
invert = True
result = self.family.unity
term = self
while True:
if pwr & 1:
result *= term
pwr >>= 1
if not pwr:
break
term *= term
if invert:
result = FXnum(1, self.family) / result
return result
def sqrt(self):
"""Compute square-root of given number."""
if self.scaledval < 0:
raise FXdomainError
elif self.scaledval == 0:
return self
# Calculate crude initial approximation:
rt = FXnum(family=self.family,
scaled_value=(1 << (self.family.fraction_bits // 2)))
val = self.scaledval
while val > 0:
val >>= 2
rt.scaledval <<= 1
# Refine approximation by Newton iteration:
while True:
delta = (rt - self / rt) >> 1
rt -= delta
if delta.scaledval == 0: break
return rt
def exp(self):
"""Compute exponential of given number"""
pwr = int(self)
return (self - pwr)._rawexp() * (self.family.exp1 ** pwr)
def _rawexp(self):
"""Brute-force exponential of given number (assumed smallish)"""
ex = self.family.unity
term = self.family.unity
idx = 1
while True:
term *= self / idx
ex += term
idx += 1
if term.scaledval == 0: break
return ex
def log(self):
"""Compute (natural) logarithm of given number"""
if self.scaledval <= 0:
raise FXdomainError
elif self == 1:
return FXnum(0, self.family)
uprthresh = FXnum(1.6, self.family)
lwrthresh = uprthresh / 2
count = 0
val = self
while val > uprthresh:
val /= 2
count += 1
while val < lwrthresh:
val *= 2
count -= 1
return val._rawlog() + count * self.family.log2
def _rawlog(self, isDelta=False):
"""Compute (natural) logarithm of given number (assumed close to 1)"""
lg = self.family.zero
if isDelta:
z = self / (self + 2)
else:
z = (self - 1) / (self + 1)
z2 = z * z
term = 2 * z
idx = 1
while True:
lg += term / idx
term *= z2
idx += 2
if term.scaledval == 0: break
return lg
def sin(self):
"""Compute sine of given number (as angle in radians)"""
(ang, idx, reflect) = self._angnorm()
idx = idx % 4
if idx == 0: sn = ang._rawQsine(False)
elif idx == 1: sn = ang._rawQsine(True)
elif idx == 2: sn = -ang._rawQsine(False)
elif idx == 3: sn = -ang._rawQsine(True)
else: raise FXbrokenError
if reflect: sn *= -1
return sn
def asin(self):
"""Compute inverse sine of given number"""
arg = self
reflect = False
if self < 0:
arg *= -1
reflect = True
if arg <= 0.5:
asn = arg._rawarcsin()
else:
# apply 1-cos2t transformation:
cs2 = (1 - arg) / 2
if cs2 < 0: raise FXdomainError
asn = self.family.pi / 2 - 2 * cs2.sqrt()._rawarcsin()
if reflect: asn *= -1
return asn
def _rawarcsin(self):
"""Brute-force inverse-sine of given number.
This requires roughly as many integer bits as fractional bits,
in order to accommodate (2n!)/(n!n!).
"""
asn = FXnum(1, self.family)
x2 = self * self
x2n = x2
half = self.family.unity / 2
nCn = 2 # (2n)! / ((n!)^2)
idx = 1
while True:
delta = x2n * ((FXnum(nCn, self.family) >> (2 * idx))
/ (2 * idx + 1))
asn += delta
if delta.scaledval == 0: break
idx += 1
x2n *= x2
nCn = (nCn * 2 * (2 * idx - 1)) // idx
return self * asn
def cos(self):
"""Compute cosine of given number (as angle in radians)"""
(ang, idx, reflect) = self._angnorm()
idx = idx % 4
if idx == 0: cs = ang._rawQsine(True)
elif idx == 1: cs = -ang._rawQsine(False)
elif idx == 2: cs = -ang._rawQsine(True)
elif idx == 3: cs = ang._rawQsine(False)
else: raise FXbrokenError
return cs
def acos(self):
"""Compute inverse cosine of given number"""
arg = self
reflect = False
if self < 0:
arg *= -1
reflect = True
if arg <= 0.5:
acs = self.family.pi / 2 - arg._rawarcsin()
else:
# apply 1-cos2t transformation:
sn2 = (1 - arg) / 2
if sn2 < 0: raise FXdomainError
acs = 2 * (sn2.sqrt())._rawarcsin()
if reflect: acs = self.family.pi - acs
return acs
def sincos(self):
"""Compute sine & cosine of given number (as angle in radians)"""
(ang, idx, reflect) = self._angnorm()
osn = ang._rawQsine(False)
ocs = ang._rawQsine(True)
# transform according to sin(ang+offset), cos(ang+offset):
idx = idx % 4
if idx == 0: (sn, cs) = (osn, ocs)
elif idx == 1: (sn, cs) = (ocs, -osn)
elif idx == 2: (sn, cs) = (-osn, -ocs)
elif idx == 3: (sn, cs) = (-ocs, osn)
else: raise FXbrokenError
if reflect: sn *= -1
return (sn, cs)
def _angnorm(self):
"""Helper function for reducing angle modulo 2.Pi"""
reflect = False
ang = self
if ang < 0:
ang *= -1
reflect = True
# Find nearest multiple of pi/2:
halfpi = self.family.pi / 2
idx = int(ang / halfpi + 0.5)
ang -= idx * halfpi
return (ang, idx, reflect)
def _rawQsine(self, doCos=False, doHyp=False):
"""Helper function for brute-force calculation of sine & cosine"""
sn = self.family.zero
if doHyp:
x2 = self * self
else:
x2 = -self * self
term = self.family.unity
if doCos: idx = 1
else: idx = 2
while True:
sn += term
term *= x2 / (idx * (idx + 1))
idx += 2
if term.scaledval == 0: break
if doCos: return sn
else: return self * sn
def tan(self):
"""Compute tangent of given number (as angle in radians)"""
(sn, cs) = self.sincos()
return sn / cs
def atan(self):
"""Compute inverse-tangent of given number (as angle in radians)"""
reflect = False
recip = False
double = False
tan = self
if tan < 0:
tan *= -1
reflect = True
if tan > 1:
tan = 1 / tan
recip = True
if tan > 0.414:
tan = ((1 + tan * tan).sqrt() - 1) / tan
double = True
ang = tan._rawarctan()
if double:
ang *= 2
if recip:
ang = self.family.pi / 2 - ang
if reflect:
ang *= -1
return ang
def _rawarctan(self):
"""Brute-force inverse-tangent of given number (for |self|<1)."""
atn = 1
x2 = self * self
omx2 = 1 - x2
opx2 = 1 + x2
x4 = x2 * x2
term = x2
idx = 1
while True:
# Combine pair of successive terms with opposite signs:
delta = term * (4 * idx * omx2 + opx2) / (16 * idx * idx - 1)
atn -= delta
term *= x4
idx += 1
if delta.scaledval == 0: break
return self * atn
# ^^^ class FXnum ^^^
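
# Illustrative sketch (not part of the original module): fixed-point
# trigonometry in a 32-fraction-bit family.
#   fam = FXfamily(32)
#   theta = fam.pi / 6
#   sn, cs = theta.sincos()
#   assert abs(float(sn) - 0.5) < 1e-8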
if __name__ == "__main__":
import doctest
try:
doctest.testmod()
except TypeError:
print("*** Problems running doctest module ***")
# vim: set ts=4 sw=4 et:
|
deepfloat-main
|
rtl/log/luts/FixedPoint.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import FixedPoint
import math
import argparse
import io
parser = argparse.ArgumentParser(
description='Generates pow2 and log2 tables for log-linear conversions',
epilog='', formatter_class=argparse.RawTextHelpFormatter
)
group = parser.add_argument_group('arguments')
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser.add_argument("--mem", type=str2bool, nargs='?',
const=True, default=False,
help="generate memory tables")
group.add_argument('--bits_in', '-bi', metavar='<bits in>', type=int,
nargs=1, required=True,
help='bits for input')
group.add_argument('--bits_out', '-bo', metavar='<bits out>', type=int,
nargs=1, required=True,
help='bits for output')
parser.add_argument('--log', type=str2bool, nargs='?',
const=True, default=False,
help="generate log2 table only")
parser.add_argument('--pow', type=str2bool, nargs='?',
const=True, default=False,
help="generate pow2 table only")
parser.add_argument('--pow_delta', type=str2bool, nargs='?',
const=True, default=False,
help="generate pow2 delta table only")
parser.add_argument('--log_delta', type=str2bool, nargs='?',
const=True, default=False,
help="generate log2 delta table only")
parser.add_argument('--str', type=str2bool, nargs='?',
const=True, default=False,
help="print to stdout only")
def get_r2ne(x, bits):
    s = x.toBinaryString()  # avoid shadowing the builtin `str`
    assert s[1] == '.'
    keep_bit = s[2+bits-1] == '1'
    guard_bit = s[2+bits] == '1'
    round_bit = s[2+bits+1] == '1'
    sticky_bits = s[2+bits+2:].find('1') != -1
    round_down = (not guard_bit) or ((not keep_bit) and guard_bit and (not round_bit) and (not sticky_bits))
    return not round_down
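
# Illustrative note (not part of the original file): get_r2ne implements
# round-to-nearest-even on the binary expansion; with bits=2, 0.0110 has the
# guard bit set on an exact tie with an odd kept bit and rounds up to 0.10,
# while the tie 0.0010 keeps an even last bit and rounds down to 0.00.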
def get_fraction(x, bits=-1):
    s = x.toBinaryString()
    # Find the fixed point
    idx = s.find('.')
    if bits == -1:
        return s[idx+1:]
    else:
        return s[idx+1:idx+1+bits]
args = parser.parse_args()
overlaps = {}
#
# Non-delta
#
def get_pow2_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_x = x
orig_str = x.toBinaryString()[2:2+in_bits]
x = pow(2, x)
pow2_str = x.toBinaryString()
keep_bit = pow2_str[2+out_bits-1] == '1'
guard_bit = pow2_str[2+out_bits] == '1'
round_bit = pow2_str[2+out_bits+1] == '1'
sticky_bits = pow2_str[2+out_bits+2:].find('1') != -1
round_down = (not guard_bit) or ((not keep_bit) and guard_bit and (not round_bit) and (not sticky_bits))
if (not round_down and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
x = x + add
before_round = pow2_str[2:2+out_bits]
after_round = x.toBinaryString()[2:2+out_bits]
is_overlap = False
if after_round in overlaps:
is_overlap = True
else:
overlaps[after_round] = True
# can also formulate as what to subtract, excepting 0
# print(orig_str, (x - (1 + orig_x)).toBinaryString()[2+2:4 + out_bits - 2])
return orig_str, before_round, after_round, not round_down, is_overlap
def get_log2_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_str = x.toBinaryString()[2:2+in_bits]
x = (x + 1).log() / math.log(2)
pow2_str = x.toBinaryString()
keep_bit = pow2_str[2+out_bits-1] == '1'
guard_bit = pow2_str[2+out_bits] == '1'
round_bit = pow2_str[2+out_bits+1] == '1'
sticky_bits = pow2_str[2+out_bits+2:].find('1') != -1
round_down = (not guard_bit) or ((not keep_bit) and guard_bit and (not round_bit) and (not sticky_bits))
if (not round_down and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
x = x + add
before_round = pow2_str[2:2+out_bits]
after_round = x.toBinaryString()[2:2+out_bits]
is_overlap = False
if after_round in overlaps:
is_overlap = True
else:
overlaps[after_round] = True
return orig_str, before_round, after_round, not round_down, is_overlap
#
# delta
#
def get_pow2_delta_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_x = x
orig_str = x.toBinaryString()[2:2+in_bits]
pow2_x = pow(2, x)
pow2_str = x.toBinaryString()
round_up = get_r2ne(pow2_x, out_bits)
pow2_round_x = pow2_x
if (round_up and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
pow2_round_x = pow2_x + add
# As an out_bits-sized fixed point number
fam_out = FixedPoint.FXfamily(out_bits)
y = FixedPoint.FXnum(pow2_round_x, fam_out)
cur = FixedPoint.FXnum(i, fam_out) / (2 ** in_bits)
# This is what we are encoding, all values except for 0 are negative
delta_y = y - cur
delta_y = delta_y << 3
delta_y_truncated = FixedPoint.FXnum(delta_y, FixedPoint.FXfamily(out_bits-3))
delta_y_truncated = delta_y_truncated - 7
# print(y.toBinaryString(), cur.toBinaryString(), (y - cur).toBinaryString(), get_fraction(delta_y_truncated))
# Now, see if we can recover y from delta_y_truncated
recover_y = FixedPoint.FXnum(delta_y_truncated, fam_out)
recover_y = recover_y + 7
recover_y = recover_y >> 3
recover_val = cur + recover_y
assert recover_val == y
before_round = get_fraction(pow2_x, out_bits)
after_round = get_fraction(pow2_round_x, out_bits)
return orig_str, after_round, get_fraction(delta_y_truncated)
def get_log2_delta_expansion(i, in_bits, out_bits, enable_rounding=True):
prec_bits = out_bits * 4
fam20 = FixedPoint.FXfamily(prec_bits)
x = (FixedPoint.FXnum(i, fam20) / (2 ** in_bits))
orig_x = x
orig_str = x.toBinaryString()[2:2+in_bits]
log2_x = (x + 1).log() / math.log(2)
log2_str = x.toBinaryString()
round_up = get_r2ne(log2_x, out_bits)
log2_round_x = log2_x
if (round_up and enable_rounding):
add = FixedPoint.FXnum(1, fam20) >> out_bits
log2_round_x = log2_x + add
# As an out_bits-sized fixed point number
fam_out = FixedPoint.FXfamily(out_bits)
y = FixedPoint.FXnum(log2_round_x, fam_out)
cur = FixedPoint.FXnum(i, fam_out) / (2 ** in_bits)
# This is what we are encoding, all values except for 0 are negative
delta_y = y - cur
delta_y = delta_y << 3
# print('cur {} round {} delta {}'.format(cur.toBinaryString(), y.toBinaryString(), delta_y.toBinaryString()))
delta_y_truncated = FixedPoint.FXnum(delta_y, FixedPoint.FXfamily(out_bits-3))
delta_y_truncated = delta_y_truncated - 7
# Now, see if we can recover y from delta_y_truncated
recover_y = FixedPoint.FXnum(delta_y_truncated, fam_out)
recover_y = recover_y + 7
recover_y = recover_y >> 3
recover_val = cur + recover_y
# print('here', recover_val.toBinaryString())
assert recover_val == y
before_round = get_fraction(log2_x, out_bits)
after_round = get_fraction(log2_round_x, out_bits)
return orig_str, after_round, get_fraction(delta_y_truncated)
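# Illustrative sketch (not part of the original script): the delta encoding
# above stores d = 8*(y - x) - 7 in out_bits-3 fractional bits instead of y
# itself, and a consumer recovers y as x + (d + 7)/8; on the shared dyadic
# grid the round trip is exact, as the asserts in the functions check.
def _delta_roundtrip_demo(x, y):
    d = (y - x) * 8 - 7      # encode: shift left by 3, subtract bias 7
    return x + (d + 7) / 8   # decode: add bias 7 back, shift right by 3
assert _delta_roundtrip_demo(0.5, 1.40625) == 1.40625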
#
# module generation
#
def gen_pow2(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Pow2LUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits-1)
file.write(header)
had_overlap = False
for i in range(2 ** in_bits):
in_fixed, before_fixed, out_fixed, r, is_overlap = get_pow2_expansion(i, in_bits, out_bits)
if (gen_mem):
file.write(out_fixed)
file.write('\n')
else:
overlap_str = ''
if (is_overlap and r):
had_overlap = True
overlap_str = ' // overlap + round'
elif (is_overlap):
had_overlap = True
overlap_str = ' // overlap'
elif (r):
had_overlap = True
overlap_str = ' // round'
file.write(' {}\'b{}: out = {}\'b{};{}\n'.format(
in_bits,
in_fixed,
out_bits,
out_fixed,
overlap_str))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits, 'x' * out_bits))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
def gen_log2(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Log2LUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits)
file.write(header)
had_overlap = False
for i in range(2 ** in_bits):
in_fixed, before_fixed, out_fixed, r, is_overlap = get_log2_expansion(i, in_bits, out_bits, True)
if (i < 2 ** (in_bits - 1) or out_fixed != ('0' * out_bits)):
r = False
if (gen_mem):
file.write('{}{}\n'.format(int(r), out_fixed))
else:
overlap_str = ''
if (is_overlap and r):
had_overlap = True
overlap_str = ' // overlap + round'
elif (is_overlap):
had_overlap = True
overlap_str = ' // overlap'
elif (r):
had_overlap = True
overlap_str = ' // round'
file.write(' {}\'b{}: out = {}\'b{}{};{}\n'.format(
in_bits,
in_fixed,
out_bits + 1,
int(r),
out_fixed,
overlap_str))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits + 1, 'x' * (out_bits + 1)))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
def gen_pow2_delta(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Pow2DeltaLUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits-4)
file.write(header)
for i in range(2 ** in_bits):
in_fixed, out_fixed, delta = get_pow2_delta_expansion(i, in_bits, out_bits)
if (gen_mem):
file.write(delta)
file.write('\n')
else:
file.write(' {}\'b{}: out = {}\'b{};\n'.format(
in_bits,
in_fixed,
out_bits-3,
delta))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits-3, 'x' * (out_bits-3)))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
def gen_log2_delta(file, gen_mem, in_bits, out_bits):
if (not gen_mem):
header = """
module Log2DeltaLUT_{}x{}
(input [{}:0] in,
output logic [{}:0] out);
always_comb begin
case (in)
""".format(in_bits, out_bits, in_bits-1, out_bits-4)
file.write(header)
for i in range(2 ** in_bits):
in_fixed, out_fixed, delta = get_log2_delta_expansion(i, in_bits, out_bits)
if (gen_mem):
file.write(delta)
file.write('\n')
else:
file.write(' {}\'b{}: out = {}\'b{};\n'.format(
in_bits,
in_fixed,
out_bits-3,
delta))
if (not gen_mem):
file.write(' default: out = {}\'b{};\n'.format(out_bits-3, 'x' * (out_bits-3)))
file.write(' endcase\n')
file.write(' end\n')
file.write('endmodule\n')
# def gen_pow2_mem(file, in_bits, out_bits):
# header = """
# module Pow2Mem_{}x{}
# (input [{}:0] in,
# output logic [{}:0] out);
# logic [{}:0] mem[0:(2**{})-1];
# initial begin
# $readmemb("pow2_{}x{}.hex", mem);
# end
# always_comb begin
# out = mem[in];
# end
# endmodule
# """.format(in_bits, out_bits, in_bits-1, out_bits-1, out_bits-1, in_bits, in_bits, out_bits)
# file.write(header)
# def gen_log2_mem(file, in_bits, out_bits):
# header = """
# module Log2Mem_{}x{}
# (input [{}:0] in,
# output logic [{}:0] out);
# logic [{}:0] mem[0:(2**{})-1];
# initial begin
# $readmemb("log2_{}x{}.hex", mem);
# end
# always_comb begin
# out = mem[in];
# end
# endmodule
# """.format(in_bits, out_bits, in_bits-1, out_bits, out_bits, in_bits, in_bits, out_bits)
# file.write(header)
in_bits = args.bits_in[0]
out_bits = args.bits_out[0]
def make_file(name):
if (args.str):
return io.StringIO()
return open(name, 'w')
def close_file(f):
if (args.str):
print(f.getvalue())
else:
f.close()
if (args.pow):
f = make_file('Pow2LUT_{}x{}.sv'.format(in_bits, out_bits))
gen_pow2(f, False, in_bits, out_bits)
close_file(f)
# if (args.mem):
# f = make_file('Pow2Mem_{}x{}.sv'.format(in_bits, out_bits))
# gen_pow2_mem(f, in_bits, out_bits)
# close_file(f)
# f = make_file('pow2_{}x{}.hex'.format(in_bits, out_bits))
# gen_pow2(f, True, in_bits, out_bits)
# close_file(f)
if (args.pow_delta):
f = make_file('Pow2DeltaLUT_{}x{}.sv'.format(in_bits, out_bits))
gen_pow2_delta(f, False, in_bits, out_bits)
close_file(f)
if (args.log):
f = make_file('Log2LUT_{}x{}.sv'.format(in_bits, out_bits))
gen_log2(f, False, in_bits, out_bits)
close_file(f)
if (args.log_delta):
f = make_file('Log2DeltaLUT_{}x{}.sv'.format(in_bits, out_bits))
gen_log2_delta(f, False, in_bits, out_bits)
close_file(f)
# if (args.mem):
# f = make_file('Log2Mem_{}x{}.sv'.format(in_bits, out_bits))
# gen_log2_mem(f, in_bits, out_bits)
# close_file(f)
# f = make_file('log2_{}x{}.hex'.format(in_bits, out_bits))
# gen_log2(f, True, in_bits, out_bits)
# close_file(f)
|
deepfloat-main
|
rtl/log/luts/gen_tables.py
|
AutoCTR-main
|
utils/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import torch
from graphviz import Digraph
from torch.autograd import Variable
logger = logging.getLogger(__name__)
def size_to_str(size):
return "(" + (", ").join(["%d" % v for v in size]) + ")"
def visualize(model):
feats = create_fake_feats(model.feature_config)
pred = model(feats)
return net_visual(pred, params=dict(model.named_parameters()))
# default batch size = 2 so that BN layers can work
def create_fake_feats(feature_config, batch_size=2):
num_dense_feat = len(feature_config.dense.features)
feats = {"dense": torch.FloatTensor(np.random.rand(batch_size, num_dense_feat))}
feats.update(
{
feat.name: {
"data": torch.LongTensor([]),
"offsets": torch.LongTensor([0] * batch_size),
}
for feat in feature_config.sparse.features
}
)
return feats
def net_visual(var, params=None):
""" Produces Graphviz representation of PyTorch autograd graph.
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) used to add names to nodes that
require grad (TODO: make optional)
"""
if params is not None:
assert all(isinstance(p, Variable) for p in params.values())
param_map = {id(v): k for k, v in params.items()}
node_attr = {
"style": "filled",
"shape": "box",
"align": "left",
"fontsize": "12",
"ranksep": "0.1",
"height": "0.2",
}
graph_attr = {"size": "12,12"}
dot = Digraph(node_attr=node_attr, graph_attr=graph_attr)
seen = set()
output_nodes = (
(var.grad_fn,) if not isinstance(var, tuple) else tuple(v.grad_fn for v in var)
)
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
# note: this used to show .saved_tensors in pytorch 0.2, but stopped
# working once autograd moved to ATen and Variable/Tensor were merged
dot.node(str(id(var)), size_to_str(var.size()), fillcolor="orange")
elif hasattr(var, "variable"):
u = var.variable
name = param_map[id(u)] if params is not None else ""
node_name = "%s\n %s" % (name, size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor="lightblue")
elif var in output_nodes:
dot.node(
str(id(var)), str(type(var).__name__), fillcolor="darkolivegreen1"
)
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, "next_functions"):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, "saved_tensors"):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
# handle multiple outputs
if isinstance(var, tuple):
for v in var:
add_nodes(v.grad_fn)
else:
add_nodes(var.grad_fn)
_resize_graph(dot)
return dot
def _resize_graph(dot, size_per_element=0.15, min_size=12):
"""Resize the graph according to how much content it contains.
Modify the graph in place.
"""
# Get the approximate number of nodes and edges
num_rows = len(dot.body)
content_size = num_rows * size_per_element
size = max(min_size, content_size)
size_str = str(size) + "," + str(size)
dot.graph_attr.update(size=size_str)
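# Illustrative usage sketch (not part of the original module), assuming `model`
# exposes the `feature_config` attribute that `visualize` expects:
#
#     dot = visualize(model)            # graphviz.Digraph of the autograd graph
#     dot.render("arch", format="pdf")  # writes arch.pdf next to the script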
|
AutoCTR-main
|
utils/viz_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
sys.path.append('gen-py')
import argparse
import json
from block_config import ttypes as b_config
from config import ttypes as config
def get_args():
parser = argparse.ArgumentParser(
description="Neural Recommendation Model Searching Script for Kaggle Dataset"
)
# configs for final fit only
parser.add_argument("--model-file", type=str, default="",
help="a json file contain the model structure for final fit")
parser.add_argument("--save-model", action="store_true", default=False, help="save model or not during the final fit process")
# configs for search and final fit
parser.add_argument("--data-file", type=str, default="", help="data for search or final fit")
parser.add_argument("--data-set-name", type=str, default="", help="dataset name", choices=["criteo", "avazu", "kdd2012"])
parser.add_argument("--log-freq", type=int, default=10, help="log freqency of model training (# of epochs)")
parser.add_argument("--splits", type=str, default="0.8:0.1",
help="split of train,val,test, e.g., 0.8:0.1 means 80% train, 10% val, 10% test")
parser.add_argument("--batch-size", type=int, default=100, help="batch size for training each model")
parser.add_argument("--hash-size", type=int, default=10000, help="hash size for the features")
parser.add_argument("--learning-rate", type=float, default=0.001, help="learning rate of each model")
parser.add_argument("--nepochs", type=int, default=50, help="maximum epoch for training a model")
parser.add_argument("--num-workers", type=int, default=4,
help="number of workers (cpus) to preprocess data")
parser.add_argument("--num-trainers", type=int, default=1,
help="number of training for cpu training, currently this is abandoned and to be removed, we only support gpu training now")
parser.add_argument("--repeat-checker-off", action="store_true", default=False, help="check and avoid repeating searching same architectures")
parser.add_argument(
"--save-model-path", type=str, default="", help="the file path to save the models during the search process"
)
parser.add_argument("--search-nepochs", type=int, default=3, help="number of search iterations")
parser.add_argument(
"--reward-type",
default="logloss",
type=str,
choices=["logloss", "auc"],
help="measurement for the search model to compare models"
)
parser.add_argument(
"--searcher-type",
default="random",
type=str,
choices=["random", "evo"],
help="search algorithm"
)
parser.add_argument("--max-num-block", type=int, default=5, help="maximum number of blocks in each model in the search space")
parser.add_argument(
"--feature-processing-type", default="", type=str, choices=["idasp"], help="if we want to treat dense feature as sparse features"
)
# hyperparameters for proposed evo algorithm
parser.add_argument("--population-size", type=int, default=3,
help="size of the population, it also decides how many random initialization architectures we will do")
parser.add_argument("--candidate-size", type=float, default=2,
help="number of candidates to be picked from the population, the best one will be used to generate offsprings")
parser.add_argument("--sampler-type", type=int, default=10, help="number of neigbors for each candidate")
parser.add_argument("--historical-sample-path", type=str, default="", help="path for historical architectures to warm start the evo searcher")
parser.add_argument("--historical-sample-num", type=int, default=0, help="number of historical architectures to warm start the evo searcher")
parser.add_argument(
"--survival-type", default="comb", type=str, choices=["age", "fit", "mix", "comb"],
help="survival type, comb is multi-objective survival function, mix is a two-step survival function"
)
# search space config
parser.add_argument(
"--macro-space-type", type=int, default=config.MacroSearchSpaceType.INPUT_GROUP,
help="search space for features, either group sparse/dense features or not, please check out the /if/config.thrift for more detail"
)
parser.add_argument(
"--micro-space-types",
default="close",
type=str,
choices=[
"close",
"micro_mlp",
],
help="micro search space for blocks, currently only mlp have a micro space hyperparameter (units in each mlp layer), close means do not search mlp units",
)
# general search config
parser.add_argument("--num-machines", type=int, default=1, help="number of GPUs to be used")
parser.add_argument("--waiting-time", type=float, default=30,
help="waiting time for checking if the current running models are complete, default: check every 30 seconds")
parser.add_argument("--resume-file", type=str, default="", help="the file path to resume the search process")
parser.add_argument("--fbl-kill-time", type=float, default=1800,
help="time to kill a model during search, this is used to avoid some model crush and stuck during training")
parser.add_argument("--numpy-seed", type=int, default=123, help="numpy seed")
parser.add_argument("--torch-seed", type=int, default=4321, help="torch seed")
parser.add_argument("--warm-start-emb", action="store_true", default=False,
help="if we have a `.ckp` model weight to warm start the embeddings of the sparse features in each model")
# gpu config
parser.add_argument("--use-gpu", action="store_true", default=False, help="use gpu or not")
parser.add_argument("--maxLoad", type=float, default=0.5,
help="only load a model when the current used load of this gpu is lower than maxLoad")
parser.add_argument("--maxMemory", type=float, default=0.5,
help="only load a model when the current used memory of this gpu is lower than maxMemory")
parser.add_argument("--save-batches", action="store_true", default=False,
help="if we want to save the training data batches in the gpu memory, this will accelerate the speed")
parser.add_argument("--save-val-batches", action="store_true", default=False,
help="if we want to save the validation data batches in the gpu memory, this will accelerate the speed")
parser.add_argument("--total-gpus", type=int, default=1, help="total number of gpus on the machine")
parser.add_argument("--excludeID", type=str, default="", help="")
args = parser.parse_args()
if not args.save_model_path:
args.save_model_path = os.path.join(os.getcwd(), "results")
return args
def get_micro_space_types(args):
micro_space_types = args.micro_space_types.replace(" ", "")
micro_space_types = micro_space_types.split(",")
micro_space_types = list(set(micro_space_types))
micro_space_configs = []
if "close" in micro_space_types:
return [config.MicroSearchSpaceType(close=config.MicroClose())]
elif "micro_mlp" in micro_space_types:
micro_space_configs.append(
config.MicroSearchSpaceType(
micro_mlp=config.MicroMLPConfig(arc=[32, 64, 128, 256, 512, 1024])
)
)
elif "micro_cin" in micro_space_types:
micro_space_configs.append(
config.MicroSearchSpaceType(
micro_cin=config.MicroCINConfig(
arc=[64, 128, 256], num_of_layers=[1, 2, 3]
)
)
)
elif "micro_attention" in micro_space_types:
micro_space_configs.append(
config.MicroSearchSpaceType(
micro_attention=config.MicroAttentionConfig(
num_of_layers=[1, 2, 3],
num_of_heads=[1, 2, 3],
att_embed_dim=[],
dropout_prob=[],
)
)
)
else:
raise ValueError("Error micro space type.")
return micro_space_configs
def get_feature_processing_type(args):
feature_processing_type = args.feature_processing_type.replace(" ", "")
feature_processing_type = feature_processing_type.split(",")
feature_processing_type = list(set(feature_processing_type))
feature_processing_configs = []
if feature_processing_type != [""]:
if "idasp" in feature_processing_type:
feature_processing_configs.append(
config.FeatureProcessingType(idasp=config.InputDenseAsSparse())
)
else:
raise ValueError("Error micro space type.")
return feature_processing_configs
def get_searcher_config(args):
block_types = [
b_config.ExtendedBlockType.MLP_DENSE,
# b_config.ExtendedBlockType.MLP_EMB,
# b_config.ExtendedBlockType.CROSSNET,
# b_config.ExtendedBlockType.FM_DENSE,
b_config.ExtendedBlockType.FM_EMB,
# b_config.ExtendedBlockType.DOTPROCESSOR_DENSE,
b_config.ExtendedBlockType.DOTPROCESSOR_EMB,
# b_config.ExtendedBlockType.CAT_DENSE,
# b_config.ExtendedBlockType.CAT_EMB,
# b_config.ExtendedBlockType.CIN,
# b_config.ExtendedBlockType.ATTENTION,
]
if args.searcher_type == "random":
searcher_config = config.SearcherConfig(
random_searcher=config.RandomSearcherConfig(
max_num_block=args.max_num_block,
block_types=block_types,
macro_space_type=args.macro_space_type,
micro_space_types=get_micro_space_types(args),
feature_processing_type=get_feature_processing_type(args),
)
)
elif args.searcher_type == "evo":
searcher_config = config.SearcherConfig(
evolutionary_searcher=config.EvolutionarySearcherConfig(
max_num_block=args.max_num_block,
block_types=block_types,
population_size=args.population_size,
candidate_size=max(1, int(args.candidate_size)),
macro_space_type=args.macro_space_type,
micro_space_types=get_micro_space_types(args),
feature_processing_type=get_feature_processing_type(args),
)
)
return searcher_config
def get_trainer_config(args):
fp = os.getcwd()
if args.data_set_name == "criteo":
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_search.json"))
elif args.data_set_name == "avazu":
input_summary = json.load(open(fp + "/utils/fblearner_template/avazu_search.json"))
elif args.data_set_name == "kdd2012":
input_summary = json.load(open(fp + "/utils/fblearner_template/kdd2012_search.json"))
else:
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_search.json"))
return input_summary, args
def get_final_fit_trainer_config(args):
fp = os.getcwd()
if args.data_set_name == "criteo":
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_transfer.json"))
elif args.data_set_name == "avazu":
input_summary = json.load(open(fp + "/utils/fblearner_template/avazu_transfer.json"))
elif args.data_set_name == "kdd2012":
input_summary = json.load(open(fp + "/utils/fblearner_template/kdd2012_transfer.json"))
else:
input_summary = json.load(open(fp + "/utils/fblearner_template/criteo_transfer.json"))
return input_summary, args
def get_phenotype(args):
filenames = [args.model_file]
model_config_dicts = []
for filename in filenames:
with open(filename) as fp:
model_config_dict = json.load(fp)
model_config_dicts.append(model_config_dict)
return filenames, model_config_dicts
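# Illustrative usage sketch (not part of the original module): wiring the
# helpers above together for a search run; all values come from argparse.
#
#     args = get_args()
#     searcher_config = get_searcher_config(args)     # random or evo searcher
#     input_summary, args = get_trainer_config(args)  # dataset-specific template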
|
AutoCTR-main
|
utils/search_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import logging
from collections import namedtuple
from copy import deepcopy
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from config import ttypes as config
ReaderOption = namedtuple("ReaderOption", ["type", "options"])
logger = logging.getLogger(__name__)
kEpsilon = 1e-10
class DenseDataset(Dataset):
"""Dense dataset."""
def __init__(self, X, y, sample_weights=None):
self.X = torch.FloatTensor(X)
self.y = torch.FloatTensor(y)
if sample_weights is not None:
self.sample_weights = torch.FloatTensor(sample_weights)
else:
self.sample_weights = None
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
sample = {}
sample["label"] = self.y[idx]
sample["dense"] = self.X[idx]
if self.sample_weights is not None:
sample["weight"] = self.sample_weights[idx]
return sample
def share_memory_(self):
self.X.share_memory_()
self.y.share_memory_()
if self.sample_weights is not None:
self.sample_weights.share_memory_()
############################################################
# criteo data utils
############################################################
class CriteoDataset(Dataset):
"""Criteo dataset."""
def __init__(self, X_cat, X_int, y, dense_transform=None):
self.X_cat, self.X_int, self.y, self.dense_transform = (
torch.LongTensor(X_cat),
torch.FloatTensor(X_int),
torch.FloatTensor(y),
dense_transform,
)
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
# Criteo data only have categorical features as sparse feature
sample = {
"sparse_{}".format(i): torch.tensor([v + 1])
for i, v in enumerate(self.X_cat[idx])
}
sample["label"] = self.y[idx]
sample["dense"] = (
self.X_int[idx]
if self.dense_transform is None
else self.dense_transform(self.X_int[idx])
)
return sample
def share_memory_(self):
self.X_cat.share_memory_()
self.X_int.share_memory_()
self.y.share_memory_()
############################################################
# synthetic data utils
############################################################
def _set_random_seed(seed=0):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class SyntheticDataset(Dataset):
"""Synthetic dataset."""
def __init__(
self,
num_dense,
num_sparse,
max_sparse_id,
num_samples,
batch_size,
id_list_configs,
):
_set_random_seed()
# We generate 10k examples and then reuse these examples during
# data reading.
data_num_samples = 10000
self.num_batches = data_num_samples // batch_size
self.num_batch_samples = num_samples // batch_size
# Limit the number of examples as we are only doing benchmarking for
# synthetic data set.
dense = torch.randn((self.num_batches, batch_size, num_dense))
label = torch.randint(2, size=(self.num_batches, batch_size))
weight = None # torch.ones((self.num_batches, batch_size))
assert (
len(id_list_configs) == num_sparse or len(id_list_configs) == 1
), "len(id_list_configs) != num_sparse: {0} vs {1}".format(
len(id_list_configs), num_sparse
)
if len(id_list_configs) == 1:
id_list_configs = [deepcopy(id_list_configs[0]) for _ in range(num_sparse)]
sparse_id_list_len = [
[
[
min(max(0, int(x)), config.truncation)
for x in np.random.normal(
config.mean, config.std, size=(batch_size)
)
]
for config in id_list_configs
]
for _ in range(self.num_batches)
]
sparse = []
for k in range(self.num_batches):
sparse_batch = []
for i in range(num_sparse):
sparse_batch.append({})
ids = []
offsets = [0]
for j in range(batch_size):
id_list_len = sparse_id_list_len[k][i][j]
ids.extend(np.random.randint(max_sparse_id, size=id_list_len))
offsets.append(offsets[-1] + id_list_len)
sparse_batch[i]["data"] = torch.tensor(ids)
sparse_batch[i]["offsets"] = torch.tensor(offsets[:-1])
sparse.append(sparse_batch)
self.data = []
for i in range(self.num_batches):
batch = {}
batch["dense"] = dense[i]
batch["label"] = label[i]
batch["weight"] = weight[i] if weight is not None else None
batch["sparse"] = [sparse[i][j] for j in range(num_sparse)]
self.data.append(batch)
def __len__(self):
return self.num_batch_samples
def __getitem__(self, idx):
return self.data[idx % self.num_batches]
def synthetic_data_generator(
num_dense, num_sparse, max_sparse_id, num_samples, batch_size, id_list_configs
):
_set_random_seed()
# Limit the number of examples as we are only doing benchmarking for
# synthetic data set.
data_num_batches = min(1000, min(100000, num_samples) // batch_size)
dense = torch.randn((data_num_batches, batch_size, num_dense))
label = torch.randint(2, size=(data_num_batches, batch_size))
# weight = torch.ones((data_num_batches, batch_size))
assert (
len(id_list_configs) == num_sparse or len(id_list_configs) == 1
), "len(id_list_configs) != num_sparse: {0} vs {1}".format(
len(id_list_configs), num_sparse
)
if len(id_list_configs) == 1:
id_list_configs = [deepcopy(id_list_configs[0]) for _ in range(num_sparse)]
sparse_id_list_len = [
[
[
min(max(0, int(x)), config.truncation)
for x in np.random.normal(config.mean, config.std, size=(batch_size))
]
for config in id_list_configs
]
for _ in range(data_num_batches)
]
sparse = []
for k in range(data_num_batches):
sparse_batch = []
for i in range(num_sparse):
sparse_batch.append({})
ids = []
offsets = [0]
for j in range(batch_size):
id_list_len = sparse_id_list_len[k][i][j]
ids.extend(np.random.randint(max_sparse_id, size=id_list_len))
offsets.append(offsets[-1] + id_list_len)
sparse_batch[i]["data"] = torch.tensor(ids)
sparse_batch[i]["offsets"] = torch.tensor(offsets[:-1])
sparse.append(sparse_batch)
data = []
for i in range(data_num_batches):
batch = {}
batch["dense"] = dense[i]
batch["label"] = label[i]
batch["weight"] = None # weight[i]
batch["sparse"] = [sparse[i][j] for j in range(num_sparse)]
data.append(batch)
return data
def get_split_indices(splits, num_samples):
if np.sum(splits) >= 1.0:
raise ValueError("sum of splits should be smaller than 1.0")
bins = list(np.cumsum([0.0] + list(splits)))
bins.append(1.0)
indices = [
range(int(bins[i] * num_samples), int(bins[i + 1] * num_samples))
for i in range(len(splits) + 1)
]
if any(len(indice) <= 0 for indice in indices):
raise ValueError(
"Split {} is causing empty partitions: {}".format(
splits, [len(indice) for indice in indices]
)
)
return indices
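# Illustrative sketch (not part of the original module): with splits=[0.8, 0.1]
# and 100 samples the bins become [0.0, 0.8, 0.9, 1.0], so the partitions are
# range(0, 80), range(80, 90), range(90, 100); the last partition implicitly
# receives the remaining 1.0 - sum(splits) share of the data.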
def split_dense_dataset(data, splits, sample_weights=None):
"""
dataset: Dataset
splits: array of split ratio of length L, will create L+1 dataloaders
according to the ratio, the last partition is 1.0-sum(splits);
if None, return the entire dataset in dataloader
example:
splits= [0.8, 0.1] for a 80%, 10%, 10% splits
between train, validation, eval
"""
num_samples = len(data["y"])
indices = get_split_indices(splits=splits, num_samples=num_samples)
logger.info(
"Split data into partitions with size: {}".format(
[len(indice) for indice in indices]
)
)
datasets = []
for indice in indices:
dataset = DenseDataset(
data["X"][indice],
data["y"][indice],
None if sample_weights is None else sample_weights[indice],
)
datasets.append(dataset)
return datasets
def load_and_split_dataset(npz_file, splits=None):
"""
dataset: Dataset
splits: array of split ratio of length L, will create L+1 dataloaders
according to the ratio, the last partition is 1.0-sum(splits);
if None, return the entire dataset in dataloader
example:
splits= [0.8, 0.1] for a 80%, 10%, 10% splits
between train, validation, eval
"""
data = np.load(npz_file)
if splits is None:
return CriteoDataset(X_cat=data["X_cat"], X_int=data["X_int"], y=data["y"])
num_samples = len(data["y"])
indices = get_split_indices(splits=splits, num_samples=num_samples)
logger.info(
"Split data into partitions with size: {}".format(
[len(indice) for indice in indices]
)
)
return [
CriteoDataset(
X_cat=data["X_cat"][indice],
X_int=data["X_int"][indice],
y=data["y"][indice],
)
for indice in indices
]
############################################################
# batch processors
############################################################
def _save_transforms(dense_transform, filename):
torch.save({"dense_transform": dense_transform}, filename)
def _load_transforms(filename):
state = torch.load(filename)
return state["dense_transform"]
# the __call__ method for a BatchProcessor should return label, feats, weight:
# label: a (batch_size,) FloatTensor for labels
# weight: optional, None or (batch_size,) FloatTensor for per sample weights
# feats: dict for features
# feats['dense']: (batch_size, num_dense) FloatTensor for dense features
# feats['<sparse_feature_name>']: for each sparse feature name (consistent
# with feature_config), it is a dict with two keys:
# 'data' and 'offsets'. See EmbeddingBag doc for the supported types.
class BatchProcessor(object):
def __init__(
self,
feature_config=None,
dense_transform=None,
device=None,
dense_feature_clamp=-1.0,
):
self.feature_config = deepcopy(feature_config)
self.dense_transform = dense_transform
self.device = torch.device("cpu") if device is None else device
self.dense_feature_clamp = dense_feature_clamp
def save_transforms(self, filename):
_save_transforms(self.dense_transform, filename)
def load_transforms(self, filename):
self.dense_transform = _load_transforms(filename)
def share_memory(self):
if self.dense_transform is not None:
self.dense_transform.share_memory_()
def __call__(self):
raise NotImplementedError
class DenseBatchProcessor(BatchProcessor):
def __call__(self, mini_batch):
for k, v in mini_batch.items():
if k == "dense":
v = v if self.dense_transform is None else self.dense_transform(v)
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
elif k in ["label", "weight"]:
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
else:
raise ValueError("invalid mini_batch key")
label = mini_batch.pop("label", None)
weight = mini_batch.pop("weight", None)
return label, mini_batch, weight
class CriteoBatchProcessor(BatchProcessor):
def __call__(self, mini_batch, transform=True, reverse=0):
if reverse == 1:
for k, v in mini_batch.items():
if k in ["dense", "label"]:
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
else:
mini_batch[k] = {
"data": v["data"].to(device=self.device, dtype=torch.long),
"offsets": None,
}
elif reverse == 2:
for k, v in mini_batch.items():
if k in ["dense", "label"]:
mini_batch[k] = v.to(device=torch.device("cpu"), dtype=torch.float32)
else:
mini_batch[k] = {
"data": v["data"].to(device=torch.device("cpu"), dtype=torch.long),
"offsets": None,
}
else:
if transform:
for k, v in mini_batch.items():
if k == "dense":
v = v if self.dense_transform is None else self.dense_transform(v)
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
elif k == "label":
mini_batch[k] = v.to(device=self.device, dtype=torch.float32)
else:
mini_batch[k] = {
"data": v.to(device=self.device, dtype=torch.long),
"offsets": None,
}
# else:
# for k, v in mini_batch.items():
# mini_batch[k] = v
# label = mini_batch.pop("label", None)
label = mini_batch["label"]
# Criteo does not have sample weights
weight = None
return label, mini_batch, weight
def loadDataset(file):
"""
Loads dataset from NumPy format.
Inputs:
file (str): path to the npz file of dataset (Kaggle or Terabyte)
Outputs:
X_cat (np.ndarray): categorical features
X_int (np.ndarray): continuous features
y (np.ndarray): labels
counts (list): number of categories for each categorical feature
"""
# load and preprocess data
with np.load(file) as data:
X_int = data["X_int"]
X_cat = data["X_cat"]
y = data["y"]
counts = data["counts"]
return X_cat, X_int, y, counts
############################################################
# dense transform
############################################################
class DenseTransform(object):
def __init__(self, mean, std):
self.mean = mean.cpu()
self.std = std.cpu()
def __call__(self, dense):
return (dense - self.mean) / self.std
def share_memory_(self):
self.mean.share_memory_()
self.std.share_memory_()
def create_dense_transform(train_dataloader, batch_processor, num_batches):
mean = 0.0
num_samples = 0
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch >= num_batches:
break
_, feats, _ = batch_processor(mini_batch=sample_batched)
dense = feats["dense"]
num_samples += dense.shape[0]
mean += torch.sum(dense.to(dtype=torch.float), dim=0)
mean /= num_samples
var = 0.0
num_samples = 0
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch >= num_batches:
break
_, feats, _ = batch_processor(mini_batch=sample_batched)
dense = feats["dense"]
num_samples += dense.shape[0]
var += torch.sum((dense.to(dtype=torch.float) - mean) ** 2, dim=0)
std = torch.sqrt((var + kEpsilon) / num_samples)
return DenseTransform(mean=mean, std=std)
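# Illustrative sketch (not part of the original module): DenseTransform applies
# per-feature standardization, so transformed columns have ~zero mean, unit std.
#
#     dense = torch.randn(1000, 13) * 5 + 2
#     t = DenseTransform(mean=dense.mean(dim=0), std=dense.std(dim=0))
#     out = t(dense)  # out.mean(dim=0) ~ 0, out.std(dim=0) ~ 1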
def create_dense_transform_from_synthetic():
# The dense features are sampled from a standard normal distribution,
# so we simply set mean=0 and std=1.
# This exists only for benchmarking purposes.
return DenseTransform(mean=torch.tensor(0), std=torch.tensor(1))
def prepare_data(data_options, performance_options, CUDA="cuda:0", pin_memory=False):
if data_options.getType() == config.DataConfig.FROM_FILE:
data_option = data_options.get_from_file()
(
datasets,
batch_processor,
train_dataloader,
val_dataloader,
eval_dataloader,
) = prepare_criteo_data(data_option, performance_options, CUDA, pin_memory)
else:
raise ValueError("Unknown data option type.")
dense_transform = create_dense_transform(
train_dataloader,
batch_processor,
num_batches=int(data_option.num_samples_meta / data_option.batch_size),
)
batch_processor.dense_transform = dense_transform
return datasets, batch_processor, train_dataloader, val_dataloader, eval_dataloader
def prepare_criteo_data(data_options, performance_options, CUDA, pin_memory=False):
logger.info("Loading data from {}".format(data_options.data_file))
datasets = load_and_split_dataset(
npz_file=data_options.data_file, splits=data_options.splits
)
logger.info("Data loaded")
# pin_memory=True,
train_dataloader, val_dataloader, eval_dataloader = (
DataLoader(dataset,
batch_size=data_options.batch_size,
pin_memory=pin_memory,
num_workers=performance_options.num_readers) for dataset in datasets
)
batch_processor = CriteoBatchProcessor(
device=(
torch.device(CUDA)
if performance_options.use_gpu
else torch.device("cpu")
)
)
return datasets, batch_processor, train_dataloader, val_dataloader, eval_dataloader
|
AutoCTR-main
|
utils/data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import torch.nn as nn
from block_config import ttypes as b_config
from nasrec.blocks import set_block_from_config
from .base_net import BaseNet
from .utils import (
Optimizers,
apply_emb,
create_emb_dict,
create_optimizers_for_dense,
create_optimizers_for_embed,
)
logger = logging.getLogger(__name__)
class NASRecNet(BaseNet):
def __init__(self, model_config, feature_config):
super(NASRecNet, self).__init__(model_config, feature_config)
self.nasrec_net_option = self.model_config.get_nasrec_net()
self.num_block = len(self.nasrec_net_option.block_configs)
self._init_model_params()
self._build_arc()
def _init_model_params(self):
self.sparse_hash_size = {
item.name: int(item.hash_size)
for item in self.sparse_feature_options.features
}
self.feat_dim = {
"dense": {0: [self.num_dense_feat]},
"sparse": {
0: [self.sparse_feature_options.embed_dim] * self.num_sparse_feat
},
}
def _build_arc(self):
self.emb_dict = create_emb_dict(self.sparse_feature_options)
self.blocks = nn.ModuleList()
for block_config in self.nasrec_net_option.block_configs:
block = set_block_from_config(block_config, self.feat_dim)
self.feat_dim = block.dim_config(self.feat_dim)
self.blocks.append(block)
# build up final block
self.blocks.append(self._build_final_block())
def _build_final_block(self):
"""Construct the final block
"""
dense = deepcopy(self.feat_dim["dense"])
sparse = deepcopy(self.feat_dim["sparse"])
# make dicts of all feature ids (including intermediate features)
for block_id in dense:
if len(dense[block_id]) > 0:
dense[block_id] = list(range(dense[block_id][0]))
else:
dense[block_id] = []
for block_id in sparse:
sparse[block_id] = list(range(len(sparse[block_id])))
# remove the features that have already been used as intermediate input
for block_id in range(0, self.num_block):
dense_feat = self.blocks[block_id].feat_dense_id
sparse_feat = self.blocks[block_id].feat_sparse_id
for former_block_id in dense_feat:
tmp_ids = dense_feat[former_block_id]
dense[former_block_id] = (
(
[]
if tmp_ids == [-1]
else list(set(dense[former_block_id]) - set(tmp_ids))
)
if former_block_id in dense
else []
)
for former_block_id in sparse_feat:
tmp_ids = sparse_feat[former_block_id]
sparse[former_block_id] = (
(
[]
if tmp_ids == [-1]
else list(set(sparse[former_block_id]) - set(tmp_ids))
)
if former_block_id in sparse
else []
)
# convert feature dicts (dense & sparse) to feature configs
feat_configs = []
for block_id, feat_list in dense.items():
if block_id in sparse:
feat_config = b_config.FeatSelectionConfig(
block_id=block_id, dense=feat_list, sparse=sparse[block_id]
)
else:
feat_config = b_config.FeatSelectionConfig(
block_id=block_id, dense=feat_list, sparse=[]
)
feat_configs.append(feat_config)
for block_id, feat_list in sparse.items():
if block_id in dense:
continue
else:
feat_config = b_config.FeatSelectionConfig(
block_id=block_id, dense=[], sparse=feat_list
)
feat_configs.append(feat_config)
# construct the MLP block config
block_config = b_config.BlockConfig(
mlp_block=b_config.MLPBlockConfig(
name="MLPBlock",
block_id=self.num_block + 1,
arc=[1],
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=feat_configs,
ly_act=False,
)
)
return set_block_from_config(block_config, self.feat_dim)
def get_optimizers(self):
optimizers = Optimizers()
# add dense optimizers
create_optimizers_for_dense(
optimizers,
named_parameters=self.named_parameters(),
dense_optim_config=self.dense_feature_options.optim,
)
# add sparse optimizers
create_optimizers_for_embed(
optimizers,
emb_dict=self.emb_dict,
sparse_feature_options=self.sparse_feature_options,
)
return optimizers
def forward(self, feats):
# process sparse features (using embeddings), resulting in a list of row vectors
feat_dict = {"dense": {0: feats["dense"]}} # if self.num_dense_feat > 0 else []
ly = apply_emb(feats, self.emb_dict, self.sparse_hash_size)
feat_dict["sparse"] = {
0: {feat_id: ly[feat_id] for feat_id in range(self.num_sparse_feat)}
}
# blocks
for qq, block in enumerate(self.blocks):
feat_dict = block(feat_dict)
p = feat_dict["dense"][self.blocks[-1].block_id]
return p.view(-1)
|
AutoCTR-main
|
models/nas_modules.py
|
AutoCTR-main
|
models/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import logging
import torch
from config import ttypes as config
from .nas_modules import NASRecNet
logger = logging.getLogger(__name__)
def build_model(model_config, feature_config):
if model_config.getType() == config.ModelConfig.NASREC_NET:
return build_nasrec_net(model_config, feature_config)
else:
raise ValueError("Unknown model type.")
def build_nasrec_net(model_config, feature_config):
return NASRecNet(model_config=model_config, feature_config=feature_config)
def save_model(filename, model):
logger.warning("Saving model to {}".format(filename))
state = {
"state_dict": model.state_dict(),
"model_config": model.model_config,
"feature_config": model.feature_config,
}
torch.save(state, filename)
def load_model(filename):
logger.warning("Loading model from {}".format(filename))
state = torch.load(filename, map_location='cpu')
model_config = state["model_config"]
feature_config = state["feature_config"]
model = build_model(model_config=model_config, feature_config=feature_config)
model.load_state_dict(state["state_dict"])
return model
|
AutoCTR-main
|
models/builder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import torch
import torch.nn as nn
from config import ttypes as config
logger = logging.getLogger(__name__)
def apply_emb(feats, emb_dict, sparse_hash_size):
ly = []
for name, E in emb_dict.items():
if name not in feats:
raise ValueError("feature {} missing from input! ".format(name))
val = feats[name]
hash_size = sparse_hash_size[name]
V = E(input=torch.remainder(val["data"], hash_size), offsets=val["offsets"])
ly.append(V)
return ly
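# Illustrative sketch (not part of the original module): each sparse feature is
# fed to an EmbeddingBag in (data, offsets) form, where offsets marks where each
# example's id list begins in the flat data tensor.
#
#     E = nn.EmbeddingBag(100, 4, mode="sum")
#     data = torch.tensor([3, 7, 7])      # ids for a batch of 2 examples
#     offsets = torch.tensor([0, 2])      # example 0 -> [3, 7], example 1 -> [7]
#     V = E(input=data, offsets=offsets)  # shape (2, 4)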
def create_mlp(ln, ly_act=False):
ln = list(ln)
layers = nn.ModuleList()
for i in range(1, len(ln) - 1):
layers.append(nn.Linear(int(ln[i - 1]), int(ln[i]), bias=True))
layers.append(nn.ReLU())
layers.append(nn.Linear(int(ln[-2]), int(ln[-1]), bias=True))
if ly_act:
layers.append(nn.ReLU())
return torch.nn.Sequential(*layers)
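# Illustrative sketch (not part of the original module): create_mlp builds
# Linear->ReLU pairs for the hidden layers and a bare Linear on top, e.g.
#
#     mlp = create_mlp([10, 32, 1])
#     # Sequential(Linear(10, 32), ReLU(), Linear(32, 1))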
def create_emb(sparse_feature, comm_embed_dim):
embed_dim = (
sparse_feature.embed_dim if sparse_feature.embed_dim > 0 else comm_embed_dim
)
hash_size = sparse_feature.hash_size
if sparse_feature.pooling.getType() == config.PoolingConfig.SUM:
mode = "sum"
elif sparse_feature.pooling.getType() == config.PoolingConfig.AVG:
mode = "mean"
else:
raise ValueError(
"Unknown pooling option: {}".format(sparse_feature.pooling.getType())
)
# return nn.EmbeddingBag(hash_size, embed_dim, sparse=True, mode=mode)
a = nn.EmbeddingBag(hash_size, embed_dim, sparse=True, mode=mode)
nn.init.normal_(a.weight, 0, 0.01)
return a
def create_emb_dict(sparse_feature_options):
comm_embed_dim = sparse_feature_options.embed_dim
return nn.ModuleDict(
{
item.name: create_emb(sparse_feature=item, comm_embed_dim=comm_embed_dim)
for item in sparse_feature_options.features
}
)
def create_optim(params, optim_config):
if optim_config.getType() == config.OptimConfig.SGD:
opt_config = optim_config.get_sgd()
return torch.optim.SGD(
params,
lr=opt_config.lr,
momentum=opt_config.momentum,
dampening=opt_config.dampening,
weight_decay=opt_config.weight_decay,
nesterov=opt_config.nesterov,
)
elif optim_config.getType() == config.OptimConfig.ADAGRAD:
opt_config = optim_config.get_adagrad()
return torch.optim.Adagrad(
params,
lr=opt_config.lr,
lr_decay=opt_config.lr_decay,
weight_decay=opt_config.weight_decay,
initial_accumulator_value=opt_config.initial_accumulator_value,
)
elif optim_config.getType() == config.OptimConfig.SPARSE_ADAM:
opt_config = optim_config.get_sparse_adam()
return torch.optim.SparseAdam(
params,
lr=opt_config.lr,
betas=(opt_config.betas0, opt_config.betas1),
eps=opt_config.eps,
)
elif optim_config.getType() == config.OptimConfig.ADAM:
opt_config = optim_config.get_adam()
return torch.optim.Adam(
params,
lr=opt_config.lr,
weight_decay=opt_config.weight_decay,
amsgrad=opt_config.amsgrad,
betas=(opt_config.betas0, opt_config.betas1),
eps=opt_config.eps,
)
elif optim_config.getType() == config.OptimConfig.RMSPROP:
opt_config = optim_config.get_rmsprop()
return torch.optim.RMSprop(
params,
lr=opt_config.lr,
weight_decay=opt_config.weight_decay,
alpha=opt_config.alpha,
momentum=opt_config.momentum,
centered=opt_config.centered,
eps=opt_config.eps,
)
else:
raise ValueError("unknown optimizer type: {}".format(optim_config))
class Optimizers(object):
def __init__(self, optimizers=None, named_optimizers=None):
self.optimizers = [] if optimizers is None else optimizers
self.named_optimizers = {} if named_optimizers is None else named_optimizers
def add(self, optimizer, name=None):
if name is None:
self.optimizers.append(optimizer)
else:
assert (
name not in self.named_optimizers
), "optimizer for {} already exist!".format(name)
self.named_optimizers[name] = optimizer
def zero_grad(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
for _, optimizer in self.named_optimizers.items():
optimizer.zero_grad()
def step(self):
for optimizer in self.optimizers:
optimizer.step()
for _, optimizer in self.named_optimizers.items():
optimizer.step()
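# Illustrative usage sketch (not part of the original module): Optimizers fans
# zero_grad()/step() out to one dense optimizer plus one per embedding table;
# `model` and `loss` below are assumed to be defined elsewhere.
#
#     opts = Optimizers()
#     opts.add(torch.optim.Adam(model.parameters()), name="dense")
#     opts.zero_grad()
#     loss.backward()
#     opts.step()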
# Assumes that embedding params have [sparse_name_key] (default "emb_dict")
# in their name. It is true for embeddings created via
# self.emb_dict = create_emb_dict(self.sparse_feature_options)
def create_optimizers_for_dense(
optimizers, named_parameters, dense_optim_config, sparse_name_key="emb_dict"
):
# named_parameters may be a generator; materialize it so it can be iterated twice
named_parameters = list(named_parameters)
params = [param for name, param in named_parameters if sparse_name_key not in name]
logger.info(
"Creating optim for non-embedding params with config: "
"{}.".format(dense_optim_config)
)
logger.info(
"Creating optim for non-embedding params list: "
+ ", ".join([name for name, _ in named_parameters if sparse_name_key not in name])
)
optimizers.add(
create_optim(params=params, optim_config=dense_optim_config), name="dense"
)
def create_optimizers_for_embed(optimizers, emb_dict, sparse_feature_options):
sparse_optim_config = sparse_feature_options.optim
for item in sparse_feature_options.features:
name = item.name
item_optim_config = sparse_optim_config if item.optim is None else item.optim
logger.info(
"Creating optim for {} with config: {}".format(name, item_optim_config)
)
optimizers.add(
create_optim(
params=emb_dict[name].parameters(), optim_config=item_optim_config
),
name=name,
)
|
AutoCTR-main
|
models/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import torch.nn as nn
logger = logging.getLogger(__name__)
class BaseNet(nn.Module):
def __init__(self, model_config, feature_config):
super(BaseNet, self).__init__()
# for serialization purposes
self.model_config = deepcopy(model_config)
self.feature_config = deepcopy(feature_config)
self.dense_feature_options = self.feature_config.dense
self.sparse_feature_options = self.feature_config.sparse
self.num_dense_feat = len(self.dense_feature_options.features)
self.num_sparse_feat = len(self.sparse_feature_options.features)
def _build_arc(self):
raise NotImplementedError
def get_optimizers(self):
raise NotImplementedError
def forward(self, fs):
raise NotImplementedError
|
AutoCTR-main
|
models/base_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import numpy as np
import lightgbm as lgb
import scipy.stats as ss
from config import ttypes as config
from models.nas_modules import NASRecNet
from .base_searcher import BaseSearcher
logger = logging.getLogger(__name__)
def nCr(n,r):
f = math.factorial
return f(n) / f(r) / f(n-r)
def prob_comb(population_size, candidate_size):
prob = []
for rank in range(population_size, 0, -1):
prob.append(nCr(rank + candidate_size-1, candidate_size)/nCr(population_size + candidate_size, candidate_size + 1))
return prob
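# Illustrative check (not part of the original module): by the hockey-stick
# identity sum_{r=1..p} C(r+c-1, c) = C(p+c, c+1), prob_comb returns a valid
# probability distribution; its first entries (mapped to the best-ranked
# individuals in _selection_candidate) are the largest.
assert abs(sum(prob_comb(population_size=10, candidate_size=3)) - 1.0) < 1e-9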
class EvolutionaryController(BaseSearcher):
"""Aging evolution: https://arxiv.org/abs/1802.01548
"""
def __init__(self, searcher_config, feature_config):
super(EvolutionaryController, self).__init__(searcher_config, feature_config)
self.controller_option = searcher_config.get_evolutionary_searcher()
self._init_base_searcher_params()
self.population_size = self.controller_option.population_size
self.candidate_size = self.controller_option.candidate_size
self.all_arc_vecs = None
self.all_rewards = None
self.all_params = None
self.all_flops = None
self.sampler_type = 1
self.eval_at = 3
self._build_arc()
self.sample_prob = prob_comb(self.population_size, self.candidate_size)
def _build_arc(self):
self.population_arc_queue = []
self.population_val_queue = []
def _selection_candidate(self, type=0):
if type == 0:
candidate_indices = np.sort(
np.random.choice(
self.population_size, self.candidate_size, replace=False
)
)
candidate_arcs = list(
map(self.population_arc_queue.__getitem__, candidate_indices)
)
candidate_vals = list(
map(self.population_val_queue.__getitem__, candidate_indices)
)
best_arc_idx = np.argmin(candidate_vals)
best_arc = candidate_arcs[best_arc_idx]
elif type == 1:
rank = ss.rankdata(np.array(self.population_val_queue), method='ordinal')
tmp_prob = [self.sample_prob[i-1] for i in rank]
best_arc_idx = np.random.choice(list(range(self.population_size)), p=tmp_prob)
best_arc = self.population_arc_queue[best_arc_idx]
return best_arc_idx, best_arc
def sample(self, batch_size=1, return_config=False, is_initial=True):
"""sample a batch_size number of NasRecNets from the controller, where
each node is made up of a set of blocks with number self.num_blocks.
If is_initial=True, random sample a batch size of arcs into population,
else sample a candidate size arch from population queue, get the best one,
mutate the best one to a new arch, repeat this a batch_size of time.
"""
if batch_size < 1:
raise ValueError("Wrong batch_size.")
nasrec_nets, all_vec_configs, nasrec_arc_vecs = [], [], []
for _ in range(batch_size):
if is_initial:
vecs, vec_configs = self.random_sample()
else:
best_arc_idx, best_arc = self._selection_candidate(type=1)
# mutate to get child
if self.sampler_type > 1:
vecs, vec_configs = self.ML_sampler(parent=best_arc)
else:
vecs, vec_configs = self.mutate_arc(parent=best_arc)
arc_vec = np.concatenate(vecs)
nasrec_arc_vecs.append(arc_vec)
all_vec_configs.append(vec_configs)
block_configs = self.vecs_to_model_config(vec_configs)
model_config = config.ModelConfig(
nasrec_net=config.NASRecNetConfig(block_configs=block_configs)
)
if return_config:
nasrec_nets.append(model_config)
else:
nasrec_nets.append(NASRecNet(model_config, self.feature_config))
return nasrec_nets, [], all_vec_configs, nasrec_arc_vecs
def update(self, actions, rewards, survival_type="age"):
"""add k new archs into the population queue and
kick out the k oldest archs"""
# add child to right of population
self.population_arc_queue += actions
self.population_val_queue += rewards
if survival_type == "age":
self.population_arc_queue = self.population_arc_queue[-self.population_size:]
self.population_val_queue = self.population_val_queue[-self.population_size:]
elif survival_type == "comb":
self.comb()
else:
if survival_type == "fit":
idx = sorted(
range(len(self.population_val_queue)),
key=lambda i: self.population_val_queue[i], reverse=True
)[-self.population_size:]
elif survival_type == "mix":
division = int(0.5 * self.population_size)
tmp_rewards = self.population_val_queue[:-division]
idx = sorted(range(len(tmp_rewards)), key=lambda i: tmp_rewards[i], reverse=True)[-division:]
age_arcs = self.population_arc_queue[-division:]
age_vals = self.population_val_queue[-division:]
self.population_arc_queue = np.array(self.population_arc_queue)[idx].tolist()
self.population_val_queue = np.array(self.population_val_queue)[idx].tolist()
if survival_type == "mix":
self.population_arc_queue += age_arcs
self.population_val_queue += age_vals
# if keep_largest:
# idx = sorted(
# range(len(self.population_val_queue)),
# key=lambda i: self.population_val_queue[i], reverse=True
# )[-self.population_size:]
# self.population_arc_queue = np.array(self.population_arc_queue)[idx].tolist()
# self.population_val_queue = np.array(self.population_val_queue)[idx].tolist()
# else:
# # remove dead from left of population if exceed population_size
# self.population_arc_queue = self.population_arc_queue[-self.population_size :]
# self.population_val_queue = self.population_val_queue[-self.population_size :]
if self.sampler_type > 1:
# QQ TODO: build GBDT_rank:
self.update_GBDT()
def comb(self, trade_off=[0.1, 1, 0.1, 1]):
if len(self.all_rewards) <= self.population_size:
self.population_arc_queue = self.all_actions[-self.population_size:]
self.population_val_queue = self.all_rewards[-self.population_size:]
else:
if trade_off[3] == 0:
rank_weight = ss.rankdata(np.array(self.all_rewards)) / len(self.all_rewards)
age_weight = np.array(range(len(self.all_rewards), 0, -1)) / len(self.all_rewards)
age_weight[:self.population_size] = age_weight[self.population_size - 1]
flops_weight = ss.rankdata(np.array(self.all_flops)) / len(self.all_flops)
all_weight = trade_off[0] * rank_weight + trade_off[1] * age_weight + trade_off[2] * flops_weight
idx = np.array(
sorted(range(len(all_weight)), key=lambda i: all_weight[i]))[:self.population_size]
self.population_arc_queue = np.array(self.all_actions)[idx].tolist()
self.population_val_queue = np.array(self.all_rewards)[idx].tolist()
elif trade_off[3] == 1:
age_weight = np.array(range(len(self.all_rewards), 0, -1)) / len(self.all_rewards)
age_weight[:self.population_size] = age_weight[self.population_size - 1]
# filter with age weight
idx1 = np.array(
sorted(range(len(age_weight)), key=lambda i: age_weight[i]))[:2*self.population_size]
age_rewards = np.array(self.all_rewards)[idx1].tolist()
age_actions = np.array(self.all_actions)[idx1].tolist()
age_flops = np.array(self.all_flops)[idx1].tolist()
rank_weight = ss.rankdata(np.array(age_rewards)) / len(age_rewards)
age_weight = np.array(age_weight)[idx1]
flops_weight = ss.rankdata(np.array(age_flops)) / len(age_flops)
all_weight = trade_off[0] * rank_weight + trade_off[1] * age_weight + trade_off[2] * flops_weight
idx2 = np.array(
sorted(range(len(all_weight)), key=lambda i: all_weight[i]))[:self.population_size]
self.population_arc_queue = np.array(age_actions)[idx2].tolist()
self.population_val_queue = np.array(age_rewards)[idx2].tolist()
def update_GBDT(self):
k = len(self.all_arc_vecs)
r = 0.8
# create dataset for lightgbm
X_train, X_test, y_train1, y_test1 = self.all_arc_vecs[:int(k * r)], \
self.all_arc_vecs[int(k * r):], \
self.all_rewards[:int(k * r)], \
self.all_rewards[int(k * r):]
X_train, X_test, y_train1, y_test1 = np.array(X_train), \
np.array(X_test), \
np.array(y_train1), \
np.array(y_test1)
logger.warning('Train Shape {}{}{}{}'.format(X_train.shape,
X_test.shape,
y_train1.shape,
y_test1.shape))
y_train = ss.rankdata(-y_train1) - 1
y_test = ss.rankdata(-y_test1) - 1
y_train = y_train.astype(int)
y_test = y_test.astype(int)
lgb_train = lgb.Dataset(X_train, y_train, group=np.array([len(y_train)])) # free_raw_data=False
lgb_eval = lgb.Dataset(X_test, y_test, group=np.array([len(y_test)]),
reference=lgb_train) # ,free_raw_data=False
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'lambdarank', # 'regression', #
'metric': "ndcg", # "auc", #"ndcg", # {'l2', 'l1'},
'label_gain': np.array(list(range(len(y_train)))) * 2,
'max_depth': 3, # 'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'eval_at': self.eval_at,
'bagging_freq': 5,
'verbose': 0,
'num_threads': 5,
}
logger.warning('Starting training...')
# train
self.gbm = lgb.train(params,
lgb_train,
num_boost_round=1500,
valid_sets=lgb_eval,
early_stopping_rounds=150)
logger.warning('Finish training...')
def ML_sampler(self, parent):
vecs_list, arc_vec_list, vec_configs_list = [], [], []
i = 0
while i < self.sampler_type:
vecs, vec_configs = self.mutate_arc(parent=parent)
arc_vec = np.concatenate(vecs)
# check current
repeat_idx = (
[]
if not arc_vec_list
else np.where(
np.sum(abs(np.array(arc_vec_list) - arc_vec), 1) == 0
)[0]
)
if len(repeat_idx) != 0:
logger.warning("The architecture is same with: {}.".format(repeat_idx))
continue
# check all
repeat_idx = (
[]
if not self.all_arc_vecs
else np.where(
np.sum(abs(np.array(self.all_arc_vecs) - arc_vec), 1) == 0
)[0]
)
if len(repeat_idx) != 0:
logger.warning("The architecture is same all_arc_vectors with: {}.".format(repeat_idx))
continue
vecs_list.append(vecs)
arc_vec_list.append(arc_vec)
vec_configs_list.append(vec_configs)
i += 1
logger.warning('Test Shape {}'.format(np.array(arc_vec_list).shape))
y_pred = self.gbm.predict(np.array(arc_vec_list), num_iteration=self.gbm.best_iteration)
idx = np.where(y_pred == np.max(y_pred))[0][0]
return vecs_list[idx], vec_configs_list[idx]
|
AutoCTR-main
|
nasrec/evolutionary_controller.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from block_config import ttypes as b_config
from config import ttypes as config
logger = logging.getLogger(__name__)
class BaseSearcher(nn.Module):
def __init__(self, searcher_config, feature_config):
super(BaseSearcher, self).__init__()
        # for serialization purposes
self.searcher_config = deepcopy(searcher_config)
self.feature_config = deepcopy(feature_config)
self.dense_feature_options = self.feature_config.dense
self.sparse_feature_options = self.feature_config.sparse
self.num_dense_feat = len(self.dense_feature_options.features)
self.num_sparse_feat = len(self.sparse_feature_options.features)
def _set_micro_space_from_config(self):
# get micro space type list
self.micro_space_types = [
space_type.getType()
for space_type in self.controller_option.micro_space_types
]
        # get feature processing type list
self.feature_processing_type = [
processing_type.getType()
for processing_type in self.controller_option.feature_processing_type
]
# set up corresponding micro space
for space_type in self.controller_option.micro_space_types:
if space_type.getType() == config.MicroSearchSpaceType.MICRO_MLP:
self.micro_mlp_option = space_type.get_micro_mlp()
elif space_type.getType() == config.MicroSearchSpaceType.MICRO_CIN:
self.micro_cin_option = space_type.get_micro_cin()
if len(self.micro_cin_option.arc) == 0:
self.micro_cin_option.arc = [128]
if len(self.micro_cin_option.num_of_layers) == 0:
self.micro_cin_option.num_of_layers = [1]
elif space_type.getType() == config.MicroSearchSpaceType.MICRO_ATTENTION:
self.micro_attention_option = space_type.get_micro_attention()
if len(self.micro_attention_option.num_of_layers) == 0:
self.micro_attention_option.num_of_layers = [1]
if len(self.micro_attention_option.num_of_heads) == 0:
self.micro_attention_option.num_of_heads = [2]
if len(self.micro_attention_option.att_embed_dim) == 0:
self.micro_attention_option.att_embed_dim = [10]
if len(self.micro_attention_option.dropout_prob) == 0:
self.micro_attention_option.dropout_prob = [0.0]
def _init_base_searcher_params(self):
# get micro search space configurations
self._set_micro_space_from_config()
# constraint search space
if (
self.controller_option.macro_space_type
== config.MacroSearchSpaceType.INPUT_GROUP
):
self.num_dense_feat = 1
self.num_sparse_feat = 1
# length of the DAG to be searched (exclude the final clf layer)
self.num_blocks = self.controller_option.max_num_block
# block_types to be searched
self.block_types = list(set(self.controller_option.block_types))
self.num_block_type = len(self.block_types)
if self.num_block_type == 0:
raise ValueError("Should provide at least one block type to be searched.")
# construct dictionaries to map between int and block types
self.type_int_dict = {
self.block_types[i]: i for i in range(self.num_block_type)
}
self.int_type_dict = {
i: self.block_types[i] for i in range(self.num_block_type)
}
# all tokens to be searched
self.num_tokens = {
"block_type": self.num_block_type,
"dense_feat": self.num_dense_feat,
"sparse_feat": self.num_sparse_feat,
"skip_connect": self.num_blocks,
}
self.token_names = ["block_type", "dense_feat", "sparse_feat", "skip_connect"]
if (
self.controller_option.macro_space_type
== config.MacroSearchSpaceType.INPUT_ELASTIC_PRIOR
):
# constraint search space with smooth learnable priors
self.num_tokens["elastic_prior"] = 2
self.token_names.append("elastic_prior")
self.num_total_tokens = sum(v for _, v in self.num_tokens.items())
if config.MicroSearchSpaceType.MICRO_MLP in self.micro_space_types:
if (
b_config.ExtendedBlockType.MLP_DENSE
in self.controller_option.block_types
):
self.num_tokens["mlp_dense"] = len(self.micro_mlp_option.arc)
self.token_names.append("mlp_dense")
self.num_total_tokens += 1
if b_config.ExtendedBlockType.MLP_EMB in self.controller_option.block_types:
self.num_tokens["mlp_emb"] = len(self.micro_mlp_option.arc)
self.token_names.append("mlp_emb")
self.num_total_tokens += 1
if config.MicroSearchSpaceType.MICRO_CIN in self.micro_space_types:
if b_config.ExtendedBlockType.CIN in self.controller_option.block_types:
self.num_tokens["cin"] = len(self.micro_cin_option.arc) + len(
self.micro_cin_option.num_of_layers
)
self.token_names.append("cin")
self.num_total_tokens += 1 if len(self.micro_cin_option.arc) > 0 else 0
self.num_total_tokens += (
1 if len(self.micro_cin_option.num_of_layers) > 0 else 0
)
if config.MicroSearchSpaceType.MICRO_ATTENTION in self.micro_space_types:
if (
b_config.ExtendedBlockType.ATTENTION
in self.controller_option.block_types
):
self.att_num_tokens = {
"head": len(self.micro_attention_option.num_of_heads),
"layer": len(self.micro_attention_option.num_of_layers),
"emb": len(self.micro_attention_option.att_embed_dim),
"drop": len(self.micro_attention_option.dropout_prob),
}
self.num_tokens["attention"] = sum(
v for _, v in self.att_num_tokens.items()
)
self.token_names.append("attention")
for _, v in self.att_num_tokens.items():
self.num_total_tokens += 1 if v != 0 else 0
def _build_arc(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def random_sample(self):
vec_configs, vecs = [], []
for b_id in range(self.num_blocks):
# macro random search
block_type_vec = np.random.multinomial(
1, [1.0 / self.num_block_type] * self.num_block_type
)
block_type_id = np.argmax(block_type_vec)
dense_feat_vec = np.random.binomial(1, 0.5, self.num_dense_feat)
sparse_feat_vec = np.random.binomial(1, 0.5, self.num_sparse_feat)
skip_connection_vec = np.random.binomial(1, 0.5, self.num_blocks)
skip_connection_vec[b_id:] = 0 # cannot connect with later block
vec_config = {
"block_type": block_type_id,
"dense_feat": dense_feat_vec,
"sparse_feat": sparse_feat_vec,
"skip_connect": skip_connection_vec,
}
# micro random search
mlp_dense_vec, mlp_emb_vec, cin_vec, att_vec = (
np.array([]),
np.array([]),
np.array([]),
np.array([]),
)
if config.MicroSearchSpaceType.MICRO_MLP in self.micro_space_types:
if (
b_config.ExtendedBlockType.MLP_DENSE
in self.controller_option.block_types
):
mlp_dense_vec = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_mlp_option.arc)]
* len(self.micro_mlp_option.arc),
)
)
vec_config["mlp_dense"] = mlp_dense_vec
mlp_dense_vec = np.array([mlp_dense_vec])
if (
b_config.ExtendedBlockType.MLP_EMB
in self.controller_option.block_types
):
mlp_emb_vec = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_mlp_option.arc)]
* len(self.micro_mlp_option.arc),
)
)
vec_config["mlp_emb"] = mlp_emb_vec
mlp_emb_vec = np.array([mlp_emb_vec])
if config.MicroSearchSpaceType.MICRO_CIN in self.micro_space_types:
if b_config.ExtendedBlockType.CIN in self.controller_option.block_types:
cin_width = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.arc)]
* len(self.micro_cin_option.arc),
)
)
cin_depth = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.num_of_layers)]
* len(self.micro_cin_option.num_of_layers),
)
)
cin_vec = np.array([cin_width, cin_depth])
vec_config["cin"] = {"width": cin_width, "depth": cin_depth}
if config.MicroSearchSpaceType.MICRO_ATTENTION in self.micro_space_types:
if (
b_config.ExtendedBlockType.ATTENTION
in self.controller_option.block_types
):
att_head = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["head"]]
* self.att_num_tokens["head"],
)
)
att_layer = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["layer"]]
* self.att_num_tokens["layer"],
)
)
att_emb_dim = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["emb"]]
* self.att_num_tokens["emb"],
)
)
att_dropout_prob = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["drop"]]
* self.att_num_tokens["drop"],
)
)
att_vec = np.array(
[att_head, att_layer, att_emb_dim, att_dropout_prob]
)
vec_config["attention"] = {
"head": att_head,
"layer": att_layer,
"emb": att_emb_dim,
"drop": att_dropout_prob,
}
block_vec = np.concatenate(
[
block_type_vec,
dense_feat_vec,
sparse_feat_vec,
skip_connection_vec,
mlp_dense_vec,
mlp_emb_vec,
cin_vec,
att_vec,
]
)
vecs.append(block_vec)
vec_configs.append(vec_config)
        # concatenate the configs of an architecture into one vector
return vecs, vec_configs
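    # Editorial note (not in the original file): each categorical draw above,
    # np.argmax(np.random.multinomial(1, [1.0 / k] * k)), is a uniform draw
    # over k categories and could equivalently be np.random.randint(k); the
    # multinomial form is kept for block_type because its one-hot vector is
    # also concatenated into the architecture vector.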
def block_type_to_int(self, block_config):
if block_config.getType() == b_config.BlockConfig.MLP_BLOCK:
block_option = block_config.get_mlp_block()
key = (
b_config.ExtendedBlockType.MLP_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.MLP_EMB
)
elif block_config.getType() == b_config.BlockConfig.CROSSNET_BLOCK:
block_option = block_config.get_crossnet_block()
key = b_config.ExtendedBlockType.CROSSNET
elif block_config.getType() == b_config.BlockConfig.FM_BLOCK:
block_option = block_config.get_fm_block()
key = (
b_config.ExtendedBlockType.FM_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.FM_EMB
)
elif block_config.getType() == b_config.BlockConfig.DOTPROCESSOR_BLOCK:
block_option = block_config.get_dotprocessor_block()
key = (
b_config.ExtendedBlockType.DOTPROCESSOR_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.DOTPROCESSOR_EMB
)
elif block_config.getType() == b_config.BlockConfig.CAT_BLOCK:
block_option = block_config.get_cat_block()
key = (
b_config.ExtendedBlockType.CAT_DENSE
if block_option.type.getType() == b_config.BlockType.DENSE
else b_config.ExtendedBlockType.CAT_EMB
)
elif block_config.getType() == b_config.BlockConfig.CIN:
block_option = block_config.get_cin_block()
key = b_config.ExtendedBlockType.CIN
elif block_config.getType() == b_config.BlockConfig.ATTENTION:
block_option = block_config.get_attention_block()
key = b_config.ExtendedBlockType.ATTENTION
return self.type_int_dict[key], block_option
def vecs_to_model_config(self, vecs):
block_configs = []
for block_id, vec in enumerate(vecs):
block_configs.append(self.vec_to_block_config(vec, block_id + 1))
return block_configs
def vec_to_block_config(self, vec, block_id):
"""convert a controller vector to block_config
"""
# split a vector and convert the corresponding part to the id format
block_type_id = (
vec["block_type"].numpy()[0]
if type(vec["block_type"]) is torch.Tensor
else vec["block_type"]
)
input_dense = vec["dense_feat"]
input_sparse = vec["sparse_feat"]
skip_connection = vec["skip_connect"]
if (
self.controller_option.macro_space_type
== config.MacroSearchSpaceType.INPUT_GROUP
):
input_dense_id = [-1] if input_dense == 1 else []
input_sparse_id = [-1] if input_sparse == 1 else []
else:
input_dense_id = [i for i, e in enumerate(input_dense) if e == 1]
input_sparse_id = [i for i, e in enumerate(input_sparse) if e == 1]
skip_connection_id = [
i + 1 for i, e in enumerate(skip_connection) if e == 1 and i + 1 < block_id
]
dense_as_sparse = (
True
if config.FeatureProcessingType.IDASP in self.feature_processing_type
else False
)
# construct input config
        # original input features
input_feat_config = [
b_config.FeatSelectionConfig(
block_id=0, dense=input_dense_id, sparse=input_sparse_id
)
]
# input from other blocks' outputs
input_feat_config += [
b_config.FeatSelectionConfig(block_id=id, dense=[-1], sparse=[-1])
for id in skip_connection_id
]
comm_embed_dim = self.sparse_feature_options.embed_dim
block_type = self.int_type_dict[block_type_id]
if block_type == b_config.ExtendedBlockType.CROSSNET:
block_config = b_config.BlockConfig(
crossnet_block=b_config.CrossNetBlockConfig(
name="CrossNetBlocks",
block_id=block_id,
num_of_layers=1,
input_feat_config=input_feat_config,
cross_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.ATTENTION:
head, layer, emb, drop = (
(
self.micro_attention_option.num_of_heads[vec["attention"]["head"]],
self.micro_attention_option.num_of_layers[
vec["attention"]["layer"]
],
self.micro_attention_option.att_embed_dim[vec["attention"]["emb"]],
self.micro_attention_option.dropout_prob[vec["attention"]["drop"]],
)
if "attention" in vec
else (2, 1, 10, 0.0)
)
block_config = b_config.BlockConfig(
attention_block=b_config.AttentionBlockConfig(
name="AttentionBlock",
block_id=block_id,
input_feat_config=input_feat_config,
emb_config=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim, dense_as_sparse=dense_as_sparse
),
att_embed_dim=emb,
num_of_heads=head,
num_of_layers=layer,
dropout_prob=drop,
use_res=True,
batchnorm=False,
)
)
elif block_type == b_config.ExtendedBlockType.CIN:
arc = (
[self.micro_cin_option.arc[vec["cin"]["width"]]]
* self.micro_cin_option.num_of_layers[vec["cin"]["depth"]]
if "cin" in vec
else [128]
)
block_config = b_config.BlockConfig(
cin_block=b_config.CINBlockConfig(
name="CINBlock",
block_id=block_id,
emb_config=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim, dense_as_sparse=dense_as_sparse
),
arc=arc,
split_half=True,
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.MLP_DENSE:
arc = (
self.micro_mlp_option.arc[vec["mlp_dense"]]
if "mlp_dense" in vec
else 128
)
block_config = b_config.BlockConfig(
mlp_block=b_config.MLPBlockConfig(
name="MLPBlock",
block_id=block_id,
arc=[arc],
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.MLP_EMB:
arc = self.micro_mlp_option.arc[vec["mlp_emb"]] if "mlp_emb" in vec else 128
block_config = b_config.BlockConfig(
mlp_block=b_config.MLPBlockConfig(
name="MLPBlock",
block_id=block_id,
arc=[arc],
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.FM_DENSE:
block_config = b_config.BlockConfig(
fm_block=b_config.FMBlockConfig(
name="FMBlock",
block_id=block_id,
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.FM_EMB:
block_config = b_config.BlockConfig(
fm_block=b_config.FMBlockConfig(
name="FMBlock",
block_id=block_id,
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.DOTPROCESSOR_DENSE:
block_config = b_config.BlockConfig(
dotprocessor_block=b_config.DotProcessorBlockConfig(
name="DotProcessorBlock",
block_id=block_id,
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.DOTPROCESSOR_EMB:
block_config = b_config.BlockConfig(
dotprocessor_block=b_config.DotProcessorBlockConfig(
name="DotProcessorBlock",
block_id=block_id,
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.CAT_DENSE:
block_config = b_config.BlockConfig(
cat_block=b_config.CatBlockConfig(
name="CatBlock",
block_id=block_id,
type=b_config.BlockType(dense=b_config.DenseBlockType()),
input_feat_config=input_feat_config,
)
)
elif block_type == b_config.ExtendedBlockType.CAT_EMB:
block_config = b_config.BlockConfig(
cat_block=b_config.CatBlockConfig(
name="CatBlock",
block_id=block_id,
type=b_config.BlockType(
emb=b_config.EmbedBlockType(
comm_embed_dim=comm_embed_dim,
dense_as_sparse=dense_as_sparse,
)
),
input_feat_config=input_feat_config,
)
)
return block_config
def dicts_to_vecs(self, dicts):
vecs = []
for block in dicts:
for token_name in self.num_tokens:
if token_name in ["block_type"]:
tmp_vec = np.zeros([self.num_tokens[token_name]])
tmp_vec[block[token_name]] = 1.0
vecs.append(tmp_vec)
elif token_name in ["mlp_dense", "mlp_emb"]:
tmp_vec = np.array([block[token_name]])
vecs.append(tmp_vec)
elif token_name == "cin":
tmp_vec = np.array([block["cin"]["width"], block["cin"]["depth"]])
vecs.append(tmp_vec)
elif token_name == "attention":
tmp_vec = np.array(
[
block["attention"]["head"],
block["attention"]["layer"],
block["attention"]["emb"],
block["attention"]["drop"],
]
)
vecs.append(tmp_vec)
else:
vecs.append(block[token_name])
return vecs
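    # Editorial note (not in the original file): iterating over self.num_tokens
    # relies on dict insertion order (guaranteed since Python 3.7), so the
    # flattened vector always lists tokens in the order they were registered
    # in _init_base_searcher_params.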
def _action_equal(self, action1, action2):
return (
action1 == action2
if type(action1) == dict
else np.array_equal(action1, action2)
)
def mutate_arc(self, parent):
child = deepcopy(parent)
# 1. choose block to mutate
block_id = np.random.choice(self.num_blocks, 1)[0]
# 2. choose one token of a block to mutate (e.g., block_type, dense_feat)
token_name = np.random.choice(self.token_names, 1)[0]
while token_name == "skip_connect" and block_id == 0:
block_id = np.random.choice(self.num_blocks, 1)[0]
token_name = np.random.choice(self.token_names, 1)[0]
while (
token_name == "cin"
and len(self.micro_cin_option.arc) == 1
and len(self.micro_cin_option.num_of_layers) == 1
) or (
token_name == "attention"
and self.att_num_tokens["head"] == 1
and self.att_num_tokens["layer"] == 1
and self.att_num_tokens["emb"] == 1
and self.att_num_tokens["drop"] == 1
):
token_name = np.random.choice(self.token_names, 1)[0]
# 3. mutate the corresponding token
new_action = child[block_id][token_name]
while self._action_equal(new_action, child[block_id][token_name]):
if token_name in ["block_type", "mlp_dense", "mlp_emb"]:
new_action_vec = np.random.multinomial(
1, [1.0 / self.num_tokens[token_name]] * self.num_tokens[token_name]
)
new_action = np.argmax(new_action_vec)
elif token_name == "cin":
cin_width = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.arc)]
* len(self.micro_cin_option.arc),
)
)
cin_depth = np.argmax(
np.random.multinomial(
1,
[1.0 / len(self.micro_cin_option.num_of_layers)]
* len(self.micro_cin_option.num_of_layers),
)
)
new_action = {"width": cin_width, "depth": cin_depth}
elif token_name == "attention":
head = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["head"]]
* self.att_num_tokens["head"],
)
)
layer = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["layer"]]
* self.att_num_tokens["layer"],
)
)
emb = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["emb"]] * self.att_num_tokens["emb"],
)
)
drop = np.argmax(
np.random.multinomial(
1,
[1.0 / self.att_num_tokens["drop"]]
* self.att_num_tokens["drop"],
)
)
new_action = {"head": head, "layer": layer, "emb": emb, "drop": drop}
else:
new_action = np.random.binomial(1, 0.5, self.num_tokens[token_name])
child[block_id][token_name] = new_action
vecs = self.dicts_to_vecs(child)
return vecs, child
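# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): one evolutionary step
# mutates a single token of a single block, then re-encodes the child so the
# GBDT ranker can score it. Names below are illustrative only:
#   parent = searcher.random_sample()[1]        # list of per-block dicts
#   vecs, child = searcher.mutate_arc(parent)   # child differs in one token
#   arc_vec = np.concatenate(vecs)              # flat encoding of the child
# ---------------------------------------------------------------------------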
|
AutoCTR-main
|
nasrec/base_searcher.py
|
AutoCTR-main
|
nasrec/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import torch
from config import ttypes as config
from .evolutionary_controller import EvolutionaryController
from .random_controller import RandomController
logger = logging.getLogger(__name__)
def build_searcher(searcher_config, feature_config):
if searcher_config.getType() == config.SearcherConfig.RANDOM_SEARCHER:
return build_random_searcher(searcher_config, feature_config)
elif searcher_config.getType() == config.SearcherConfig.EVOLUTIONARY_SEARCHER:
return build_evolutionary_searcher(searcher_config, feature_config)
else:
raise ValueError("Unknown searcher type.")
def build_random_searcher(searcher_config, feature_config):
return RandomController(
searcher_config=searcher_config, feature_config=feature_config
)
def build_evolutionary_searcher(searcher_config, feature_config):
return EvolutionaryController(
searcher_config=searcher_config, feature_config=feature_config
)
def save_searcher(filename, searcher):
logger.info("Saving searcher to {}".format(filename))
state = {
"state_dict": searcher.state_dict(),
"searcher_config": searcher.searcher_config,
"feature_config": searcher.feature_config,
}
torch.save(state, filename)
def load_searcher(filename):
logger.info("Loading searcher from {}".format(filename))
state = torch.load(filename)
searcher_config = state["searcher_config"]
feature_config = state["feature_config"]
searcher = build_searcher(
searcher_config=searcher_config, feature_config=feature_config
)
searcher.load_state_dict(state["state_dict"])
return searcher
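# Editorial sketch (not part of the original file): save_searcher/load_searcher
# form a round trip that persists both the thrift configs and the module
# weights, so a searcher can be rebuilt without respecifying its configuration:
#   save_searcher("/tmp/searcher.pt", searcher)   # path is illustrative
#   searcher = load_searcher("/tmp/searcher.pt")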
|
AutoCTR-main
|
nasrec/builder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from config import ttypes as config
from models.nas_modules import NASRecNet
from .base_searcher import BaseSearcher
logger = logging.getLogger(__name__)
class RandomController(BaseSearcher):
def __init__(self, searcher_config, feature_config):
super(RandomController, self).__init__(searcher_config, feature_config)
self.controller_option = searcher_config.get_random_searcher()
self._init_base_searcher_params()
def _build_arc(self):
pass
def sample(self, batch_size=1, return_config=False):
"""Samples a batch_size number of NasRecNets from the controller, where
each node is made up of a set of blocks with number self.num_blocks
"""
if batch_size < 1:
raise ValueError("Wrong batch_size.")
nasrec_nets, all_vec_configs, nasrec_arc_vecs = [], [], []
for _ in range(batch_size):
vecs, vec_configs = self.random_sample()
arc_vec = np.concatenate(vecs)
nasrec_arc_vecs.append(arc_vec)
all_vec_configs.append(vec_configs)
block_configs = self.vecs_to_model_config(vec_configs)
model_config = config.ModelConfig(
nasrec_net=config.NASRecNetConfig(block_configs=block_configs)
)
if return_config:
nasrec_nets.append(model_config)
else:
nasrec_nets.append(NASRecNet(model_config, self.feature_config))
return nasrec_nets, [], all_vec_configs, nasrec_arc_vecs
def update(self, probs, rewards):
pass
|
AutoCTR-main
|
nasrec/random_controller.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import torch
import torch.nn as nn
from graphviz import Digraph
logger = logging.getLogger(__name__)
def reward_normalization(rewards, alpha=3, bias=0.5):
return rewards
# return 0.5 * np.tanh((rewards - bias) * alpha) + 0.5
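    # Editorial note (not in the original file): the commented-out variant
    # would squash rewards into (0, 1) via 0.5 * tanh((rewards - bias) * alpha)
    # + 0.5, mapping rewards == bias to 0.5; the active implementation is the
    # identity.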
def clean_feat_id(feat_ids, feat_dim, feat_type):
"""check and modify feat_ids, remove nonexist features and empty block
Args:
feat_ids: dictionary of {block:feat_ids} to be cleaned
feat_dim: dictionary of {block:feat_dim} used to clean feat_ids
feat_type: a string indicating feature type (i.e., "dense" or "sprase")
"""
tmp = {
k: [
feat_id
for feat_id in set(feat_ids[k])
            if feat_id < (feat_dim[k][0] if feat_type == "dense" else len(feat_dim[k]))
]
for k in set(feat_ids).intersection(set(feat_dim))
}
# remove empty and sorted
return {k: sorted(v) for k, v in tmp.items() if v}
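# Editorial usage sketch (not part of the original file); values illustrative:
#   feat_ids = {0: [0, 2, 9], 1: [1]}           # requested dense feat ids
#   feat_dim = {0: [3]}                         # block 0 exposes 3 dense feats
#   clean_feat_id(feat_ids, feat_dim, "dense")  # -> {0: [0, 2]}
# Block 1 is dropped because it is absent from feat_dim, and id 9 is dropped
# because it is out of range for block 0.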
def create_emb_converter(
num_dense_feat, feat_sparse_id, feat_sparse_dim, comm_embed_dim, num_dense_as_sp=0
):
# set embedding layers
feat_emb = nn.ModuleDict()
# set dense emb layer
if num_dense_feat > 0:
feat_emb["dense"] = (
nn.Linear(num_dense_feat, comm_embed_dim, bias=True)
if num_dense_feat != comm_embed_dim
else nn.Identity()
)
if num_dense_as_sp > 0:
feat_emb["dense_as_sparse"] = nn.Embedding(num_dense_as_sp, comm_embed_dim)
# set sparse emb layer
feat_emb["sparse"] = nn.ModuleDict()
sparse_in_dim = get_sparse_feat_dim(feat_sparse_id, feat_sparse_dim)
for block in feat_sparse_id:
feat_emb["sparse"][str(block)] = nn.ModuleDict()
if feat_sparse_id[block] == [-1]:
for feat_id in range(len(sparse_in_dim[block])):
feat_emb["sparse"][str(block)][str(feat_id)] = (
nn.Linear(sparse_in_dim[block][feat_id], comm_embed_dim, bias=True)
if sparse_in_dim[block][feat_id] != comm_embed_dim
else nn.Identity()
)
else:
for feat_id in feat_sparse_id[block]:
feat_emb["sparse"][str(block)][str(feat_id)] = (
nn.Linear(sparse_in_dim[block][feat_id], comm_embed_dim, bias=True)
if sparse_in_dim[block][feat_id] != comm_embed_dim
else nn.Identity()
)
return feat_emb
def convert_to_emb(
feat_dict,
feat_emb_layers,
num_dense_feat,
feat_sparse_id,
comm_embed_dim,
num_dense_as_sp=0,
):
"""
:param num_dense_as_sp: # of input dense features to be treated as sparse features
"""
# embedding all features into the same length and concatenate them into a matrix
# dense
feat = [] if num_dense_feat <= 0 else [feat_emb_layers["dense"](feat_dict["dense"])]
# sparse
sp_feats = []
for block in feat_sparse_id:
if feat_sparse_id[block] == [-1]:
for feat_id, sp in feat_dict["sparse"][block].items():
emb = feat_emb_layers["sparse"][str(block)][str(feat_id)]
sp = sp.to(dtype=torch.float)
sp_feats.append(emb(sp))
else:
for feat_id in feat_sparse_id[block]:
emb = feat_emb_layers["sparse"][str(block)][str(feat_id)]
sp = feat_dict["sparse"][block][feat_id]
sp = sp.to(dtype=torch.float)
sp_feats.append(emb(sp))
# dense_to_sparse
if num_dense_as_sp > 0:
emb_table = feat_emb_layers["dense_as_sparse"](
torch.tensor(list(range(num_dense_as_sp)))
)
emb_table = emb_table.repeat([feat_dict["dense_as_sparse"].shape[0], 1, 1])
dense_as_sp_feat = emb_table * feat_dict["dense_as_sparse"][:, :, None]
# concatenation
if feat + sp_feats:
feat = torch.cat(feat + sp_feats, dim=1)
batch_size = feat.shape[0]
feat = feat.view((batch_size, -1, comm_embed_dim))
if num_dense_as_sp > 0:
feat = torch.cat([feat, dense_as_sp_feat], dim=1)
else:
feat = dense_as_sp_feat
return feat
def cat_feats(feat_dict, feat_sparse_id):
# concatenate all features into one row vector
feat = [] if feat_dict["dense"].nelement() == 0 else [feat_dict["dense"]]
sp_feats = []
for block, feat_ids in feat_sparse_id.items():
if feat_ids == [-1]:
for feat_id in feat_dict["sparse"][block]:
sp = feat_dict["sparse"][block][feat_id]
sp = sp.to(dtype=torch.float)
sp_feats.append(sp)
else:
for feat_id in feat_sparse_id[block]:
sp = feat_dict["sparse"][block][feat_id]
sp = sp.to(dtype=torch.float)
sp_feats.append(sp)
return torch.cat(feat + sp_feats, dim=1)
def extract_dense_feat(feat_dense_dict, feat_dense_id):
# extract
dense = []
for block, feat_id in feat_dense_id.items():
if feat_dense_dict[block].nelement() != 0:
dense.append(
feat_dense_dict[block]
if feat_id == [-1]
else feat_dense_dict[block][:, feat_id]
)
return torch.cat(dense, dim=1) if dense else torch.Tensor([])
def config_to_dict(feat_configs):
feat_dense_id = {
feat_config.block_id: feat_config.dense
for feat_config in feat_configs
if len(feat_config.dense)
}
feat_sparse_id = {
feat_config.block_id: feat_config.sparse
for feat_config in feat_configs
if len(feat_config.sparse)
}
return feat_dense_id, feat_sparse_id
def get_sparse_feat_dim(feat_id_dict, feat_dim_dict):
# get sparse feature dimension
sparse_in_dim = {}
for block, feat_ids in feat_id_dict.items():
if feat_ids == [-1]:
sparse_in_dim[block] = feat_dim_dict[block]
else:
sparse_in_dim[block] = {}
for feat_id in feat_ids:
sparse_in_dim[block][feat_id] = feat_dim_dict[block][feat_id]
return sparse_in_dim
def get_sparse_feat_dim_num(feat_id_dict, feat_dim_dict):
# get sparse feature dimension
num_sparse_in_dim = 0
for block, feat_ids in feat_id_dict.items():
if feat_ids == [-1]:
num_sparse_in_dim += sum(feat_dim_dict[block])
else:
for feat_id in feat_ids:
num_sparse_in_dim += feat_dim_dict[block][feat_id]
return num_sparse_in_dim
def create_crossnet(num_of_layers, num_input_feat):
weight_w = torch.nn.ModuleList(
[torch.nn.Linear(num_input_feat, 1, bias=False) for _ in range(num_of_layers)]
)
weight_b = torch.nn.ParameterList(
[
torch.nn.Parameter(torch.zeros((num_input_feat,)))
for _ in range(num_of_layers)
]
)
batchnorm = torch.nn.ModuleList(
[nn.BatchNorm1d(num_input_feat, affine=False) for _ in range(num_of_layers)]
)
return weight_w, weight_b, batchnorm
def create_cin(layer_sizes, field_nums):
conv_layers, bias_layers, activation_layers = (
nn.ModuleList(),
nn.ParameterList(),
nn.ModuleList(),
)
for i, size in enumerate(layer_sizes):
single_conv_layer = nn.Conv2d(
in_channels=1, out_channels=size, kernel_size=(field_nums[i], field_nums[0])
)
conv_layers.append(single_conv_layer)
bias_layers.append(
nn.Parameter(torch.nn.init.normal_(torch.empty(size), mean=0.0, std=1e-6))
)
activation_layers.append(nn.ReLU())
return conv_layers, bias_layers, activation_layers
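# Editorial shape sketch (not part of the original file); arguments follow the
# way CINBlock._build_arc constructs field_nums, values illustrative:
#   conv, bias, act = create_cin(layer_sizes=[128, 128],
#                                field_nums=[27, 64, 64])
#   conv[0]  # Conv2d(1, 128, kernel_size=(27, 27)): X^l x X^0 interactions
#   conv[1]  # Conv2d(1, 128, kernel_size=(64, 27))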
def create_transformer(
emb_dim, att_embed_dim, num_of_heads, num_of_layers, use_res, use_batchnorm
):
w_query, w_key, w_value, w_res, bn = (
nn.ModuleList(),
nn.ModuleList(),
nn.ModuleList(),
nn.ModuleList(),
nn.ModuleList(),
)
num_units = att_embed_dim * num_of_heads
emb_dim = [emb_dim] + (num_of_layers - 1) * [num_units]
for l in range(num_of_layers):
w_query.append(nn.Linear(emb_dim[l], num_units, bias=True))
w_key.append(nn.Linear(emb_dim[l], num_units, bias=True))
w_value.append(nn.Linear(emb_dim[l], num_units, bias=True))
if use_res:
w_res.append(nn.Linear(emb_dim[l], num_units, bias=True))
if use_batchnorm:
bn.append(nn.BatchNorm1d(num_units))
return w_query, w_key, w_value, w_res, bn
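# Editorial shape sketch (not part of the original file), values illustrative:
#   wq, wk, wv, wres, bn = create_transformer(
#       emb_dim=16, att_embed_dim=10, num_of_heads=2, num_of_layers=2,
#       use_res=True, use_batchnorm=False)
#   wq[0]  # Linear(16, 20): layer 0 consumes the input embedding dim
#   wq[1]  # Linear(20, 20): later layers consume num_units = 10 * 2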
def nasnet_visual(nasrec_model):
""" function to visualize the nasrec net model
"""
dot = Digraph(comment="Graph", format="png")
with dot.subgraph() as s:
s.attr(rank="same")
s.node("0_d", "Dense", color="red")
s.node("0_s", "Sparse", color="red")
block_name = []
for i, block in enumerate(nasrec_model.blocks):
block_name.append(block.__str__() + "Block")
dot.node(
str(i + 1), str(i + 1) + "_" + block_name[-1], shape="box", color="green"
)
dense = block.feat_dense_id
sparse = block.feat_sparse_id
skip_block_id = set(dense.keys()).union(set(sparse.keys()))
cross_dense = []
cross_sparse = []
if block_name[-1] == "CrossNet":
cross_dense = block.cross_feat_dense_id
cross_sparse = block.cross_feat_sparse_id
skip_block_id = skip_block_id.union(set(cross_dense.keys()))
skip_block_id = skip_block_id.union(set(cross_sparse.keys()))
for id in skip_block_id:
if id == 0:
if id in dense or (cross_dense and id in cross_dense):
dot.edge("0_d", str(i + 1))
if id in sparse or (cross_sparse and id in cross_sparse):
dot.edge("0_s", str(i + 1))
else:
dot.edge(str(id), str(i + 1))
return dot
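# Editorial usage sketch (not part of the original file); filename illustrative:
#   dot = nasnet_visual(nasrec_model)
#   dot.render("nasrec_arc")  # writes nasrec_arc.png (format set above)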
|
AutoCTR-main
|
nasrec/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from block_config import ttypes as b_config
from models.utils import create_mlp
from .utils import (
cat_feats,
clean_feat_id,
config_to_dict,
convert_to_emb,
create_cin,
create_crossnet,
create_emb_converter,
create_transformer,
extract_dense_feat,
get_sparse_feat_dim_num,
)
logger = logging.getLogger(__name__)
def set_block_from_config(block_config, feat_dim):
if block_config is None:
return None
name2block = {
b_config.BlockConfig.MLP_BLOCK: MLPBlock,
b_config.BlockConfig.CROSSNET_BLOCK: CrossNetBlock,
b_config.BlockConfig.FM_BLOCK: FMBlock,
b_config.BlockConfig.DOTPROCESSOR_BLOCK: DotProcessorBlock,
b_config.BlockConfig.CAT_BLOCK: CatBlock,
b_config.BlockConfig.CIN_BLOCK: CINBlock,
b_config.BlockConfig.ATTENTION_BLOCK: AttentionBlock,
}
block_name = block_config.getType() # block_config.name
block = name2block[block_name]
return block(block_config, feat_dim)
def save_block(block, filename):
logger.info("Saving block to {}".format(filename))
state = {
"state_dict": block.state_dict(),
"block_config": block.block_config,
"feat_dim": {"dense": block.feat_dense_dim, "sparse": block.feat_sparse_dim},
}
torch.save(state, filename)
def load_block(filename):
logger.info("Loading model from {}".format(filename))
state = torch.load(filename)
block_config = state["block_config"]
feat_dim = state["feat_dim"]
block = set_block_from_config(block_config=block_config, feat_dim=feat_dim)
block.load_state_dict(state["state_dict"])
return block
class BaseBlock(nn.Module):
def __init__(self, block_config, feat_dim):
super(BaseBlock, self).__init__()
        # for serialization purposes
self.block_config = deepcopy(block_config)
# extract input feat_dim dictionary {block_id: feat_dim (list)}
self.feat_dense_dim = feat_dim["dense"]
self.feat_sparse_dim = feat_dim["sparse"]
def _init_basic_block_params(self):
self.block_id = self.block_option.block_id
self.input_feat_config = self.block_option.input_feat_config
# convert input feat_id into dictionary format {block_id: feat_id (list)}
self.feat_dense_id, self.feat_sparse_id = config_to_dict(
self.block_option.input_feat_config
)
# check and modify feat_ids
self.feat_dense_id = clean_feat_id(
self.feat_dense_id, self.feat_dense_dim, "dense"
)
self.feat_sparse_id = clean_feat_id(
self.feat_sparse_id, self.feat_sparse_dim, "sparse"
)
# get input feature number
# dense feature
self.num_dense_feat = sum(
(
self.feat_dense_dim[b][0] # all dense feats in block b
if self.feat_dense_id[b] == [-1]
else len(self.feat_dense_id[b])
)
for b in self.feat_dense_id
)
self.num_sparse_feat = sum(
(
len(self.feat_sparse_dim[b])
if self.feat_sparse_id[b] == [-1]
else len(self.feat_sparse_id[b])
)
for b in self.feat_sparse_id
)
def _refine_emb_arc(self):
        # refine the arc if the raw input dense features are treated as sparse
        # treat input dense features in block 0 as sparse features, if any exist
self.dense_as_sparse_id, self.num_dense_as_sparse_feat = None, 0
if self.emb_config.dense_as_sparse and 0 in self.feat_dense_id:
self.dense_as_sparse_id = self.feat_dense_id.pop(0)
self.num_dense_as_sparse_feat = (
self.feat_dense_dim[0][0]
if self.dense_as_sparse_id == [-1]
else len(self.dense_as_sparse_id)
)
self.num_dense_feat -= self.num_dense_as_sparse_feat
self.num_sparse_feat += self.num_dense_as_sparse_feat
def forward(self, feat_dict):
raise NotImplementedError
def dim_config(self, feat_dim):
raise NotImplementedError
def __str__(self):
return type(self).__name__[:-5]
class MLPBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(MLPBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_mlp_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
# set mlp layer
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
self.layers = create_mlp(
[self.num_input_feat] + self.block_option.arc,
ly_act=self.block_option.ly_act,
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
            # set embedding layer
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
# set mlp layer
self.layers = create_mlp(
[self.emb_config.comm_embed_dim] + self.block_option.arc,
ly_act=self.block_option.ly_act,
)
else:
raise ValueError("Unsupported configuration for MLPBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat_dim["dense"][self.block_id] = [self.block_option.arc[-1]]
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_feat > 0:
feat_dim["sparse"][self.block_id] = [self.block_option.arc[-1]] * (
self.num_sparse_feat + 1
)
else:
feat_dim["sparse"][self.block_id] = [
self.block_option.arc[-1]
] * self.num_sparse_feat
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
            try:
                p = self.layers(feat)
            except Exception:
                logger.exception(
                    "MLPBlock forward failed on input of shape %s", tuple(feat.shape)
                )
                raise
feat_dict["dense"][self.block_id] = p
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
p = self.layers(feat)
feat_dict["sparse"][self.block_id] = {
feat_id: p[:, feat_id] for feat_id in range(p.shape[1]) # 1 for dense
}
return feat_dict
def __str__(self):
return (
super().__str__()
+ "("
+ ", ".join(str(item) for item in self.block_option.arc)
+ ")"
)
class CrossNetBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(CrossNetBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_crossnet_block()
self.num_of_layers = self.block_option.num_of_layers
self._init_basic_block_params()
self._init_cross_params()
self._build_arc()
def _init_cross_params(self):
# cross input feat id
self.cross_feat_config = self.block_option.cross_feat_config
# convert cross input feat_id into dictionary format {block_id: feat_id (list)}
self.cross_feat_dense_id, self.cross_feat_sparse_id = config_to_dict(
self.block_option.cross_feat_config
)
# check and modify feat_ids
self.cross_feat_dense_id = clean_feat_id(
self.cross_feat_dense_id, self.feat_dense_dim, "dense"
)
self.cross_feat_sparse_id = clean_feat_id(
            self.cross_feat_sparse_id, self.feat_sparse_dim, "sparse"
)
# get cross input feature number
# dense feature
self.cross_num_dense_feat_per_block = []
for b in self.cross_feat_dense_id:
self.cross_num_dense_feat_per_block += (
self.feat_dense_dim[b] # all dense feats in block b
if self.cross_feat_dense_id[b] == [-1]
else [len(self.cross_feat_dense_id[b])]
)
# sparse feature
self.cross_num_sparse_feat_per_block = []
for b in self.cross_feat_sparse_id:
self.cross_num_sparse_feat_per_block += (
self.feat_sparse_dim[b]
if self.cross_feat_sparse_id[b] == [-1]
else [len(self.cross_feat_sparse_id[b])]
)
self.cross_num_dense_feat = sum(self.cross_num_dense_feat_per_block)
self.cross_num_sparse_feat = sum(self.cross_num_sparse_feat_per_block)
        # reset feat_ids if the block is an empty block
if (
self.num_sparse_feat + self.num_dense_feat == 0
or self.cross_num_dense_feat + self.cross_num_sparse_feat == 0
):
self.feat_dense_id = {}
self.feat_sparse_id = {}
self.cross_feat_dense_id = {}
self.cross_feat_sparse_id = {}
def _build_arc(self):
if (
self.num_sparse_feat + self.num_dense_feat == 0
or self.cross_num_dense_feat + self.cross_num_sparse_feat == 0
):
return
self.num_input_feat = self.num_dense_feat
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
self.cross_num_input_feat = self.cross_num_dense_feat
self.cross_num_input_feat += get_sparse_feat_dim_num(
self.cross_feat_sparse_id, self.feat_sparse_dim
)
if self.num_input_feat != self.cross_num_input_feat:
            # construct an embedding layer
self.emb_layer = nn.Linear(self.cross_num_input_feat, self.num_input_feat)
self.weight_w, self.weight_b, self.batchnorm = create_crossnet(
self.num_of_layers, self.num_input_feat
)
def dim_config(self, feat_dim):
if (
self.num_sparse_feat + self.num_dense_feat != 0
and self.cross_num_dense_feat + self.cross_num_sparse_feat != 0
):
feat_dim["dense"][self.block_id] = [self.num_input_feat]
return feat_dim
def forward(self, feat_dict):
if (
self.num_sparse_feat + self.num_dense_feat == 0
or self.cross_num_dense_feat + self.cross_num_sparse_feat == 0
):
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
cross_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.cross_feat_dense_id),
"sparse": feat_dict["sparse"],
}
# concatenate two feature dicts into two vectors
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
cross_feat = cat_feats(cross_feat_dict, self.cross_feat_sparse_id)
# crossnet
if self.num_input_feat != self.cross_num_input_feat:
cross_feat = self.emb_layer(cross_feat)
for i in range(self.num_of_layers):
feat = cross_feat * self.weight_w[i](feat) + self.weight_b[i] + feat
if self.block_option.batchnorm:
feat = self.batchnorm[i](feat)
feat_dict["dense"][self.block_id] = feat
return feat_dict
def __str__(self):
return super().__str__()
class FMBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(FMBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_fm_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
# set FM layer
# first order embedding layer
self.weight_w_first = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
self.weight_b_first = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
# second order embedding layer
self.weight_w_second = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
self.weight_b_second = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
# set FM layer
# first order embedding layer
self.first_order_feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
1,
self.num_dense_as_sparse_feat,
)
# second order embedding layer
self.second_order_feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
else:
raise ValueError("Unsupported configuration for FMBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
feat_dim["dense"][self.block_id] = [1]
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
# compute FM layer
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
feat1 = feat * self.weight_w_first + self.weight_b_first
feat2 = feat * self.weight_w_second + self.weight_b_second
p = self.fm_sum(feat1, feat2)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
feat1 = convert_to_emb(
extracted_feat_dict,
self.first_order_feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
1,
self.num_dense_as_sparse_feat,
)
feat2 = convert_to_emb(
extracted_feat_dict,
self.second_order_feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
p = self.fm_sum(feat1, feat2)
feat_dict["dense"][self.block_id] = p
return feat_dict
def fm_sum(self, feat1, feat2):
if self.block_option.type.getType() == b_config.BlockType.DENSE:
# first order
p1 = torch.sum(feat1, 1)
# second order
sum_square = torch.pow(torch.sum(feat2, 1), 2)
square_sum = torch.sum(torch.pow(feat2, 2), 1)
p2 = (sum_square - square_sum) * 0.5
p = p1 + p2
elif self.block_option.type.getType() == b_config.BlockType.EMB:
p1 = torch.sum(feat1, [1, 2])
sum_square = torch.pow(torch.sum(feat2, 1), 2)
square_sum = torch.sum(torch.pow(feat2, 2), 1)
p2 = (sum_square - square_sum) * 0.5
p = p1 + torch.sum(p2, 1)
return p[:, None]
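    # Editorial note (not in the original file): fm_sum uses the standard FM
    # identity sum_{i<j} x_i x_j = 0.5 * ((sum_i x_i)^2 - sum_i x_i^2), which
    # is exactly (sum_square - square_sum) * 0.5 above; the embedded case
    # applies it per embedding dimension before summing.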
def __str__(self):
return super().__str__()
class DotProcessorBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(DotProcessorBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_dotprocessor_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
# set DP layer
self.weight_w = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
self.weight_b = nn.Parameter(
torch.nn.init.normal_(torch.empty(self.num_input_feat))
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
self.num_input_feat = 1 + self.num_sparse_feat
# set Embedding Layer
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
else:
raise ValueError("Unsupported configuration for DotProcessorBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
feat_dim["dense"][self.block_id] = [
int(self.num_input_feat * (self.num_input_feat + 1) / 2)
]
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
# compute DP layer
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat = cat_feats(extracted_feat_dict, self.feat_sparse_id)
feat = feat * self.weight_w + self.weight_b
p = self.dp_sum(feat[:, :, None])
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
p = self.dp_sum(feat)
feat_dict["dense"][self.block_id] = p
return feat_dict
def dp_sum(self, feat):
Z = torch.matmul(feat, torch.transpose(feat, 1, 2))
Zflat = Z.view((feat.shape[0], -1))
num_ints = int(self.num_input_feat * (self.num_input_feat + 1) / 2)
return Zflat[:, :num_ints]
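    # Editorial note (not in the original file): num_ints = n(n+1)/2 is the
    # number of unique pairwise products of n features; the slice keeps that
    # many entries of the row-major flattened Gram matrix rather than
    # extracting the upper triangle explicitly.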
def __str__(self):
return super().__str__()
class CatBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(CatBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_cat_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
if self.block_option.type.getType() == b_config.BlockType.DENSE:
self.num_input_feat = self.num_dense_feat
if self.num_sparse_feat > 0:
self.num_input_feat += get_sparse_feat_dim_num(
self.feat_sparse_id, self.feat_sparse_dim
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
self.emb_config = self.block_option.type.get_emb()
self._refine_emb_arc()
self.num_input_feat = 1 + self.num_sparse_feat
# set Embedding Layer
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
else:
raise ValueError("Unsupported configuration for CatBlock type.")
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
if self.block_option.type.getType() == b_config.BlockType.DENSE:
feat_dim["dense"][self.block_id] = [self.num_input_feat]
elif self.block_option.type.getType() == b_config.BlockType.EMB:
feat_dim["sparse"][self.block_id] = (
[self.emb_config.comm_embed_dim] * (self.num_sparse_feat + 1)
if self.num_dense_feat > 0
else [self.emb_config.comm_embed_dim] * self.num_sparse_feat
)
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
# compute Cat layer
if self.block_option.type.getType() == b_config.BlockType.DENSE:
p = cat_feats(extracted_feat_dict, self.feat_sparse_id)
feat_dict["dense"][self.block_id] = (
p[:, None] if self.num_input_feat == 1 else p
)
elif self.block_option.type.getType() == b_config.BlockType.EMB:
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
p = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
feat_dict["sparse"][self.block_id] = {
feat_id: p[:, feat_id] for feat_id in range(p.shape[1]) # 1 for dense
}
return feat_dict
def __str__(self):
return super().__str__()
class CINBlock(BaseBlock):
"""Compressed Interaction Network used in xDeepFM.
https://arxiv.org/pdf/1803.05170.pdf.
"""
def __init__(self, block_config, feat_dim):
super(CINBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_cin_block()
self.layer_sizes = self.block_option.arc
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
self.emb_config = self.block_option.emb_config
self._refine_emb_arc()
self.field_nums = [self.num_sparse_feat + 1]
for i, size in enumerate(self.layer_sizes):
if self.block_option.split_half:
if i != len(self.layer_sizes) - 1 and size % 2 > 0:
raise ValueError(
"layer_size must be even number except for the last layer when split_half=True"
)
self.field_nums.append(size // 2)
else:
self.field_nums.append(size)
        # set embedding layers
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
# set CIN convolutional layers
self.conv_layers, self.bias_layers, self.activation_layers = create_cin(
self.layer_sizes, self.field_nums
)
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
feat_dim["dense"][self.block_id] = (
[sum(self.layer_sizes[:-1]) // 2 + self.layer_sizes[-1]]
if self.block_option.split_half
else [sum(self.layer_sizes)]
)
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
# get feature matrix X0
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
if feat.dim() != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions"
% (feat.dim())
)
p = self.cin(feat)
feat_dict["dense"][self.block_id] = p
return feat_dict
def cin(self, feat):
dim = feat.shape[-1]
p = []
hidden_nn_layers = [feat]
cross_feats = torch.split(hidden_nn_layers[0], dim * [1], 2)
for l_idx, layer_size in enumerate(self.layer_sizes):
curr_feats = torch.split(hidden_nn_layers[-1], dim * [1], 2)
dot_result_m = torch.stack(
[
torch.bmm(curr_feats[t_idx], t.transpose(1, 2))
for t_idx, t in enumerate(cross_feats)
]
)
dot_result_m = dot_result_m.view(
-1, 1, dot_result_m.shape[2], dot_result_m.shape[3]
)
# apply conv, add bias, activation
curr_out = torch.squeeze(self.conv_layers[l_idx](dot_result_m))
curr_out = curr_out.view(dim, -1, layer_size) # (dim * batch_size * Hk)
curr_out = curr_out + self.bias_layers[l_idx]
curr_out = self.activation_layers[l_idx](curr_out)
curr_out = curr_out.permute(1, 2, 0)
if self.block_option.split_half:
if l_idx != len(self.layer_sizes) - 1:
next_hidden, direct_connect = torch.split(
curr_out, 2 * [layer_size // 2], 1
)
else:
direct_connect = curr_out
next_hidden = 0
else:
direct_connect = curr_out
next_hidden = curr_out
p.append(direct_connect)
hidden_nn_layers.append(next_hidden)
return torch.cat(p, 1).sum(-1)
def __str__(self):
return super().__str__()
class AttentionBlock(BaseBlock):
def __init__(self, block_config, feat_dim):
super(AttentionBlock, self).__init__(block_config, feat_dim)
self.block_option = self.block_config.get_attention_block()
self._init_basic_block_params()
self._build_arc()
def _build_arc(self):
if self.num_sparse_feat + self.num_dense_feat == 0:
return
self.emb_config = self.block_option.emb_config
self.att_embed_dim = self.block_option.att_embed_dim
self.num_of_heads = self.block_option.num_of_heads
self.num_of_layers = self.block_option.num_of_layers
self.use_res = self.block_option.use_res
self.use_batchnorm = self.block_option.batchnorm
self._dropout_p = self.block_option.dropout_prob
self._refine_emb_arc()
        # set embedding layers
self.feat_emb = create_emb_converter(
self.num_dense_feat,
self.feat_sparse_id,
self.feat_sparse_dim,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
# set attention params
self.query_layers, self.key_layers, self.value_layers, self.res_layers, self.bn_layers = create_transformer(
self.emb_config.comm_embed_dim,
self.att_embed_dim,
self.num_of_heads,
self.num_of_layers,
self.use_res,
self.use_batchnorm,
)
def dim_config(self, feat_dim):
if self.num_sparse_feat + self.num_dense_feat != 0:
if self.num_dense_feat > 0:
feat_dim["sparse"][self.block_id] = [
self.att_embed_dim * self.num_of_heads
] * (self.num_sparse_feat + 1)
else:
feat_dim["sparse"][self.block_id] = [
self.att_embed_dim * self.num_of_heads
] * self.num_sparse_feat
return feat_dim
def forward(self, feat_dict):
if self.num_sparse_feat + self.num_dense_feat == 0:
return feat_dict
# extract dense features based on id
extracted_feat_dict = {
"dense": extract_dense_feat(feat_dict["dense"], self.feat_dense_id),
"sparse": feat_dict["sparse"],
}
if self.num_dense_as_sparse_feat > 0:
extracted_feat_dict["dense_as_sparse"] = (
feat_dict["dense"][0]
if self.dense_as_sparse_id == [-1]
else feat_dict["dense"][0][:, self.dense_as_sparse_id]
)
# get feature matrix X0
feat = convert_to_emb(
extracted_feat_dict,
self.feat_emb,
self.num_dense_feat,
self.feat_sparse_id,
self.emb_config.comm_embed_dim,
self.num_dense_as_sparse_feat,
)
if feat.dim() != 3:
raise ValueError(
"Unexpected inputs dimensions %d, expect to be 3 dimensions"
% (feat.dim())
)
p = self.transformer(feat)
feat_dict["sparse"][self.block_id] = {
feat_id: p[:, feat_id] for feat_id in range(p.shape[1]) # 1 for dense
}
return feat_dict
def transformer(self, feat):
attention = feat
for l in range(self.num_of_layers):
Q = F.relu(self.query_layers[l](attention))
K = F.relu(self.key_layers[l](attention))
V = F.relu(self.value_layers[l](attention))
if self.use_res:
V_res = F.relu(self.res_layers[l](attention))
# Split and concat
Q_ = torch.cat(Q.split(split_size=self.att_embed_dim, dim=2), dim=0)
K_ = torch.cat(K.split(split_size=self.att_embed_dim, dim=2), dim=0)
V_ = torch.cat(V.split(split_size=self.att_embed_dim, dim=2), dim=0)
# calculate QK^T
weights = torch.matmul(Q_, K_.transpose(1, 2))
# normalize with sqrt(dk)
weights = weights / np.sqrt(self.att_embed_dim)
# put it to softmax
weights = F.softmax(weights, dim=-1)
# apply dropout
weights = F.dropout(weights, self._dropout_p)
# multiply it with V
attention = torch.matmul(weights, V_)
# convert attention back to its input original size
restore_chunk_size = int(attention.size(0) / self.num_of_heads)
attention = torch.cat(
attention.split(split_size=restore_chunk_size, dim=0), dim=2
)
# residual connection
if self.use_res:
attention += V_res
# TODO: do we need this?
attention = F.relu(attention)
# apply batch normalization
if self.use_batchnorm:
attention = self.bn_layers[l](attention.transpose(1, 2)).transpose(1, 2)
return attention

    def __str__(self):
return super().__str__()
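
# Editorial sketch (not from the original repo): a minimal, self-contained
# illustration of the head packing used in `AttentionBlock.transformer`
# above, where the H heads are stacked along the batch dimension rather than
# on a separate tensor axis. All shapes below are arbitrary, and it assumes
# the module-level `torch`, `F`, and `np` imports already used in this file.
if __name__ == "__main__":
    B, N, H, D = 4, 10, 2, 8  # batch, features, heads, per-head dim
    x = torch.randn(B, N, H * D)
    # pack heads into the batch dimension: (H*B, N, D)
    x_ = torch.cat(x.split(D, dim=2), dim=0)
    # scaled dot-product attention per packed head
    w = F.softmax(torch.matmul(x_, x_.transpose(1, 2)) / np.sqrt(D), dim=-1)
    out = torch.matmul(w, x_)
    # unpack: chunk the batch dimension and re-concatenate heads on the last dim
    out = torch.cat(out.split(B, dim=0), dim=2)
    assert out.shape == (B, N, H * D)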
|
AutoCTR-main
|
nasrec/blocks.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
|
AutoCTR-main
|
gen-py/__init__.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import block_config.ttypes
from .ttypes import *
|
AutoCTR-main
|
gen-py/config/constants.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
__all__ = ['ttypes', 'constants']
|
AutoCTR-main
|
gen-py/config/__init__.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import block_config.ttypes
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
if '__pypy__' not in sys.builtin_module_names:
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'MacroSearchSpaceType', 'DataFromFileConfig', 'DataConfig', 'MicroClose', 'MicroMLPConfig', 'MicroCINConfig', 'MicroAttentionConfig', 'MicroSearchSpaceType', 'InputDenseAsSparse', 'FeatureProcessingType', 'NASRecNetConfig', 'RandomSearcherConfig', 'EvolutionarySearcherConfig', 'SearcherConfig', 'ModelConfig', 'SGDOptimConfig', 'AdagradOptimConfig', 'SparseAdamOptimConfig', 'AdamOptimConfig', 'RMSpropOptimConfig', 'OptimConfig', 'SumPooling', 'AvgPooling', 'PoolingConfig', 'SparseFeatureItem', 'SparseFeatureConfig', 'DenseFeatureConfig', 'FeatureConfig', 'BCEWithLogitsLoss', 'BCELoss', 'MSELoss', 'LossConfig', 'LoggingConfig', 'TrainConfig', 'EvalConfig', 'CheckpointConfig', 'KoskiReaderConfig', 'PerformanceConfig']
class MacroSearchSpaceType:
INPUT_DIFF = 1
INPUT_GROUP = 2
INPUT_DIFF_PRIOR = 3
INPUT_ELASTIC_PRIOR = 4
_VALUES_TO_NAMES = {
1: "INPUT_DIFF",
2: "INPUT_GROUP",
3: "INPUT_DIFF_PRIOR",
4: "INPUT_ELASTIC_PRIOR",
}
_NAMES_TO_VALUES = {
"INPUT_DIFF": 1,
"INPUT_GROUP": 2,
"INPUT_DIFF_PRIOR": 3,
"INPUT_ELASTIC_PRIOR": 4,
}
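
# Editorial sketch (not part of the generated file): thrift-py enums are
# plain classes of int constants, so values and names round-trip through the
# generated lookup maps rather than a Python ``enum.Enum`` API, e.g.::
#
#   MacroSearchSpaceType._VALUES_TO_NAMES[MacroSearchSpaceType.INPUT_GROUP]  # "INPUT_GROUP"
#   MacroSearchSpaceType._NAMES_TO_VALUES["INPUT_GROUP"]                     # 2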
class DataFromFileConfig:
"""
Attributes:
- data_file
- batch_size
- num_batches
- splits
- num_samples_meta
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.data_file = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.batch_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.num_batches = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.splits = []
(_etype3, _size0) = iprot.readListBegin()
if _size0 >= 0:
for _i4 in six.moves.range(_size0):
_elem5 = iprot.readFloat()
self.splits.append(_elem5)
else:
while iprot.peekList():
_elem6 = iprot.readFloat()
self.splits.append(_elem6)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.num_samples_meta = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DataFromFileConfig')
if self.data_file != None:
oprot.writeFieldBegin('data_file', TType.STRING, 1)
oprot.writeString(self.data_file.encode('utf-8')) if UTF8STRINGS and not isinstance(self.data_file, bytes) else oprot.writeString(self.data_file)
oprot.writeFieldEnd()
if self.batch_size != None:
oprot.writeFieldBegin('batch_size', TType.I32, 2)
oprot.writeI32(self.batch_size)
oprot.writeFieldEnd()
if self.num_batches != None:
oprot.writeFieldBegin('num_batches', TType.I32, 3)
oprot.writeI32(self.num_batches)
oprot.writeFieldEnd()
if self.splits != None:
oprot.writeFieldBegin('splits', TType.LIST, 4)
oprot.writeListBegin(TType.FLOAT, len(self.splits))
for iter7 in self.splits:
oprot.writeFloat(iter7)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_samples_meta != None:
oprot.writeFieldBegin('num_samples_meta', TType.I32, 5)
oprot.writeI32(self.num_samples_meta)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.data_file is not None:
value = pprint.pformat(self.data_file, indent=0)
value = padding.join(value.splitlines(True))
L.append(' data_file=%s' % (value))
if self.batch_size is not None:
value = pprint.pformat(self.batch_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' batch_size=%s' % (value))
if self.num_batches is not None:
value = pprint.pformat(self.num_batches, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_batches=%s' % (value))
if self.splits is not None:
value = pprint.pformat(self.splits, indent=0)
value = padding.join(value.splitlines(True))
L.append(' splits=%s' % (value))
if self.num_samples_meta is not None:
value = pprint.pformat(self.num_samples_meta, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_samples_meta=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
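
# Editorial sketch (not part of the generated file): a binary-protocol round
# trip for DataFromFileConfig. It relies on the thrift_spec/__init__ wiring
# the generator installs at the bottom of this module, and the keyword
# arguments below are illustrative values, not defaults from the schema.
def _example_roundtrip_data_from_file_config():
    cfg = DataFromFileConfig(
        data_file="train.tsv",
        batch_size=128,
        num_batches=1000,
        splits=[0.8, 0.1, 0.1],
        num_samples_meta=100000,
    )
    buf = TTransport.TMemoryBuffer()
    cfg.write(TBinaryProtocol.TBinaryProtocol(buf))
    decoded = DataFromFileConfig()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    assert decoded == cfg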
class DataConfig(object):
"""
Attributes:
- from_file
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
FROM_FILE = 1
@staticmethod
def isUnion():
return True
def get_from_file(self):
assert self.field == 1
return self.value
def set_from_file(self, value):
self.field = 1
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('from_file', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
from_file = DataFromFileConfig()
from_file.read(iprot)
assert self.field == 0 and self.value is None
self.set_from_file(from_file)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('DataConfig')
if self.field == 1:
oprot.writeFieldBegin('from_file', TType.STRUCT, 1)
from_file = self.value
from_file.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
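
# Editorial sketch (not part of the generated file): generated unions hold
# exactly one active member; setters record the active field id and each
# getter asserts that its field is the active one. Like the struct example
# above, this relies on the __init__ wiring installed at the module bottom.
def _example_data_config_union():
    data = DataConfig()
    data.set_from_file(DataFromFileConfig())
    assert data.getType() == DataConfig.FROM_FILE
    return data.get_from_file()  # any other getter would fail its assert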
class MicroClose:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroClose')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroMLPConfig:
"""
Attributes:
- arc
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.arc = []
(_etype11, _size8) = iprot.readListBegin()
if _size8 >= 0:
for _i12 in six.moves.range(_size8):
_elem13 = iprot.readI32()
self.arc.append(_elem13)
else:
while iprot.peekList():
_elem14 = iprot.readI32()
self.arc.append(_elem14)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroMLPConfig')
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter15 in self.arc:
oprot.writeI32(iter15)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroCINConfig:
"""
Attributes:
- arc
- num_of_layers
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.arc = []
(_etype19, _size16) = iprot.readListBegin()
if _size16 >= 0:
for _i20 in six.moves.range(_size16):
_elem21 = iprot.readI32()
self.arc.append(_elem21)
else:
while iprot.peekList():
_elem22 = iprot.readI32()
self.arc.append(_elem22)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.num_of_layers = []
(_etype26, _size23) = iprot.readListBegin()
if _size23 >= 0:
for _i27 in six.moves.range(_size23):
_elem28 = iprot.readI32()
self.num_of_layers.append(_elem28)
else:
while iprot.peekList():
_elem29 = iprot.readI32()
self.num_of_layers.append(_elem29)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroCINConfig')
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter30 in self.arc:
oprot.writeI32(iter30)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.num_of_layers))
for iter31 in self.num_of_layers:
oprot.writeI32(iter31)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroAttentionConfig:
"""
Attributes:
- num_of_layers
- num_of_heads
- att_embed_dim
- dropout_prob
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.num_of_layers = []
(_etype35, _size32) = iprot.readListBegin()
if _size32 >= 0:
for _i36 in six.moves.range(_size32):
_elem37 = iprot.readI32()
self.num_of_layers.append(_elem37)
else:
while iprot.peekList():
_elem38 = iprot.readI32()
self.num_of_layers.append(_elem38)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.num_of_heads = []
(_etype42, _size39) = iprot.readListBegin()
if _size39 >= 0:
for _i43 in six.moves.range(_size39):
_elem44 = iprot.readI32()
self.num_of_heads.append(_elem44)
else:
while iprot.peekList():
_elem45 = iprot.readI32()
self.num_of_heads.append(_elem45)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.att_embed_dim = []
(_etype49, _size46) = iprot.readListBegin()
if _size46 >= 0:
for _i50 in six.moves.range(_size46):
_elem51 = iprot.readI32()
self.att_embed_dim.append(_elem51)
else:
while iprot.peekList():
_elem52 = iprot.readI32()
self.att_embed_dim.append(_elem52)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.dropout_prob = []
(_etype56, _size53) = iprot.readListBegin()
if _size53 >= 0:
for _i57 in six.moves.range(_size53):
_elem58 = iprot.readFloat()
self.dropout_prob.append(_elem58)
else:
while iprot.peekList():
_elem59 = iprot.readFloat()
self.dropout_prob.append(_elem59)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MicroAttentionConfig')
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.LIST, 1)
oprot.writeListBegin(TType.I32, len(self.num_of_layers))
for iter60 in self.num_of_layers:
oprot.writeI32(iter60)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_of_heads != None:
oprot.writeFieldBegin('num_of_heads', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.num_of_heads))
for iter61 in self.num_of_heads:
oprot.writeI32(iter61)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.att_embed_dim != None:
oprot.writeFieldBegin('att_embed_dim', TType.LIST, 3)
oprot.writeListBegin(TType.I32, len(self.att_embed_dim))
for iter62 in self.att_embed_dim:
oprot.writeI32(iter62)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.dropout_prob != None:
oprot.writeFieldBegin('dropout_prob', TType.LIST, 4)
oprot.writeListBegin(TType.FLOAT, len(self.dropout_prob))
for iter63 in self.dropout_prob:
oprot.writeFloat(iter63)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
if self.num_of_heads is not None:
value = pprint.pformat(self.num_of_heads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_heads=%s' % (value))
if self.att_embed_dim is not None:
value = pprint.pformat(self.att_embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' att_embed_dim=%s' % (value))
if self.dropout_prob is not None:
value = pprint.pformat(self.dropout_prob, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dropout_prob=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MicroSearchSpaceType(object):
"""
Attributes:
- close
- micro_mlp
- micro_cin
- micro_attention
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
CLOSE = 1
MICRO_MLP = 2
MICRO_CIN = 3
MICRO_ATTENTION = 4
@staticmethod
def isUnion():
return True
def get_close(self):
assert self.field == 1
return self.value
def get_micro_mlp(self):
assert self.field == 2
return self.value
def get_micro_cin(self):
assert self.field == 3
return self.value
def get_micro_attention(self):
assert self.field == 4
return self.value
def set_close(self, value):
self.field = 1
self.value = value
def set_micro_mlp(self, value):
self.field = 2
self.value = value
def set_micro_cin(self, value):
self.field = 3
self.value = value
def set_micro_attention(self, value):
self.field = 4
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 6
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('close', value)
if self.field == 2:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('micro_mlp', value)
if self.field == 3:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('micro_cin', value)
if self.field == 4:
padding = ' ' * 16
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('micro_attention', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
close = MicroClose()
close.read(iprot)
assert self.field == 0 and self.value is None
self.set_close(close)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
micro_mlp = MicroMLPConfig()
micro_mlp.read(iprot)
assert self.field == 0 and self.value is None
self.set_micro_mlp(micro_mlp)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
micro_cin = MicroCINConfig()
micro_cin.read(iprot)
assert self.field == 0 and self.value is None
self.set_micro_cin(micro_cin)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
micro_attention = MicroAttentionConfig()
micro_attention.read(iprot)
assert self.field == 0 and self.value is None
self.set_micro_attention(micro_attention)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('MicroSearchSpaceType')
if self.field == 1:
oprot.writeFieldBegin('close', TType.STRUCT, 1)
close = self.value
close.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('micro_mlp', TType.STRUCT, 2)
micro_mlp = self.value
micro_mlp.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('micro_cin', TType.STRUCT, 3)
micro_cin = self.value
micro_cin.write(oprot)
oprot.writeFieldEnd()
if self.field == 4:
oprot.writeFieldBegin('micro_attention', TType.STRUCT, 4)
micro_attention = self.value
micro_attention.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class InputDenseAsSparse:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('InputDenseAsSparse')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class FeatureProcessingType(object):
"""
Attributes:
- idasp
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
IDASP = 1
@staticmethod
def isUnion():
return True
def get_idasp(self):
assert self.field == 1
return self.value
def set_idasp(self, value):
self.field = 1
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 6
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('idasp', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
idasp = InputDenseAsSparse()
idasp.read(iprot)
assert self.field == 0 and self.value is None
self.set_idasp(idasp)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('FeatureProcessingType')
if self.field == 1:
oprot.writeFieldBegin('idasp', TType.STRUCT, 1)
idasp = self.value
idasp.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NASRecNetConfig:
"""
Attributes:
- block_configs
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.block_configs = []
(_etype67, _size64) = iprot.readListBegin()
if _size64 >= 0:
for _i68 in six.moves.range(_size64):
_elem69 = block_config.ttypes.BlockConfig()
_elem69.read(iprot)
self.block_configs.append(_elem69)
else:
while iprot.peekList():
_elem70 = block_config.ttypes.BlockConfig()
_elem70.read(iprot)
self.block_configs.append(_elem70)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('NASRecNetConfig')
if self.block_configs != None:
oprot.writeFieldBegin('block_configs', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.block_configs))
for iter71 in self.block_configs:
iter71.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.block_configs is not None:
value = pprint.pformat(self.block_configs, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_configs=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class RandomSearcherConfig:
"""
Attributes:
- max_num_block
- block_types
- macro_space_type
- micro_space_types
- feature_processing_type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.max_num_block = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.block_types = []
(_etype75, _size72) = iprot.readListBegin()
if _size72 >= 0:
for _i76 in six.moves.range(_size72):
_elem77 = iprot.readI32()
self.block_types.append(_elem77)
else:
while iprot.peekList():
_elem78 = iprot.readI32()
self.block_types.append(_elem78)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.macro_space_type = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.micro_space_types = []
(_etype82, _size79) = iprot.readListBegin()
if _size79 >= 0:
for _i83 in six.moves.range(_size79):
_elem84 = MicroSearchSpaceType()
_elem84.read(iprot)
self.micro_space_types.append(_elem84)
else:
while iprot.peekList():
_elem85 = MicroSearchSpaceType()
_elem85.read(iprot)
self.micro_space_types.append(_elem85)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.feature_processing_type = []
(_etype89, _size86) = iprot.readListBegin()
if _size86 >= 0:
for _i90 in six.moves.range(_size86):
_elem91 = FeatureProcessingType()
_elem91.read(iprot)
self.feature_processing_type.append(_elem91)
else:
while iprot.peekList():
_elem92 = FeatureProcessingType()
_elem92.read(iprot)
self.feature_processing_type.append(_elem92)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('RandomSearcherConfig')
if self.max_num_block != None:
oprot.writeFieldBegin('max_num_block', TType.I32, 1)
oprot.writeI32(self.max_num_block)
oprot.writeFieldEnd()
if self.block_types != None:
oprot.writeFieldBegin('block_types', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.block_types))
for iter93 in self.block_types:
oprot.writeI32(iter93)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.macro_space_type != None:
oprot.writeFieldBegin('macro_space_type', TType.I32, 3)
oprot.writeI32(self.macro_space_type)
oprot.writeFieldEnd()
if self.micro_space_types != None:
oprot.writeFieldBegin('micro_space_types', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.micro_space_types))
for iter94 in self.micro_space_types:
iter94.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_processing_type != None:
oprot.writeFieldBegin('feature_processing_type', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.feature_processing_type))
for iter95 in self.feature_processing_type:
iter95.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.max_num_block is not None:
value = pprint.pformat(self.max_num_block, indent=0)
value = padding.join(value.splitlines(True))
L.append(' max_num_block=%s' % (value))
if self.block_types is not None:
value = pprint.pformat(self.block_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_types=%s' % (value))
if self.macro_space_type is not None:
value = pprint.pformat(self.macro_space_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' macro_space_type=%s' % (value))
if self.micro_space_types is not None:
value = pprint.pformat(self.micro_space_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' micro_space_types=%s' % (value))
if self.feature_processing_type is not None:
value = pprint.pformat(self.feature_processing_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' feature_processing_type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class EvolutionarySearcherConfig:
"""
Attributes:
- max_num_block
- block_types
- population_size
- candidate_size
- macro_space_type
- micro_space_types
- feature_processing_type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.max_num_block = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.block_types = []
(_etype99, _size96) = iprot.readListBegin()
if _size96 >= 0:
for _i100 in six.moves.range(_size96):
_elem101 = iprot.readI32()
self.block_types.append(_elem101)
else:
while iprot.peekList():
_elem102 = iprot.readI32()
self.block_types.append(_elem102)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.population_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.candidate_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.macro_space_type = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.micro_space_types = []
(_etype106, _size103) = iprot.readListBegin()
if _size103 >= 0:
for _i107 in six.moves.range(_size103):
_elem108 = MicroSearchSpaceType()
_elem108.read(iprot)
self.micro_space_types.append(_elem108)
else:
while iprot.peekList():
_elem109 = MicroSearchSpaceType()
_elem109.read(iprot)
self.micro_space_types.append(_elem109)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.feature_processing_type = []
(_etype113, _size110) = iprot.readListBegin()
if _size110 >= 0:
for _i114 in six.moves.range(_size110):
_elem115 = FeatureProcessingType()
_elem115.read(iprot)
self.feature_processing_type.append(_elem115)
else:
while iprot.peekList():
_elem116 = FeatureProcessingType()
_elem116.read(iprot)
self.feature_processing_type.append(_elem116)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('EvolutionarySearcherConfig')
if self.max_num_block != None:
oprot.writeFieldBegin('max_num_block', TType.I32, 1)
oprot.writeI32(self.max_num_block)
oprot.writeFieldEnd()
if self.block_types != None:
oprot.writeFieldBegin('block_types', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.block_types))
for iter117 in self.block_types:
oprot.writeI32(iter117)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.population_size != None:
oprot.writeFieldBegin('population_size', TType.I32, 3)
oprot.writeI32(self.population_size)
oprot.writeFieldEnd()
if self.candidate_size != None:
oprot.writeFieldBegin('candidate_size', TType.I32, 4)
oprot.writeI32(self.candidate_size)
oprot.writeFieldEnd()
if self.macro_space_type != None:
oprot.writeFieldBegin('macro_space_type', TType.I32, 5)
oprot.writeI32(self.macro_space_type)
oprot.writeFieldEnd()
if self.micro_space_types != None:
oprot.writeFieldBegin('micro_space_types', TType.LIST, 7)
oprot.writeListBegin(TType.STRUCT, len(self.micro_space_types))
for iter118 in self.micro_space_types:
iter118.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_processing_type != None:
oprot.writeFieldBegin('feature_processing_type', TType.LIST, 8)
oprot.writeListBegin(TType.STRUCT, len(self.feature_processing_type))
for iter119 in self.feature_processing_type:
iter119.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.max_num_block is not None:
value = pprint.pformat(self.max_num_block, indent=0)
value = padding.join(value.splitlines(True))
L.append(' max_num_block=%s' % (value))
if self.block_types is not None:
value = pprint.pformat(self.block_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_types=%s' % (value))
if self.population_size is not None:
value = pprint.pformat(self.population_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' population_size=%s' % (value))
if self.candidate_size is not None:
value = pprint.pformat(self.candidate_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' candidate_size=%s' % (value))
if self.macro_space_type is not None:
value = pprint.pformat(self.macro_space_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' macro_space_type=%s' % (value))
if self.micro_space_types is not None:
value = pprint.pformat(self.micro_space_types, indent=0)
value = padding.join(value.splitlines(True))
L.append(' micro_space_types=%s' % (value))
if self.feature_processing_type is not None:
value = pprint.pformat(self.feature_processing_type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' feature_processing_type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class SearcherConfig(object):
"""
Attributes:
- random_searcher
- evolutionary_searcher
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
RANDOM_SEARCHER = 1
EVOLUTIONARY_SEARCHER = 2
@staticmethod
def isUnion():
return True
def get_random_searcher(self):
assert self.field == 1
return self.value
def get_evolutionary_searcher(self):
assert self.field == 2
return self.value
def set_random_searcher(self, value):
self.field = 1
self.value = value
def set_evolutionary_searcher(self, value):
self.field = 2
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 16
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('random_searcher', value)
if self.field == 2:
padding = ' ' * 22
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('evolutionary_searcher', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
random_searcher = RandomSearcherConfig()
random_searcher.read(iprot)
assert self.field == 0 and self.value is None
self.set_random_searcher(random_searcher)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
evolutionary_searcher = EvolutionarySearcherConfig()
evolutionary_searcher.read(iprot)
assert self.field == 0 and self.value is None
self.set_evolutionary_searcher(evolutionary_searcher)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('SearcherConfig')
if self.field == 1:
oprot.writeFieldBegin('random_searcher', TType.STRUCT, 1)
random_searcher = self.value
random_searcher.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('evolutionary_searcher', TType.STRUCT, 2)
evolutionary_searcher = self.value
evolutionary_searcher.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
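# Usage sketch (illustrative comment, not generated code): unions in this
# module hold exactly one (field, value) pair, each set_* overwrites the
# previous member, and get_* asserts that its member is the active one.
# Constructors presumably become usable once the thrift_spec/__init__
# fixups near the end of this module run, as in typical generated code:
#
#     cfg = SearcherConfig()
#     cfg.set_random_searcher(RandomSearcherConfig())
#     assert cfg.getType() == SearcherConfig.RANDOM_SEARCHER
#     searcher = cfg.get_random_searcher()  # AssertionError if another
#                                           # member were active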
class ModelConfig(object):
"""
Attributes:
- nasrec_net
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
NASREC_NET = 1
@staticmethod
def isUnion():
return True
def get_nasrec_net(self):
assert self.field == 1
return self.value
def set_nasrec_net(self, value):
self.field = 1
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 11
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('nasrec_net', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
nasrec_net = NASRecNetConfig()
nasrec_net.read(iprot)
assert self.field == 0 and self.value is None
self.set_nasrec_net(nasrec_net)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('ModelConfig')
if self.field == 1:
oprot.writeFieldBegin('nasrec_net', TType.STRUCT, 1)
nasrec_net = self.value
nasrec_net.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SGDOptimConfig:
"""
Attributes:
- lr
- momentum
- dampening
- nesterov
- weight_decay
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.momentum = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.dampening = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.nesterov = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SGDOptimConfig')
if self.lr is not None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.momentum is not None:
oprot.writeFieldBegin('momentum', TType.FLOAT, 2)
oprot.writeFloat(self.momentum)
oprot.writeFieldEnd()
if self.dampening is not None:
oprot.writeFieldBegin('dampening', TType.FLOAT, 3)
oprot.writeFloat(self.dampening)
oprot.writeFieldEnd()
if self.nesterov is not None:
oprot.writeFieldBegin('nesterov', TType.BOOL, 4)
oprot.writeBool(self.nesterov)
oprot.writeFieldEnd()
if self.weight_decay is not None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 5)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.momentum is not None:
value = pprint.pformat(self.momentum, indent=0)
value = padding.join(value.splitlines(True))
L.append(' momentum=%s' % (value))
if self.dampening is not None:
value = pprint.pformat(self.dampening, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dampening=%s' % (value))
if self.nesterov is not None:
value = pprint.pformat(self.nesterov, indent=0)
value = padding.join(value.splitlines(True))
L.append(' nesterov=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
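# Round-trip sketch (illustrative comment; assumes the keyword __init__
# that the fixup code at the bottom of this module presumably installs,
# plus the TMemoryBuffer/TBinaryProtocol runtime classes this module
# already imports). The other *OptimConfig structs below serialize the
# same way:
#
#     cfg = SGDOptimConfig(lr=0.1, momentum=0.9, nesterov=True)
#     buf = TTransport.TMemoryBuffer()
#     cfg.write(TBinaryProtocol.TBinaryProtocol(buf))
#     decoded = SGDOptimConfig()
#     decoded.read(TBinaryProtocol.TBinaryProtocol(
#         TTransport.TMemoryBuffer(buf.getvalue())))
#     assert decoded == cfg  # __eq__ compares __dict__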
class AdagradOptimConfig:
"""
Attributes:
- lr
- lr_decay
- weight_decay
- initial_accumulator_value
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.lr_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.initial_accumulator_value = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AdagradOptimConfig')
if self.lr is not None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.lr_decay is not None:
oprot.writeFieldBegin('lr_decay', TType.FLOAT, 2)
oprot.writeFloat(self.lr_decay)
oprot.writeFieldEnd()
if self.weight_decay is not None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 3)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
if self.initial_accumulator_value is not None:
oprot.writeFieldBegin('initial_accumulator_value', TType.FLOAT, 4)
oprot.writeFloat(self.initial_accumulator_value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.lr_decay is not None:
value = pprint.pformat(self.lr_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr_decay=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
if self.initial_accumulator_value is not None:
value = pprint.pformat(self.initial_accumulator_value, indent=0)
value = padding.join(value.splitlines(True))
L.append(' initial_accumulator_value=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class SparseAdamOptimConfig:
"""
Attributes:
- lr
- betas0
- betas1
- eps
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.betas0 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.betas1 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.eps = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SparseAdamOptimConfig')
if self.lr is not None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.betas0 is not None:
oprot.writeFieldBegin('betas0', TType.FLOAT, 2)
oprot.writeFloat(self.betas0)
oprot.writeFieldEnd()
if self.betas1 is not None:
oprot.writeFieldBegin('betas1', TType.FLOAT, 3)
oprot.writeFloat(self.betas1)
oprot.writeFieldEnd()
if self.eps is not None:
oprot.writeFieldBegin('eps', TType.FLOAT, 4)
oprot.writeFloat(self.eps)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.betas0 is not None:
value = pprint.pformat(self.betas0, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas0=%s' % (value))
if self.betas1 is not None:
value = pprint.pformat(self.betas1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas1=%s' % (value))
if self.eps is not None:
value = pprint.pformat(self.eps, indent=0)
value = padding.join(value.splitlines(True))
L.append(' eps=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AdamOptimConfig:
"""
Attributes:
- lr
- amsgrad
- weight_decay
- betas0
- betas1
- eps
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.amsgrad = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.betas0 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.FLOAT:
self.betas1 = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.FLOAT:
self.eps = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AdamOptimConfig')
if self.lr is not None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.amsgrad is not None:
oprot.writeFieldBegin('amsgrad', TType.BOOL, 2)
oprot.writeBool(self.amsgrad)
oprot.writeFieldEnd()
if self.weight_decay is not None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 3)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
if self.betas0 is not None:
oprot.writeFieldBegin('betas0', TType.FLOAT, 4)
oprot.writeFloat(self.betas0)
oprot.writeFieldEnd()
if self.betas1 is not None:
oprot.writeFieldBegin('betas1', TType.FLOAT, 5)
oprot.writeFloat(self.betas1)
oprot.writeFieldEnd()
if self.eps is not None:
oprot.writeFieldBegin('eps', TType.FLOAT, 6)
oprot.writeFloat(self.eps)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.amsgrad is not None:
value = pprint.pformat(self.amsgrad, indent=0)
value = padding.join(value.splitlines(True))
L.append(' amsgrad=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
if self.betas0 is not None:
value = pprint.pformat(self.betas0, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas0=%s' % (value))
if self.betas1 is not None:
value = pprint.pformat(self.betas1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' betas1=%s' % (value))
if self.eps is not None:
value = pprint.pformat(self.eps, indent=0)
value = padding.join(value.splitlines(True))
L.append(' eps=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class RMSpropOptimConfig:
"""
Attributes:
- lr
- alpha
- weight_decay
- momentum
- centered
- eps
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.lr = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.FLOAT:
self.alpha = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.FLOAT:
self.weight_decay = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.FLOAT:
self.momentum = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.centered = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.FLOAT:
self.eps = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('RMSpropOptimConfig')
if self.lr is not None:
oprot.writeFieldBegin('lr', TType.FLOAT, 1)
oprot.writeFloat(self.lr)
oprot.writeFieldEnd()
if self.alpha is not None:
oprot.writeFieldBegin('alpha', TType.FLOAT, 2)
oprot.writeFloat(self.alpha)
oprot.writeFieldEnd()
if self.weight_decay is not None:
oprot.writeFieldBegin('weight_decay', TType.FLOAT, 3)
oprot.writeFloat(self.weight_decay)
oprot.writeFieldEnd()
if self.momentum is not None:
oprot.writeFieldBegin('momentum', TType.FLOAT, 4)
oprot.writeFloat(self.momentum)
oprot.writeFieldEnd()
if self.centered is not None:
oprot.writeFieldBegin('centered', TType.BOOL, 5)
oprot.writeBool(self.centered)
oprot.writeFieldEnd()
if self.eps is not None:
oprot.writeFieldBegin('eps', TType.FLOAT, 6)
oprot.writeFloat(self.eps)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.lr is not None:
value = pprint.pformat(self.lr, indent=0)
value = padding.join(value.splitlines(True))
L.append(' lr=%s' % (value))
if self.alpha is not None:
value = pprint.pformat(self.alpha, indent=0)
value = padding.join(value.splitlines(True))
L.append(' alpha=%s' % (value))
if self.weight_decay is not None:
value = pprint.pformat(self.weight_decay, indent=0)
value = padding.join(value.splitlines(True))
L.append(' weight_decay=%s' % (value))
if self.momentum is not None:
value = pprint.pformat(self.momentum, indent=0)
value = padding.join(value.splitlines(True))
L.append(' momentum=%s' % (value))
if self.centered is not None:
value = pprint.pformat(self.centered, indent=0)
value = padding.join(value.splitlines(True))
L.append(' centered=%s' % (value))
if self.eps is not None:
value = pprint.pformat(self.eps, indent=0)
value = padding.join(value.splitlines(True))
L.append(' eps=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class OptimConfig(object):
"""
Attributes:
- sgd
- adagrad
- sparse_adam
- adam
- rmsprop
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
SGD = 1
ADAGRAD = 2
SPARSE_ADAM = 3
ADAM = 4
RMSPROP = 5
@staticmethod
def isUnion():
return True
def get_sgd(self):
assert self.field == 1
return self.value
def get_adagrad(self):
assert self.field == 2
return self.value
def get_sparse_adam(self):
assert self.field == 3
return self.value
def get_adam(self):
assert self.field == 4
return self.value
def get_rmsprop(self):
assert self.field == 5
return self.value
def set_sgd(self, value):
self.field = 1
self.value = value
def set_adagrad(self, value):
self.field = 2
self.value = value
def set_sparse_adam(self, value):
self.field = 3
self.value = value
def set_adam(self, value):
self.field = 4
self.value = value
def set_rmsprop(self, value):
self.field = 5
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('sgd', value)
if self.field == 2:
padding = ' ' * 8
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('adagrad', value)
if self.field == 3:
padding = ' ' * 12
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('sparse_adam', value)
if self.field == 4:
padding = ' ' * 5
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('adam', value)
if self.field == 5:
padding = ' ' * 8
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('rmsprop', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
sgd = SGDOptimConfig()
sgd.read(iprot)
assert self.field == 0 and self.value is None
self.set_sgd(sgd)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
adagrad = AdagradOptimConfig()
adagrad.read(iprot)
assert self.field == 0 and self.value is None
self.set_adagrad(adagrad)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
sparse_adam = SparseAdamOptimConfig()
sparse_adam.read(iprot)
assert self.field == 0 and self.value is None
self.set_sparse_adam(sparse_adam)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
adam = AdamOptimConfig()
adam.read(iprot)
assert self.field == 0 and self.value is None
self.set_adam(adam)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
rmsprop = RMSpropOptimConfig()
rmsprop.read(iprot)
assert self.field == 0 and self.value is None
self.set_rmsprop(rmsprop)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('OptimConfig')
if self.field == 1:
oprot.writeFieldBegin('sgd', TType.STRUCT, 1)
sgd = self.value
sgd.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('adagrad', TType.STRUCT, 2)
adagrad = self.value
adagrad.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('sparse_adam', TType.STRUCT, 3)
sparse_adam = self.value
sparse_adam.write(oprot)
oprot.writeFieldEnd()
if self.field == 4:
oprot.writeFieldBegin('adam', TType.STRUCT, 4)
adam = self.value
adam.write(oprot)
oprot.writeFieldEnd()
if self.field == 5:
oprot.writeFieldBegin('rmsprop', TType.STRUCT, 5)
rmsprop = self.value
rmsprop.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
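# Dispatch sketch (hypothetical consumer, not part of this module): one
# plausible way to map the active OptimConfig member onto torch.optim.
# Assumes lr is populated; unset optional fields fall back to defaults:
#
#     import torch
#
#     def build_optimizer(params, cfg):
#         if cfg.getType() == OptimConfig.SGD:
#             sgd = cfg.get_sgd()
#             return torch.optim.SGD(
#                 params, lr=sgd.lr,
#                 momentum=sgd.momentum or 0.0,
#                 weight_decay=sgd.weight_decay or 0.0,
#                 nesterov=bool(sgd.nesterov))
#         if cfg.getType() == OptimConfig.ADAM:
#             adam = cfg.get_adam()
#             return torch.optim.Adam(
#                 params, lr=adam.lr,
#                 betas=(adam.betas0 or 0.9, adam.betas1 or 0.999),
#                 eps=adam.eps or 1e-8)
#         raise ValueError("unhandled optimizer type %d" % cfg.getType())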
class SumPooling:
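"""Field-less marker struct; selects sum pooling when set on PoolingConfig."""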
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SumPooling')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AvgPooling:
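"""Field-less marker struct; selects average pooling when set on PoolingConfig."""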
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AvgPooling')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class PoolingConfig(object):
"""
Attributes:
- sum
- avg
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
SUM = 1
AVG = 2
@staticmethod
def isUnion():
return True
def get_sum(self):
assert self.field == 1
return self.value
def get_avg(self):
assert self.field == 2
return self.value
def set_sum(self, value):
self.field = 1
self.value = value
def set_avg(self, value):
self.field = 2
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('sum', value)
if self.field == 2:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('avg', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
sum = SumPooling()
sum.read(iprot)
assert self.field == 0 and self.value is None
self.set_sum(sum)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
avg = AvgPooling()
avg.read(iprot)
assert self.field == 0 and self.value is None
self.set_avg(avg)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('PoolingConfig')
if self.field == 1:
oprot.writeFieldBegin('sum', TType.STRUCT, 1)
sum = self.value
sum.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('avg', TType.STRUCT, 2)
avg = self.value
avg.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
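# Usage sketch (illustrative comment): SumPooling/AvgPooling carry no
# fields, so the active member's type alone encodes the choice:
#
#     pooling = PoolingConfig()
#     pooling.set_sum(SumPooling())
#     reduction = {PoolingConfig.SUM: "sum",
#                  PoolingConfig.AVG: "mean"}[pooling.getType()]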
class SparseFeatureItem:
"""
Attributes:
- name
- hash_size
- embed_dim
- optim
- pooling
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.hash_size = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.optim = OptimConfig()
self.optim.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.pooling = PoolingConfig()
self.pooling.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SparseFeatureItem')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.hash_size is not None:
oprot.writeFieldBegin('hash_size', TType.I32, 2)
oprot.writeI32(self.hash_size)
oprot.writeFieldEnd()
if self.embed_dim is not None:
oprot.writeFieldBegin('embed_dim', TType.I32, 3)
oprot.writeI32(self.embed_dim)
oprot.writeFieldEnd()
if self.optim is not None:
oprot.writeFieldBegin('optim', TType.STRUCT, 4)
self.optim.write(oprot)
oprot.writeFieldEnd()
if self.pooling is not None:
oprot.writeFieldBegin('pooling', TType.STRUCT, 5)
self.pooling.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.hash_size is not None:
value = pprint.pformat(self.hash_size, indent=0)
value = padding.join(value.splitlines(True))
L.append(' hash_size=%s' % (value))
if self.embed_dim is not None:
value = pprint.pformat(self.embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' embed_dim=%s' % (value))
if self.optim is not None:
value = pprint.pformat(self.optim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optim=%s' % (value))
if self.pooling is not None:
value = pprint.pformat(self.pooling, indent=0)
value = padding.join(value.splitlines(True))
L.append(' pooling=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
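# Construction sketch (illustrative comment; assumes the keyword
# __init__ installed by the module-end fixups, with placeholder values):
#
#     item = SparseFeatureItem(
#         name="user_id",        # placeholder feature name
#         hash_size=1000000,
#         embed_dim=64,
#         optim=optim_cfg,       # an OptimConfig union, optional override
#         pooling=pooling)       # a PoolingConfig union, optional override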
class SparseFeatureConfig:
"""
Attributes:
- features
- embed_dim
- optim
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.features = []
(_etype123, _size120) = iprot.readListBegin()
if _size120 >= 0:
for _i124 in six.moves.range(_size120):
_elem125 = SparseFeatureItem()
_elem125.read(iprot)
self.features.append(_elem125)
else:
while iprot.peekList():
_elem126 = SparseFeatureItem()
_elem126.read(iprot)
self.features.append(_elem126)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.optim = OptimConfig()
self.optim.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('SparseFeatureConfig')
if self.features is not None:
oprot.writeFieldBegin('features', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.features))
for iter127 in self.features:
iter127.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.embed_dim is not None:
oprot.writeFieldBegin('embed_dim', TType.I32, 2)
oprot.writeI32(self.embed_dim)
oprot.writeFieldEnd()
if self.optim is not None:
oprot.writeFieldBegin('optim', TType.STRUCT, 3)
self.optim.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.features is not None:
value = pprint.pformat(self.features, indent=0)
value = padding.join(value.splitlines(True))
L.append(' features=%s' % (value))
if self.embed_dim is not None:
value = pprint.pformat(self.embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' embed_dim=%s' % (value))
if self.optim is not None:
value = pprint.pformat(self.optim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optim=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class DenseFeatureConfig:
"""
Attributes:
- features
- optim
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.features = []
(_etype131, _size128) = iprot.readListBegin()
if _size128 >= 0:
for _i132 in six.moves.range(_size128):
_elem133 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.features.append(_elem133)
else:
while iprot.peekList():
_elem134 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.features.append(_elem134)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.optim = OptimConfig()
self.optim.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DenseFeatureConfig')
if self.features is not None:
oprot.writeFieldBegin('features', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.features))
for iter135 in self.features:
oprot.writeString(iter135.encode('utf-8')) if UTF8STRINGS and not isinstance(iter135, bytes) else oprot.writeString(iter135)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.optim is not None:
oprot.writeFieldBegin('optim', TType.STRUCT, 2)
self.optim.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.features is not None:
value = pprint.pformat(self.features, indent=0)
value = padding.join(value.splitlines(True))
L.append(' features=%s' % (value))
if self.optim is not None:
value = pprint.pformat(self.optim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optim=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class FeatureConfig:
"""
Attributes:
- dense
- sparse
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.dense = DenseFeatureConfig()
self.dense.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sparse = SparseFeatureConfig()
self.sparse.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FeatureConfig')
if self.dense is not None:
    oprot.writeFieldBegin('dense', TType.STRUCT, 1)
    self.dense.write(oprot)
    oprot.writeFieldEnd()
if self.sparse is not None:
    oprot.writeFieldBegin('sparse', TType.STRUCT, 2)
    self.sparse.write(oprot)
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.dense is not None:
value = pprint.pformat(self.dense, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dense=%s' % (value))
if self.sparse is not None:
value = pprint.pformat(self.sparse, indent=0)
value = padding.join(value.splitlines(True))
L.append(' sparse=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
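# Editor's note: the three classes that follow — BCEWithLogitsLoss, BCELoss
# and MSELoss — are empty marker structs: they carry no fields and exist only
# so the LossConfig union further down can tag which loss function to use.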
class BCEWithLogitsLoss:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BCEWithLogitsLoss')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class BCELoss:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BCELoss')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class MSELoss:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MSELoss')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class LossConfig(object):
"""
Attributes:
- bcewithlogits
- bce
- mse
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
BCEWITHLOGITS = 1
BCE = 2
MSE = 3
@staticmethod
def isUnion():
return True
def get_bcewithlogits(self):
assert self.field == 1
return self.value
def get_bce(self):
assert self.field == 2
return self.value
def get_mse(self):
assert self.field == 3
return self.value
def set_bcewithlogits(self, value):
self.field = 1
self.value = value
def set_bce(self, value):
self.field = 2
self.value = value
def set_mse(self, value):
self.field = 3
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 14
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('bcewithlogits', value)
if self.field == 2:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('bce', value)
if self.field == 3:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('mse', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
bcewithlogits = BCEWithLogitsLoss()
bcewithlogits.read(iprot)
assert self.field == 0 and self.value is None
self.set_bcewithlogits(bcewithlogits)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
bce = BCELoss()
bce.read(iprot)
assert self.field == 0 and self.value is None
self.set_bce(bce)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
mse = MSELoss()
mse.read(iprot)
assert self.field == 0 and self.value is None
self.set_mse(mse)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('LossConfig')
if self.field == 1:
oprot.writeFieldBegin('bcewithlogits', TType.STRUCT, 1)
bcewithlogits = self.value
bcewithlogits.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('bce', TType.STRUCT, 2)
bce = self.value
bce.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('mse', TType.STRUCT, 3)
mse = self.value
mse.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
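# Editor's note: a hedged sketch (not part of the generated file) of the union
# semantics above — exactly one variant may be set at a time, and getType()
# reports which. It assumes LossConfig's patched __init__ (assigned later in
# this module, like the other unions) yields the empty union when called with
# no arguments; BCELoss is an empty marker struct, so its default constructor
# suffices.
def _example_loss_config_union():
    loss = LossConfig()
    loss.set_bce(BCELoss())
    assert loss.getType() == LossConfig.BCE
    assert isinstance(loss.get_bce(), BCELoss)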
class LoggingConfig:
"""
Attributes:
- log_freq
- tb_log_freq
- tb_log_model_weight_hist
- tb_log_pr_curve_batch
- tb_log_model_weight_filter_regex
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.log_freq = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.tb_log_freq = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.tb_log_model_weight_hist = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.tb_log_pr_curve_batch = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.tb_log_model_weight_filter_regex = []
(_etype139, _size136) = iprot.readListBegin()
if _size136 >= 0:
for _i140 in six.moves.range(_size136):
_elem141 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.tb_log_model_weight_filter_regex.append(_elem141)
else:
while iprot.peekList():
_elem142 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
self.tb_log_model_weight_filter_regex.append(_elem142)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('LoggingConfig')
if self.log_freq is not None:
    oprot.writeFieldBegin('log_freq', TType.I32, 1)
    oprot.writeI32(self.log_freq)
    oprot.writeFieldEnd()
if self.tb_log_freq is not None:
    oprot.writeFieldBegin('tb_log_freq', TType.I32, 2)
    oprot.writeI32(self.tb_log_freq)
    oprot.writeFieldEnd()
if self.tb_log_model_weight_hist is not None:
    oprot.writeFieldBegin('tb_log_model_weight_hist', TType.BOOL, 3)
    oprot.writeBool(self.tb_log_model_weight_hist)
    oprot.writeFieldEnd()
if self.tb_log_pr_curve_batch is not None:
    oprot.writeFieldBegin('tb_log_pr_curve_batch', TType.BOOL, 4)
    oprot.writeBool(self.tb_log_pr_curve_batch)
    oprot.writeFieldEnd()
if self.tb_log_model_weight_filter_regex is not None:
    oprot.writeFieldBegin('tb_log_model_weight_filter_regex', TType.LIST, 5)
    oprot.writeListBegin(TType.STRING, len(self.tb_log_model_weight_filter_regex))
    for iter143 in self.tb_log_model_weight_filter_regex:
        if UTF8STRINGS and not isinstance(iter143, bytes):
            oprot.writeString(iter143.encode('utf-8'))
        else:
            oprot.writeString(iter143)
    oprot.writeListEnd()
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.log_freq is not None:
value = pprint.pformat(self.log_freq, indent=0)
value = padding.join(value.splitlines(True))
L.append(' log_freq=%s' % (value))
if self.tb_log_freq is not None:
value = pprint.pformat(self.tb_log_freq, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_freq=%s' % (value))
if self.tb_log_model_weight_hist is not None:
value = pprint.pformat(self.tb_log_model_weight_hist, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_model_weight_hist=%s' % (value))
if self.tb_log_pr_curve_batch is not None:
value = pprint.pformat(self.tb_log_pr_curve_batch, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_pr_curve_batch=%s' % (value))
if self.tb_log_model_weight_filter_regex is not None:
value = pprint.pformat(self.tb_log_model_weight_filter_regex, indent=0)
value = padding.join(value.splitlines(True))
L.append(' tb_log_model_weight_filter_regex=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class TrainConfig:
"""
Attributes:
- logging_config
- nepochs
- early_stop_on_val_loss
- loss
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.logging_config = LoggingConfig()
self.logging_config.read(iprot)
else:
iprot.skip(ftype)
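# NOTE (editor's assumption): field ids 2 and 4 are handled by neither read()
# nor write() for TrainConfig; they look like retired fields whose ids were
# left reserved, which is why the remaining ids (1, 3, 5, 6) are not contiguous.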
elif fid == 3:
if ftype == TType.I32:
self.nepochs = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.early_stop_on_val_loss = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.loss = LossConfig()
self.loss.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('TrainConfig')
if self.logging_config is not None:
    oprot.writeFieldBegin('logging_config', TType.STRUCT, 1)
    self.logging_config.write(oprot)
    oprot.writeFieldEnd()
if self.nepochs is not None:
    oprot.writeFieldBegin('nepochs', TType.I32, 3)
    oprot.writeI32(self.nepochs)
    oprot.writeFieldEnd()
if self.early_stop_on_val_loss is not None:
    oprot.writeFieldBegin('early_stop_on_val_loss', TType.BOOL, 5)
    oprot.writeBool(self.early_stop_on_val_loss)
    oprot.writeFieldEnd()
if self.loss is not None:
    oprot.writeFieldBegin('loss', TType.STRUCT, 6)
    self.loss.write(oprot)
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.logging_config is not None:
value = pprint.pformat(self.logging_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' logging_config=%s' % (value))
if self.nepochs is not None:
value = pprint.pformat(self.nepochs, indent=0)
value = padding.join(value.splitlines(True))
L.append(' nepochs=%s' % (value))
if self.early_stop_on_val_loss is not None:
value = pprint.pformat(self.early_stop_on_val_loss, indent=0)
value = padding.join(value.splitlines(True))
L.append(' early_stop_on_val_loss=%s' % (value))
if self.loss is not None:
value = pprint.pformat(self.loss, indent=0)
value = padding.join(value.splitlines(True))
L.append(' loss=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
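# Editor's note: a hedged composition sketch (not part of the generated file).
# It assumes the patched __init__s accept these keyword arguments, mirroring
# the init patterns visible at the bottom of this module.
def _example_train_config():
    return TrainConfig(
        logging_config=LoggingConfig(log_freq=100, tb_log_freq=500),
        nepochs=10,
        early_stop_on_val_loss=True,
        loss=LossConfig(bcewithlogits=BCEWithLogitsLoss()),
    )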
class EvalConfig:
"""
Attributes:
- logging_config
- loss
- compute_ne
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.logging_config = LoggingConfig()
self.logging_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.loss = LossConfig()
self.loss.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.compute_ne = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('EvalConfig')
if self.logging_config is not None:
    oprot.writeFieldBegin('logging_config', TType.STRUCT, 1)
    self.logging_config.write(oprot)
    oprot.writeFieldEnd()
if self.loss is not None:
    oprot.writeFieldBegin('loss', TType.STRUCT, 2)
    self.loss.write(oprot)
    oprot.writeFieldEnd()
if self.compute_ne is not None:
    oprot.writeFieldBegin('compute_ne', TType.BOOL, 3)
    oprot.writeBool(self.compute_ne)
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.logging_config is not None:
value = pprint.pformat(self.logging_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' logging_config=%s' % (value))
if self.loss is not None:
value = pprint.pformat(self.loss, indent=0)
value = padding.join(value.splitlines(True))
L.append(' loss=%s' % (value))
if self.compute_ne is not None:
value = pprint.pformat(self.compute_ne, indent=0)
value = padding.join(value.splitlines(True))
L.append(' compute_ne=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
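# Editor's note (assumption): compute_ne above most likely toggles Normalized
# Entropy, a standard calibration metric for click-through-rate models.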
class CheckpointConfig:
"""
Attributes:
- ckp_interval
- ckp_path
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.ckp_interval = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ckp_path = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CheckpointConfig')
if self.ckp_interval is not None:
    oprot.writeFieldBegin('ckp_interval', TType.I32, 1)
    oprot.writeI32(self.ckp_interval)
    oprot.writeFieldEnd()
if self.ckp_path is not None:
    oprot.writeFieldBegin('ckp_path', TType.STRING, 2)
    if UTF8STRINGS and not isinstance(self.ckp_path, bytes):
        oprot.writeString(self.ckp_path.encode('utf-8'))
    else:
        oprot.writeString(self.ckp_path)
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.ckp_interval is not None:
value = pprint.pformat(self.ckp_interval, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ckp_interval=%s' % (value))
if self.ckp_path is not None:
value = pprint.pformat(self.ckp_path, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ckp_path=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class KoskiReaderConfig:
"""
Attributes:
- prefetch_capacity
- pin_memory
- num_workers
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.prefetch_capacity = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.pin_memory = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.num_workers = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('KoskiReaderConfig')
if self.prefetch_capacity is not None:
    oprot.writeFieldBegin('prefetch_capacity', TType.I64, 1)
    oprot.writeI64(self.prefetch_capacity)
    oprot.writeFieldEnd()
if self.pin_memory is not None:
    oprot.writeFieldBegin('pin_memory', TType.BOOL, 2)
    oprot.writeBool(self.pin_memory)
    oprot.writeFieldEnd()
if self.num_workers is not None:
    oprot.writeFieldBegin('num_workers', TType.I32, 3)
    oprot.writeI32(self.num_workers)
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.prefetch_capacity is not None:
value = pprint.pformat(self.prefetch_capacity, indent=0)
value = padding.join(value.splitlines(True))
L.append(' prefetch_capacity=%s' % (value))
if self.pin_memory is not None:
value = pprint.pformat(self.pin_memory, indent=0)
value = padding.join(value.splitlines(True))
L.append(' pin_memory=%s' % (value))
if self.num_workers is not None:
value = pprint.pformat(self.num_workers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_workers=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
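# Editor's note (assumption): KoskiReaderConfig mirrors familiar data-loader
# knobs — prefetch_capacity bounds the reader's prefetch queue, while
# pin_memory and num_workers correspond to the same-named options on
# torch.utils.data.DataLoader.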
class PerformanceConfig:
"""
Attributes:
- use_gpu
- num_readers
- num_trainers
- ckp_config
- data_queue_maxsize
- reader_threads
- num_gpu
- enable_profiling
- koski
- omp_num_threads
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.use_gpu = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.num_readers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.num_trainers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.ckp_config = CheckpointConfig()
self.ckp_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.data_queue_maxsize = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.reader_threads = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.num_gpu = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.enable_profiling = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRUCT:
self.koski = KoskiReaderConfig()
self.koski.read(iprot)
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.omp_num_threads = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('PerformanceConfig')
if self.use_gpu is not None:
    oprot.writeFieldBegin('use_gpu', TType.BOOL, 1)
    oprot.writeBool(self.use_gpu)
    oprot.writeFieldEnd()
if self.num_readers is not None:
    oprot.writeFieldBegin('num_readers', TType.I32, 2)
    oprot.writeI32(self.num_readers)
    oprot.writeFieldEnd()
if self.num_trainers is not None:
    oprot.writeFieldBegin('num_trainers', TType.I32, 3)
    oprot.writeI32(self.num_trainers)
    oprot.writeFieldEnd()
if self.ckp_config is not None:
    oprot.writeFieldBegin('ckp_config', TType.STRUCT, 4)
    self.ckp_config.write(oprot)
    oprot.writeFieldEnd()
if self.data_queue_maxsize is not None:
    oprot.writeFieldBegin('data_queue_maxsize', TType.I32, 5)
    oprot.writeI32(self.data_queue_maxsize)
    oprot.writeFieldEnd()
if self.reader_threads is not None:
    oprot.writeFieldBegin('reader_threads', TType.I32, 6)
    oprot.writeI32(self.reader_threads)
    oprot.writeFieldEnd()
if self.num_gpu is not None:
    oprot.writeFieldBegin('num_gpu', TType.I32, 7)
    oprot.writeI32(self.num_gpu)
    oprot.writeFieldEnd()
if self.enable_profiling is not None and self.enable_profiling != self.thrift_spec[8][4]:
    oprot.writeFieldBegin('enable_profiling', TType.BOOL, 8)
    oprot.writeBool(self.enable_profiling)
    oprot.writeFieldEnd()
if self.koski is not None:
    oprot.writeFieldBegin('koski', TType.STRUCT, 9)
    self.koski.write(oprot)
    oprot.writeFieldEnd()
if self.omp_num_threads is not None and self.omp_num_threads != self.thrift_spec[10][4]:
    oprot.writeFieldBegin('omp_num_threads', TType.I32, 10)
    oprot.writeI32(self.omp_num_threads)
    oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.use_gpu is not None:
value = pprint.pformat(self.use_gpu, indent=0)
value = padding.join(value.splitlines(True))
L.append(' use_gpu=%s' % (value))
if self.num_readers is not None:
value = pprint.pformat(self.num_readers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_readers=%s' % (value))
if self.num_trainers is not None:
value = pprint.pformat(self.num_trainers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_trainers=%s' % (value))
if self.ckp_config is not None:
value = pprint.pformat(self.ckp_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ckp_config=%s' % (value))
if self.data_queue_maxsize is not None:
value = pprint.pformat(self.data_queue_maxsize, indent=0)
value = padding.join(value.splitlines(True))
L.append(' data_queue_maxsize=%s' % (value))
if self.reader_threads is not None:
value = pprint.pformat(self.reader_threads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' reader_threads=%s' % (value))
if self.num_gpu is not None:
value = pprint.pformat(self.num_gpu, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_gpu=%s' % (value))
if self.enable_profiling is not None:
value = pprint.pformat(self.enable_profiling, indent=0)
value = padding.join(value.splitlines(True))
L.append(' enable_profiling=%s' % (value))
if self.koski is not None:
value = pprint.pformat(self.koski, indent=0)
value = padding.join(value.splitlines(True))
L.append(' koski=%s' % (value))
if self.omp_num_threads is not None:
value = pprint.pformat(self.omp_num_threads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' omp_num_threads=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
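# Editor's note: from here to the end of the module, the generated code
# back-fills each struct's thrift_spec tuple (slot 4 of every field entry
# holds that field's default) and then monkey-patches __init__, __getstate__
# and __setstate__ onto the classes declared above — which is why each class
# body sets `__init__ = None` as a placeholder.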
all_structs.append(DataFromFileConfig)
DataFromFileConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'data_file', True, None, 2, ), # 1
(2, TType.I32, 'batch_size', None, 100, 2, ), # 2
(3, TType.I32, 'num_batches', None, -1, 2, ), # 3
(4, TType.LIST, 'splits', (TType.FLOAT,None), [
0.800000,
0.100000,
], 2, ), # 4
(5, TType.I32, 'num_samples_meta', None, 100000, 2, ), # 5
)
DataFromFileConfig.thrift_struct_annotations = {
}
DataFromFileConfig.thrift_field_annotations = {
}
def DataFromFileConfig__init__(self, data_file=None, batch_size=DataFromFileConfig.thrift_spec[2][4], num_batches=DataFromFileConfig.thrift_spec[3][4], splits=DataFromFileConfig.thrift_spec[4][4], num_samples_meta=DataFromFileConfig.thrift_spec[5][4],):
self.data_file = data_file
self.batch_size = batch_size
self.num_batches = num_batches
if splits is self.thrift_spec[4][4]:
splits = [
0.800000,
0.100000,
]
self.splits = splits
self.num_samples_meta = num_samples_meta
DataFromFileConfig.__init__ = DataFromFileConfig__init__
def DataFromFileConfig__setstate__(self, state):
state.setdefault('data_file', None)
state.setdefault('batch_size', 100)
state.setdefault('num_batches', -1)
state.setdefault('splits', [
0.800000,
0.100000,
])
state.setdefault('num_samples_meta', 100000)
self.__dict__ = state
DataFromFileConfig.__getstate__ = lambda self: self.__dict__.copy()
DataFromFileConfig.__setstate__ = DataFromFileConfig__setstate__
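# Editor's note: a short sketch (not part of the generated file) of why the
# patched __init__ above re-builds the list when `splits is thrift_spec[4][4]`
# instead of assigning the spec default directly — each instance gets its own
# list, so mutating one config cannot leak into another.
def _example_default_list_not_shared():
    a = DataFromFileConfig()
    b = DataFromFileConfig()
    a.splits.append(0.05)
    assert b.splits == [0.8, 0.1]                       # b keeps its own copy
    assert DataFromFileConfig.thrift_spec[2][4] == 100  # batch_size default lives in spec slot 4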
all_structs.append(DataConfig)
DataConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'from_file', [DataFromFileConfig, DataFromFileConfig.thrift_spec, False], None, 2, ), # 1
)
DataConfig.thrift_struct_annotations = {
}
DataConfig.thrift_field_annotations = {
}
def DataConfig__init__(self, from_file=None,):
self.field = 0
self.value = None
if from_file is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = from_file
DataConfig.__init__ = DataConfig__init__
all_structs.append(MicroClose)
MicroClose.thrift_spec = (
)
MicroClose.thrift_struct_annotations = {
}
MicroClose.thrift_field_annotations = {
}
all_structs.append(MicroMLPConfig)
MicroMLPConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 1
)
MicroMLPConfig.thrift_struct_annotations = {
}
MicroMLPConfig.thrift_field_annotations = {
}
def MicroMLPConfig__init__(self, arc=None,):
self.arc = arc
MicroMLPConfig.__init__ = MicroMLPConfig__init__
def MicroMLPConfig__setstate__(self, state):
state.setdefault('arc', None)
self.__dict__ = state
MicroMLPConfig.__getstate__ = lambda self: self.__dict__.copy()
MicroMLPConfig.__setstate__ = MicroMLPConfig__setstate__
all_structs.append(MicroCINConfig)
MicroCINConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 1
(2, TType.LIST, 'num_of_layers', (TType.I32,None), [
1,
2,
3,
], 2, ), # 2
)
MicroCINConfig.thrift_struct_annotations = {
}
MicroCINConfig.thrift_field_annotations = {
}
def MicroCINConfig__init__(self, arc=None, num_of_layers=MicroCINConfig.thrift_spec[2][4],):
self.arc = arc
if num_of_layers is self.thrift_spec[2][4]:
num_of_layers = [
1,
2,
3,
]
self.num_of_layers = num_of_layers
MicroCINConfig.__init__ = MicroCINConfig__init__
def MicroCINConfig__setstate__(self, state):
state.setdefault('arc', None)
state.setdefault('num_of_layers', [
1,
2,
3,
])
self.__dict__ = state
MicroCINConfig.__getstate__ = lambda self: self.__dict__.copy()
MicroCINConfig.__setstate__ = MicroCINConfig__setstate__
all_structs.append(MicroAttentionConfig)
MicroAttentionConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'num_of_layers', (TType.I32,None), [
1,
2,
3,
], 2, ), # 1
(2, TType.LIST, 'num_of_heads', (TType.I32,None), [
1,
2,
3,
], 2, ), # 2
(3, TType.LIST, 'att_embed_dim', (TType.I32,None), [
10,
], 2, ), # 3
(4, TType.LIST, 'dropout_prob', (TType.FLOAT,None), [
0.00000,
0.200000,
0.400000,
], 2, ), # 4
)
MicroAttentionConfig.thrift_struct_annotations = {
}
MicroAttentionConfig.thrift_field_annotations = {
}
def MicroAttentionConfig__init__(self, num_of_layers=MicroAttentionConfig.thrift_spec[1][4], num_of_heads=MicroAttentionConfig.thrift_spec[2][4], att_embed_dim=MicroAttentionConfig.thrift_spec[3][4], dropout_prob=MicroAttentionConfig.thrift_spec[4][4],):
if num_of_layers is self.thrift_spec[1][4]:
num_of_layers = [
1,
2,
3,
]
self.num_of_layers = num_of_layers
if num_of_heads is self.thrift_spec[2][4]:
num_of_heads = [
1,
2,
3,
]
self.num_of_heads = num_of_heads
if att_embed_dim is self.thrift_spec[3][4]:
att_embed_dim = [
10,
]
self.att_embed_dim = att_embed_dim
if dropout_prob is self.thrift_spec[4][4]:
dropout_prob = [
0.00000,
0.200000,
0.400000,
]
self.dropout_prob = dropout_prob
MicroAttentionConfig.__init__ = MicroAttentionConfig__init__
def MicroAttentionConfig__setstate__(self, state):
state.setdefault('num_of_layers', [
1,
2,
3,
])
state.setdefault('num_of_heads', [
1,
2,
3,
])
state.setdefault('att_embed_dim', [
10,
])
state.setdefault('dropout_prob', [
0.00000,
0.200000,
0.400000,
])
self.__dict__ = state
MicroAttentionConfig.__getstate__ = lambda self: self.__dict__.copy()
MicroAttentionConfig.__setstate__ = MicroAttentionConfig__setstate__
all_structs.append(MicroSearchSpaceType)
MicroSearchSpaceType.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'close', [MicroClose, MicroClose.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'micro_mlp', [MicroMLPConfig, MicroMLPConfig.thrift_spec, False], None, 2, ), # 2
(3, TType.STRUCT, 'micro_cin', [MicroCINConfig, MicroCINConfig.thrift_spec, False], None, 2, ), # 3
(4, TType.STRUCT, 'micro_attention', [MicroAttentionConfig, MicroAttentionConfig.thrift_spec, False], None, 2, ), # 4
)
MicroSearchSpaceType.thrift_struct_annotations = {
}
MicroSearchSpaceType.thrift_field_annotations = {
}
def MicroSearchSpaceType__init__(self, close=None, micro_mlp=None, micro_cin=None, micro_attention=None,):
self.field = 0
self.value = None
if close is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = close
if micro_mlp is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = micro_mlp
if micro_cin is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = micro_cin
if micro_attention is not None:
assert self.field == 0 and self.value is None
self.field = 4
self.value = micro_attention
MicroSearchSpaceType.__init__ = MicroSearchSpaceType__init__
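# Editor's note: a short sketch (not part of the generated file) showing that
# the asserts in the patched __init__ above reject setting two union members;
# both sub-config __init__s used here are patched earlier in this section.
def _example_micro_search_space_union():
    ok = MicroSearchSpaceType(micro_mlp=MicroMLPConfig(arc=[128, 64]))
    try:
        MicroSearchSpaceType(micro_mlp=MicroMLPConfig(arc=[64]),
                             micro_cin=MicroCINConfig(arc=[64]))
    except AssertionError:
        pass  # exactly one member may be set
    return ok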
all_structs.append(InputDenseAsSparse)
InputDenseAsSparse.thrift_spec = (
)
InputDenseAsSparse.thrift_struct_annotations = {
}
InputDenseAsSparse.thrift_field_annotations = {
}
all_structs.append(FeatureProcessingType)
FeatureProcessingType.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'idasp', [InputDenseAsSparse, InputDenseAsSparse.thrift_spec, False], None, 2, ), # 1
)
FeatureProcessingType.thrift_struct_annotations = {
}
FeatureProcessingType.thrift_field_annotations = {
}
def FeatureProcessingType__init__(self, idasp=None,):
self.field = 0
self.value = None
if idasp is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = idasp
FeatureProcessingType.__init__ = FeatureProcessingType__init__
all_structs.append(NASRecNetConfig)
NASRecNetConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'block_configs', (TType.STRUCT,[block_config.ttypes.BlockConfig, block_config.ttypes.BlockConfig.thrift_spec, True]), None, 2, ), # 1
)
NASRecNetConfig.thrift_struct_annotations = {
}
NASRecNetConfig.thrift_field_annotations = {
}
def NASRecNetConfig__init__(self, block_configs=None,):
self.block_configs = block_configs
NASRecNetConfig.__init__ = NASRecNetConfig__init__
def NASRecNetConfig__setstate__(self, state):
state.setdefault('block_configs', None)
self.__dict__ = state
NASRecNetConfig.__getstate__ = lambda self: self.__dict__.copy()
NASRecNetConfig.__setstate__ = NASRecNetConfig__setstate__
all_structs.append(RandomSearcherConfig)
RandomSearcherConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'max_num_block', None, 3, 2, ), # 1
(2, TType.LIST, 'block_types', (TType.I32,block_config.ttypes.ExtendedBlockType), None, 2, ), # 2
(3, TType.I32, 'macro_space_type', MacroSearchSpaceType, 1, 2, ), # 3
None, # 4
(5, TType.LIST, 'micro_space_types', (TType.STRUCT,[MicroSearchSpaceType, MicroSearchSpaceType.thrift_spec, True]), None, 2, ), # 5
(6, TType.LIST, 'feature_processing_type', (TType.STRUCT,[FeatureProcessingType, FeatureProcessingType.thrift_spec, True]), [
], 2, ), # 6
)
RandomSearcherConfig.thrift_struct_annotations = {
}
RandomSearcherConfig.thrift_field_annotations = {
}
def RandomSearcherConfig__init__(self, max_num_block=RandomSearcherConfig.thrift_spec[1][4], block_types=None, macro_space_type=RandomSearcherConfig.thrift_spec[3][4], micro_space_types=None, feature_processing_type=RandomSearcherConfig.thrift_spec[6][4],):
self.max_num_block = max_num_block
self.block_types = block_types
self.macro_space_type = macro_space_type
self.micro_space_types = micro_space_types
if feature_processing_type is self.thrift_spec[6][4]:
feature_processing_type = [
]
self.feature_processing_type = feature_processing_type
RandomSearcherConfig.__init__ = RandomSearcherConfig__init__
def RandomSearcherConfig__setstate__(self, state):
state.setdefault('max_num_block', 3)
state.setdefault('block_types', None)
state.setdefault('macro_space_type', 1)
state.setdefault('micro_space_types', None)
state.setdefault('feature_processing_type', [
])
self.__dict__ = state
RandomSearcherConfig.__getstate__ = lambda self: self.__dict__.copy()
RandomSearcherConfig.__setstate__ = RandomSearcherConfig__setstate__
all_structs.append(EvolutionarySearcherConfig)
EvolutionarySearcherConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'max_num_block', None, 3, 2, ), # 1
(2, TType.LIST, 'block_types', (TType.I32,block_config.ttypes.ExtendedBlockType), None, 2, ), # 2
(3, TType.I32, 'population_size', None, 10, 2, ), # 3
(4, TType.I32, 'candidate_size', None, 5, 2, ), # 4
(5, TType.I32, 'macro_space_type', MacroSearchSpaceType, 1, 2, ), # 5
None, # 6
(7, TType.LIST, 'micro_space_types', (TType.STRUCT,[MicroSearchSpaceType, MicroSearchSpaceType.thrift_spec, True]), None, 2, ), # 7
(8, TType.LIST, 'feature_processing_type', (TType.STRUCT,[FeatureProcessingType, FeatureProcessingType.thrift_spec, True]), [
], 2, ), # 8
)
EvolutionarySearcherConfig.thrift_struct_annotations = {
}
EvolutionarySearcherConfig.thrift_field_annotations = {
}
def EvolutionarySearcherConfig__init__(self, max_num_block=EvolutionarySearcherConfig.thrift_spec[1][4], block_types=None, population_size=EvolutionarySearcherConfig.thrift_spec[3][4], candidate_size=EvolutionarySearcherConfig.thrift_spec[4][4], macro_space_type=EvolutionarySearcherConfig.thrift_spec[5][4], micro_space_types=None, feature_processing_type=EvolutionarySearcherConfig.thrift_spec[8][4],):
self.max_num_block = max_num_block
self.block_types = block_types
self.population_size = population_size
self.candidate_size = candidate_size
self.macro_space_type = macro_space_type
self.micro_space_types = micro_space_types
if feature_processing_type is self.thrift_spec[8][4]:
feature_processing_type = [
]
self.feature_processing_type = feature_processing_type
EvolutionarySearcherConfig.__init__ = EvolutionarySearcherConfig__init__
def EvolutionarySearcherConfig__setstate__(self, state):
state.setdefault('max_num_block', 3)
state.setdefault('block_types', None)
state.setdefault('population_size', 10)
state.setdefault('candidate_size', 5)
state.setdefault('macro_space_type', 1)
state.setdefault('micro_space_types', None)
state.setdefault('feature_processing_type', [
])
self.__dict__ = state
EvolutionarySearcherConfig.__getstate__ = lambda self: self.__dict__.copy()
EvolutionarySearcherConfig.__setstate__ = EvolutionarySearcherConfig__setstate__
all_structs.append(SearcherConfig)
SearcherConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'random_searcher', [RandomSearcherConfig, RandomSearcherConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'evolutionary_searcher', [EvolutionarySearcherConfig, EvolutionarySearcherConfig.thrift_spec, False], None, 2, ), # 2
)
SearcherConfig.thrift_struct_annotations = {
}
SearcherConfig.thrift_field_annotations = {
}
def SearcherConfig__init__(self, random_searcher=None, evolutionary_searcher=None,):
self.field = 0
self.value = None
if random_searcher is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = random_searcher
if evolutionary_searcher is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = evolutionary_searcher
SearcherConfig.__init__ = SearcherConfig__init__
all_structs.append(ModelConfig)
ModelConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'nasrec_net', [NASRecNetConfig, NASRecNetConfig.thrift_spec, False], None, 2, ), # 1
)
ModelConfig.thrift_struct_annotations = {
}
ModelConfig.thrift_field_annotations = {
}
def ModelConfig__init__(self, nasrec_net=None,):
self.field = 0
self.value = None
if nasrec_net is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = nasrec_net
ModelConfig.__init__ = ModelConfig__init__
all_structs.append(SGDOptimConfig)
SGDOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.0100000, 2, ), # 1
(2, TType.FLOAT, 'momentum', None, 0.00000, 2, ), # 2
(3, TType.FLOAT, 'dampening', None, 0.00000, 2, ), # 3
(4, TType.BOOL, 'nesterov', None, False, 2, ), # 4
(5, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 5
)
SGDOptimConfig.thrift_struct_annotations = {
}
SGDOptimConfig.thrift_field_annotations = {
}
def SGDOptimConfig__init__(self, lr=SGDOptimConfig.thrift_spec[1][4], momentum=SGDOptimConfig.thrift_spec[2][4], dampening=SGDOptimConfig.thrift_spec[3][4], nesterov=SGDOptimConfig.thrift_spec[4][4], weight_decay=SGDOptimConfig.thrift_spec[5][4],):
self.lr = lr
self.momentum = momentum
self.dampening = dampening
self.nesterov = nesterov
self.weight_decay = weight_decay
SGDOptimConfig.__init__ = SGDOptimConfig__init__
def SGDOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.0100000)
state.setdefault('momentum', 0.00000)
state.setdefault('dampening', 0.00000)
state.setdefault('nesterov', False)
state.setdefault('weight_decay', 0.00000)
self.__dict__ = state
SGDOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
SGDOptimConfig.__setstate__ = SGDOptimConfig__setstate__
all_structs.append(AdagradOptimConfig)
AdagradOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.0100000, 2, ), # 1
(2, TType.FLOAT, 'lr_decay', None, 0.00000, 2, ), # 2
(3, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 3
(4, TType.FLOAT, 'initial_accumulator_value', None, 0.00000, 2, ), # 4
)
AdagradOptimConfig.thrift_struct_annotations = {
}
AdagradOptimConfig.thrift_field_annotations = {
}
def AdagradOptimConfig__init__(self, lr=AdagradOptimConfig.thrift_spec[1][4], lr_decay=AdagradOptimConfig.thrift_spec[2][4], weight_decay=AdagradOptimConfig.thrift_spec[3][4], initial_accumulator_value=AdagradOptimConfig.thrift_spec[4][4],):
self.lr = lr
self.lr_decay = lr_decay
self.weight_decay = weight_decay
self.initial_accumulator_value = initial_accumulator_value
AdagradOptimConfig.__init__ = AdagradOptimConfig__init__
def AdagradOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.0100000)
state.setdefault('lr_decay', 0.00000)
state.setdefault('weight_decay', 0.00000)
state.setdefault('initial_accumulator_value', 0.00000)
self.__dict__ = state
AdagradOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
AdagradOptimConfig.__setstate__ = AdagradOptimConfig__setstate__
all_structs.append(SparseAdamOptimConfig)
SparseAdamOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.00100000, 2, ), # 1
(2, TType.FLOAT, 'betas0', None, 0.900000, 2, ), # 2
(3, TType.FLOAT, 'betas1', None, 0.999000, 2, ), # 3
(4, TType.FLOAT, 'eps', None, 1.00000e-08, 2, ), # 4
)
SparseAdamOptimConfig.thrift_struct_annotations = {
}
SparseAdamOptimConfig.thrift_field_annotations = {
}
def SparseAdamOptimConfig__init__(self, lr=SparseAdamOptimConfig.thrift_spec[1][4], betas0=SparseAdamOptimConfig.thrift_spec[2][4], betas1=SparseAdamOptimConfig.thrift_spec[3][4], eps=SparseAdamOptimConfig.thrift_spec[4][4],):
self.lr = lr
self.betas0 = betas0
self.betas1 = betas1
self.eps = eps
SparseAdamOptimConfig.__init__ = SparseAdamOptimConfig__init__
def SparseAdamOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.00100000)
state.setdefault('betas0', 0.900000)
state.setdefault('betas1', 0.999000)
state.setdefault('eps', 1.00000e-08)
self.__dict__ = state
SparseAdamOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
SparseAdamOptimConfig.__setstate__ = SparseAdamOptimConfig__setstate__
all_structs.append(AdamOptimConfig)
AdamOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.00100000, 2, ), # 1
(2, TType.BOOL, 'amsgrad', None, False, 2, ), # 2
(3, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 3
(4, TType.FLOAT, 'betas0', None, 0.900000, 2, ), # 4
(5, TType.FLOAT, 'betas1', None, 0.999000, 2, ), # 5
(6, TType.FLOAT, 'eps', None, 1.00000e-08, 2, ), # 6
)
AdamOptimConfig.thrift_struct_annotations = {
}
AdamOptimConfig.thrift_field_annotations = {
}
def AdamOptimConfig__init__(self, lr=AdamOptimConfig.thrift_spec[1][4], amsgrad=AdamOptimConfig.thrift_spec[2][4], weight_decay=AdamOptimConfig.thrift_spec[3][4], betas0=AdamOptimConfig.thrift_spec[4][4], betas1=AdamOptimConfig.thrift_spec[5][4], eps=AdamOptimConfig.thrift_spec[6][4],):
self.lr = lr
self.amsgrad = amsgrad
self.weight_decay = weight_decay
self.betas0 = betas0
self.betas1 = betas1
self.eps = eps
AdamOptimConfig.__init__ = AdamOptimConfig__init__
def AdamOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.00100000)
state.setdefault('amsgrad', False)
state.setdefault('weight_decay', 0.00000)
state.setdefault('betas0', 0.900000)
state.setdefault('betas1', 0.999000)
state.setdefault('eps', 1.00000e-08)
self.__dict__ = state
AdamOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
AdamOptimConfig.__setstate__ = AdamOptimConfig__setstate__
all_structs.append(RMSpropOptimConfig)
RMSpropOptimConfig.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'lr', None, 0.0100000, 2, ), # 1
(2, TType.FLOAT, 'alpha', None, 0.990000, 2, ), # 2
(3, TType.FLOAT, 'weight_decay', None, 0.00000, 2, ), # 3
(4, TType.FLOAT, 'momentum', None, 0.00000, 2, ), # 4
(5, TType.BOOL, 'centered', None, False, 2, ), # 5
(6, TType.FLOAT, 'eps', None, 1.00000e-08, 2, ), # 6
)
RMSpropOptimConfig.thrift_struct_annotations = {
}
RMSpropOptimConfig.thrift_field_annotations = {
}
def RMSpropOptimConfig__init__(self, lr=RMSpropOptimConfig.thrift_spec[1][4], alpha=RMSpropOptimConfig.thrift_spec[2][4], weight_decay=RMSpropOptimConfig.thrift_spec[3][4], momentum=RMSpropOptimConfig.thrift_spec[4][4], centered=RMSpropOptimConfig.thrift_spec[5][4], eps=RMSpropOptimConfig.thrift_spec[6][4],):
self.lr = lr
self.alpha = alpha
self.weight_decay = weight_decay
self.momentum = momentum
self.centered = centered
self.eps = eps
RMSpropOptimConfig.__init__ = RMSpropOptimConfig__init__
def RMSpropOptimConfig__setstate__(self, state):
state.setdefault('lr', 0.0100000)
state.setdefault('alpha', 0.990000)
state.setdefault('weight_decay', 0.00000)
state.setdefault('momentum', 0.00000)
state.setdefault('centered', False)
state.setdefault('eps', 1.00000e-08)
self.__dict__ = state
RMSpropOptimConfig.__getstate__ = lambda self: self.__dict__.copy()
RMSpropOptimConfig.__setstate__ = RMSpropOptimConfig__setstate__
all_structs.append(OptimConfig)
OptimConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'sgd', [SGDOptimConfig, SGDOptimConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'adagrad', [AdagradOptimConfig, AdagradOptimConfig.thrift_spec, False], AdagradOptimConfig(**{
}), 2, ), # 2
(3, TType.STRUCT, 'sparse_adam', [SparseAdamOptimConfig, SparseAdamOptimConfig.thrift_spec, False], None, 2, ), # 3
(4, TType.STRUCT, 'adam', [AdamOptimConfig, AdamOptimConfig.thrift_spec, False], None, 2, ), # 4
(5, TType.STRUCT, 'rmsprop', [RMSpropOptimConfig, RMSpropOptimConfig.thrift_spec, False], None, 2, ), # 5
)
OptimConfig.thrift_struct_annotations = {
}
OptimConfig.thrift_field_annotations = {
}
def OptimConfig__init__(self, sgd=None, adagrad=OptimConfig.thrift_spec[2][4], sparse_adam=None, adam=None, rmsprop=None,):
self.field = 0
self.value = None
if sgd is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = sgd
if adagrad is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = adagrad
if sparse_adam is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = sparse_adam
if adam is not None:
assert self.field == 0 and self.value is None
self.field = 4
self.value = adam
if rmsprop is not None:
assert self.field == 0 and self.value is None
self.field = 5
self.value = rmsprop
OptimConfig.__init__ = OptimConfig__init__
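# --- Hand-written note (not part of the Thrift codegen output) ---
# OptimConfig is a Thrift union: at most one member may be set, tracked by
# (self.field, self.value). Because the `adagrad` parameter defaults to a live
# AdagradOptimConfig instance (thrift_spec[2][4]), a bare OptimConfig()
# selects adagrad, and selecting any other member requires passing
# adagrad=None explicitly, or the asserts in __init__ above will fire.
if __name__ == "__main__":
    default_opt = OptimConfig()
    assert default_opt.field == 2  # adagrad, via the spec default
    adam_opt = OptimConfig(adagrad=None, adam=AdamOptimConfig(lr=0.001))
    assert adam_opt.field == 4 and adam_opt.value.lr == 0.001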
all_structs.append(SumPooling)
SumPooling.thrift_spec = (
)
SumPooling.thrift_struct_annotations = {
}
SumPooling.thrift_field_annotations = {
}
all_structs.append(AvgPooling)
AvgPooling.thrift_spec = (
)
AvgPooling.thrift_struct_annotations = {
}
AvgPooling.thrift_field_annotations = {
}
all_structs.append(PoolingConfig)
PoolingConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'sum', [SumPooling, SumPooling.thrift_spec, False], SumPooling(**{
}), 2, ), # 1
(2, TType.STRUCT, 'avg', [AvgPooling, AvgPooling.thrift_spec, False], None, 2, ), # 2
)
PoolingConfig.thrift_struct_annotations = {
}
PoolingConfig.thrift_field_annotations = {
}
def PoolingConfig__init__(self, sum=PoolingConfig.thrift_spec[1][4], avg=None,):
self.field = 0
self.value = None
if sum is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = sum
if avg is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = avg
PoolingConfig.__init__ = PoolingConfig__init__
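# --- Hand-written note (not part of the Thrift codegen output) ---
# Same union pattern as OptimConfig: PoolingConfig() defaults to the `sum`
# member, so choosing avg requires clearing the default first.
if __name__ == "__main__":
    assert PoolingConfig().field == 1  # sum, via the spec default
    assert PoolingConfig(sum=None, avg=AvgPooling()).field == 2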
all_structs.append(SparseFeatureItem)
SparseFeatureItem.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, None, 2, ), # 1
(2, TType.I32, 'hash_size', None, 10000, 2, ), # 2
(3, TType.I32, 'embed_dim', None, -1, 2, ), # 3
(4, TType.STRUCT, 'optim', [OptimConfig, OptimConfig.thrift_spec, True], None, 1, ), # 4
(5, TType.STRUCT, 'pooling', [PoolingConfig, PoolingConfig.thrift_spec, True], PoolingConfig(**{
"sum" : SumPooling(**{
}),
}), 2, ), # 5
)
SparseFeatureItem.thrift_struct_annotations = {
}
SparseFeatureItem.thrift_field_annotations = {
}
def SparseFeatureItem__init__(self, name=None, hash_size=SparseFeatureItem.thrift_spec[2][4], embed_dim=SparseFeatureItem.thrift_spec[3][4], optim=None, pooling=SparseFeatureItem.thrift_spec[5][4],):
self.name = name
self.hash_size = hash_size
self.embed_dim = embed_dim
self.optim = optim
if pooling is self.thrift_spec[5][4]:
pooling = PoolingConfig(**{
"sum" : SumPooling(**{
}),
})
self.pooling = pooling
SparseFeatureItem.__init__ = SparseFeatureItem__init__
def SparseFeatureItem__setstate__(self, state):
state.setdefault('name', None)
state.setdefault('hash_size', 10000)
state.setdefault('embed_dim', -1)
state.setdefault('optim', None)
state.setdefault('pooling', PoolingConfig(**{
"sum" : SumPooling(**{
}),
}))
self.__dict__ = state
SparseFeatureItem.__getstate__ = lambda self: self.__dict__.copy()
SparseFeatureItem.__setstate__ = SparseFeatureItem__setstate__
all_structs.append(SparseFeatureConfig)
SparseFeatureConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'features', (TType.STRUCT,[SparseFeatureItem, SparseFeatureItem.thrift_spec, False]), [
], 2, ), # 1
(2, TType.I32, 'embed_dim', None, -1, 2, ), # 2
(3, TType.STRUCT, 'optim', [OptimConfig, OptimConfig.thrift_spec, True], None, 2, ), # 3
)
SparseFeatureConfig.thrift_struct_annotations = {
}
SparseFeatureConfig.thrift_field_annotations = {
}
def SparseFeatureConfig__init__(self, features=SparseFeatureConfig.thrift_spec[1][4], embed_dim=SparseFeatureConfig.thrift_spec[2][4], optim=None,):
if features is self.thrift_spec[1][4]:
features = [
]
self.features = features
self.embed_dim = embed_dim
self.optim = optim
SparseFeatureConfig.__init__ = SparseFeatureConfig__init__
def SparseFeatureConfig__setstate__(self, state):
state.setdefault('features', [
])
state.setdefault('embed_dim', -1)
state.setdefault('optim', None)
self.__dict__ = state
SparseFeatureConfig.__getstate__ = lambda self: self.__dict__.copy()
SparseFeatureConfig.__setstate__ = SparseFeatureConfig__setstate__
all_structs.append(DenseFeatureConfig)
DenseFeatureConfig.thrift_spec = (
None, # 0
(1, TType.LIST, 'features', (TType.STRING,True), None, 2, ), # 1
(2, TType.STRUCT, 'optim', [OptimConfig, OptimConfig.thrift_spec, True], None, 2, ), # 2
)
DenseFeatureConfig.thrift_struct_annotations = {
}
DenseFeatureConfig.thrift_field_annotations = {
}
def DenseFeatureConfig__init__(self, features=None, optim=None,):
self.features = features
self.optim = optim
DenseFeatureConfig.__init__ = DenseFeatureConfig__init__
def DenseFeatureConfig__setstate__(self, state):
state.setdefault('features', None)
state.setdefault('optim', None)
self.__dict__ = state
DenseFeatureConfig.__getstate__ = lambda self: self.__dict__.copy()
DenseFeatureConfig.__setstate__ = DenseFeatureConfig__setstate__
all_structs.append(FeatureConfig)
FeatureConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'dense', [DenseFeatureConfig, DenseFeatureConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'sparse', [SparseFeatureConfig, SparseFeatureConfig.thrift_spec, False], None, 2, ), # 2
)
FeatureConfig.thrift_struct_annotations = {
}
FeatureConfig.thrift_field_annotations = {
}
def FeatureConfig__init__(self, dense=None, sparse=None,):
self.dense = dense
self.sparse = sparse
FeatureConfig.__init__ = FeatureConfig__init__
def FeatureConfig__setstate__(self, state):
state.setdefault('dense', None)
state.setdefault('sparse', None)
self.__dict__ = state
FeatureConfig.__getstate__ = lambda self: self.__dict__.copy()
FeatureConfig.__setstate__ = FeatureConfig__setstate__
all_structs.append(BCEWithLogitsLoss)
BCEWithLogitsLoss.thrift_spec = (
)
BCEWithLogitsLoss.thrift_struct_annotations = {
}
BCEWithLogitsLoss.thrift_field_annotations = {
}
all_structs.append(BCELoss)
BCELoss.thrift_spec = (
)
BCELoss.thrift_struct_annotations = {
}
BCELoss.thrift_field_annotations = {
}
all_structs.append(MSELoss)
MSELoss.thrift_spec = (
)
MSELoss.thrift_struct_annotations = {
}
MSELoss.thrift_field_annotations = {
}
all_structs.append(LossConfig)
LossConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'bcewithlogits', [BCEWithLogitsLoss, BCEWithLogitsLoss.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'bce', [BCELoss, BCELoss.thrift_spec, False], None, 2, ), # 2
(3, TType.STRUCT, 'mse', [MSELoss, MSELoss.thrift_spec, False], None, 2, ), # 3
)
LossConfig.thrift_struct_annotations = {
}
LossConfig.thrift_field_annotations = {
}
def LossConfig__init__(self, bcewithlogits=None, bce=None, mse=None,):
self.field = 0
self.value = None
if bcewithlogits is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = bcewithlogits
if bce is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = bce
if mse is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = mse
LossConfig.__init__ = LossConfig__init__
all_structs.append(LoggingConfig)
LoggingConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'log_freq', None, 10000, 2, ), # 1
(2, TType.I32, 'tb_log_freq', None, -1, 2, ), # 2
(3, TType.BOOL, 'tb_log_model_weight_hist', None, False, 2, ), # 3
(4, TType.BOOL, 'tb_log_pr_curve_batch', None, True, 2, ), # 4
(5, TType.LIST, 'tb_log_model_weight_filter_regex', (TType.STRING,True), [
"sparse",
], 2, ), # 5
)
LoggingConfig.thrift_struct_annotations = {
}
LoggingConfig.thrift_field_annotations = {
}
def LoggingConfig__init__(self, log_freq=LoggingConfig.thrift_spec[1][4], tb_log_freq=LoggingConfig.thrift_spec[2][4], tb_log_model_weight_hist=LoggingConfig.thrift_spec[3][4], tb_log_pr_curve_batch=LoggingConfig.thrift_spec[4][4], tb_log_model_weight_filter_regex=LoggingConfig.thrift_spec[5][4],):
self.log_freq = log_freq
self.tb_log_freq = tb_log_freq
self.tb_log_model_weight_hist = tb_log_model_weight_hist
self.tb_log_pr_curve_batch = tb_log_pr_curve_batch
if tb_log_model_weight_filter_regex is self.thrift_spec[5][4]:
tb_log_model_weight_filter_regex = [
"sparse",
]
self.tb_log_model_weight_filter_regex = tb_log_model_weight_filter_regex
LoggingConfig.__init__ = LoggingConfig__init__
def LoggingConfig__setstate__(self, state):
state.setdefault('log_freq', 10000)
state.setdefault('tb_log_freq', -1)
state.setdefault('tb_log_model_weight_hist', False)
state.setdefault('tb_log_pr_curve_batch', True)
state.setdefault('tb_log_model_weight_filter_regex', [
"sparse",
])
self.__dict__ = state
LoggingConfig.__getstate__ = lambda self: self.__dict__.copy()
LoggingConfig.__setstate__ = LoggingConfig__setstate__
all_structs.append(TrainConfig)
TrainConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'logging_config', [LoggingConfig, LoggingConfig.thrift_spec, False], None, 2, ), # 1
None, # 2
(3, TType.I32, 'nepochs', None, 1, 2, ), # 3
None, # 4
(5, TType.BOOL, 'early_stop_on_val_loss', None, True, 2, ), # 5
(6, TType.STRUCT, 'loss', [LossConfig, LossConfig.thrift_spec, True], LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}), 2, ), # 6
)
TrainConfig.thrift_struct_annotations = {
}
TrainConfig.thrift_field_annotations = {
}
def TrainConfig__init__(self, logging_config=None, nepochs=TrainConfig.thrift_spec[3][4], early_stop_on_val_loss=TrainConfig.thrift_spec[5][4], loss=TrainConfig.thrift_spec[6][4],):
self.logging_config = logging_config
self.nepochs = nepochs
self.early_stop_on_val_loss = early_stop_on_val_loss
if loss is self.thrift_spec[6][4]:
loss = LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
})
self.loss = loss
TrainConfig.__init__ = TrainConfig__init__
def TrainConfig__setstate__(self, state):
state.setdefault('logging_config', None)
state.setdefault('nepochs', 1)
state.setdefault('early_stop_on_val_loss', True)
state.setdefault('loss', LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}))
self.__dict__ = state
TrainConfig.__getstate__ = lambda self: self.__dict__.copy()
TrainConfig.__setstate__ = TrainConfig__setstate__
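# --- Hand-written note (not part of the Thrift codegen output) ---
# Sketch of the nested default above: a bare TrainConfig() materializes a
# fresh LossConfig union with bcewithlogits selected (field id 1).
if __name__ == "__main__":
    cfg = TrainConfig()
    assert cfg.nepochs == 1 and cfg.early_stop_on_val_loss is True
    assert cfg.loss.field == 1 and isinstance(cfg.loss.value, BCEWithLogitsLoss)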
all_structs.append(EvalConfig)
EvalConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'logging_config', [LoggingConfig, LoggingConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'loss', [LossConfig, LossConfig.thrift_spec, True], LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}), 2, ), # 2
(3, TType.BOOL, 'compute_ne', None, True, 2, ), # 3
)
EvalConfig.thrift_struct_annotations = {
}
EvalConfig.thrift_field_annotations = {
}
def EvalConfig__init__(self, logging_config=None, loss=EvalConfig.thrift_spec[2][4], compute_ne=EvalConfig.thrift_spec[3][4],):
self.logging_config = logging_config
if loss is self.thrift_spec[2][4]:
loss = LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
})
self.loss = loss
self.compute_ne = compute_ne
EvalConfig.__init__ = EvalConfig__init__
def EvalConfig__setstate__(self, state):
state.setdefault('logging_config', None)
state.setdefault('loss', LossConfig(**{
"bcewithlogits" : BCEWithLogitsLoss(**{
}),
}))
state.setdefault('compute_ne', True)
self.__dict__ = state
EvalConfig.__getstate__ = lambda self: self.__dict__.copy()
EvalConfig.__setstate__ = EvalConfig__setstate__
all_structs.append(CheckpointConfig)
CheckpointConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'ckp_interval', None, 10, 2, ), # 1
(2, TType.STRING, 'ckp_path', True, "", 2, ), # 2
)
CheckpointConfig.thrift_struct_annotations = {
}
CheckpointConfig.thrift_field_annotations = {
}
def CheckpointConfig__init__(self, ckp_interval=CheckpointConfig.thrift_spec[1][4], ckp_path=CheckpointConfig.thrift_spec[2][4],):
self.ckp_interval = ckp_interval
self.ckp_path = ckp_path
CheckpointConfig.__init__ = CheckpointConfig__init__
def CheckpointConfig__setstate__(self, state):
state.setdefault('ckp_interval', 10)
state.setdefault('ckp_path', "")
self.__dict__ = state
CheckpointConfig.__getstate__ = lambda self: self.__dict__.copy()
CheckpointConfig.__setstate__ = CheckpointConfig__setstate__
all_structs.append(KoskiReaderConfig)
KoskiReaderConfig.thrift_spec = (
None, # 0
(1, TType.I64, 'prefetch_capacity', None, 128, 2, ), # 1
(2, TType.BOOL, 'pin_memory', None, True, 2, ), # 2
(3, TType.I32, 'num_workers', None, 4, 2, ), # 3
)
KoskiReaderConfig.thrift_struct_annotations = {
}
KoskiReaderConfig.thrift_field_annotations = {
}
def KoskiReaderConfig__init__(self, prefetch_capacity=KoskiReaderConfig.thrift_spec[1][4], pin_memory=KoskiReaderConfig.thrift_spec[2][4], num_workers=KoskiReaderConfig.thrift_spec[3][4],):
self.prefetch_capacity = prefetch_capacity
self.pin_memory = pin_memory
self.num_workers = num_workers
KoskiReaderConfig.__init__ = KoskiReaderConfig__init__
def KoskiReaderConfig__setstate__(self, state):
state.setdefault('prefetch_capacity', 128)
state.setdefault('pin_memory', True)
state.setdefault('num_workers', 4)
self.__dict__ = state
KoskiReaderConfig.__getstate__ = lambda self: self.__dict__.copy()
KoskiReaderConfig.__setstate__ = KoskiReaderConfig__setstate__
all_structs.append(PerformanceConfig)
PerformanceConfig.thrift_spec = (
None, # 0
(1, TType.BOOL, 'use_gpu', None, False, 2, ), # 1
(2, TType.I32, 'num_readers', None, 4, 2, ), # 2
(3, TType.I32, 'num_trainers', None, 1, 2, ), # 3
(4, TType.STRUCT, 'ckp_config', [CheckpointConfig, CheckpointConfig.thrift_spec, False], CheckpointConfig(**{
"ckp_interval" : 10,
}), 2, ), # 4
(5, TType.I32, 'data_queue_maxsize', None, 100, 2, ), # 5
(6, TType.I32, 'reader_threads', None, 8, 2, ), # 6
(7, TType.I32, 'num_gpu', None, 1, 2, ), # 7
(8, TType.BOOL, 'enable_profiling', None, False, 1, ), # 8
(9, TType.STRUCT, 'koski', [KoskiReaderConfig, KoskiReaderConfig.thrift_spec, False], None, 1, ), # 9
(10, TType.I32, 'omp_num_threads', None, 0, 1, ), # 10
)
PerformanceConfig.thrift_struct_annotations = {
}
PerformanceConfig.thrift_field_annotations = {
}
def PerformanceConfig__init__(self, use_gpu=PerformanceConfig.thrift_spec[1][4], num_readers=PerformanceConfig.thrift_spec[2][4], num_trainers=PerformanceConfig.thrift_spec[3][4], ckp_config=PerformanceConfig.thrift_spec[4][4], data_queue_maxsize=PerformanceConfig.thrift_spec[5][4], reader_threads=PerformanceConfig.thrift_spec[6][4], num_gpu=PerformanceConfig.thrift_spec[7][4], enable_profiling=PerformanceConfig.thrift_spec[8][4], koski=None, omp_num_threads=PerformanceConfig.thrift_spec[10][4],):
self.use_gpu = use_gpu
self.num_readers = num_readers
self.num_trainers = num_trainers
if ckp_config is self.thrift_spec[4][4]:
ckp_config = CheckpointConfig(**{
"ckp_interval" : 10,
})
self.ckp_config = ckp_config
self.data_queue_maxsize = data_queue_maxsize
self.reader_threads = reader_threads
self.num_gpu = num_gpu
self.enable_profiling = enable_profiling
self.koski = koski
self.omp_num_threads = omp_num_threads
PerformanceConfig.__init__ = PerformanceConfig__init__
def PerformanceConfig__setstate__(self, state):
state.setdefault('use_gpu', False)
state.setdefault('num_readers', 4)
state.setdefault('num_trainers', 1)
state.setdefault('ckp_config', CheckpointConfig(**{
"ckp_interval" : 10,
}))
state.setdefault('data_queue_maxsize', 100)
state.setdefault('reader_threads', 8)
state.setdefault('num_gpu', 1)
state.setdefault('enable_profiling', False)
state.setdefault('koski', None)
state.setdefault('omp_num_threads', 0)
self.__dict__ = state
PerformanceConfig.__getstate__ = lambda self: self.__dict__.copy()
PerformanceConfig.__setstate__ = PerformanceConfig__setstate__
fix_spec(all_structs)
del all_structs
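# --- Hand-written note (not part of the Thrift codegen output) ---
# The generated __setstate__ methods call state.setdefault(...) before
# assigning self.__dict__, so objects pickled under an older schema gain the
# current defaults when unpickled. A minimal sketch using names from this
# module:
if __name__ == "__main__":
    item = SparseFeatureItem.__new__(SparseFeatureItem)
    item.__setstate__({"name": "user_id"})  # pretend an old pickle had only `name`
    assert item.hash_size == 10000 and item.embed_dim == -1
    assert item.pooling.field == 1  # default PoolingConfig(sum=SumPooling())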
|
AutoCTR-main
|
gen-py/config/ttypes.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
from .ttypes import *
|
AutoCTR-main
|
gen-py/block_config/constants.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
__all__ = ['ttypes', 'constants']
|
AutoCTR-main
|
gen-py/block_config/__init__.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
if '__pypy__' not in sys.builtin_module_names:
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'ExtendedBlockType', 'FeatSelectionConfig', 'DenseBlockType', 'EmbedBlockType', 'BlockType', 'MLPBlockConfig', 'CrossNetBlockConfig', 'FMBlockConfig', 'DotProcessorBlockConfig', 'CatBlockConfig', 'CINBlockConfig', 'AttentionBlockConfig', 'BlockConfig']
class ExtendedBlockType:
MLP_DENSE = 1
MLP_EMB = 2
CROSSNET = 3
FM_DENSE = 4
FM_EMB = 5
DOTPROCESSOR_DENSE = 6
DOTPROCESSOR_EMB = 7
CAT_DENSE = 8
CAT_EMB = 9
CIN = 10
ATTENTION = 11
_VALUES_TO_NAMES = {
1: "MLP_DENSE",
2: "MLP_EMB",
3: "CROSSNET",
4: "FM_DENSE",
5: "FM_EMB",
6: "DOTPROCESSOR_DENSE",
7: "DOTPROCESSOR_EMB",
8: "CAT_DENSE",
9: "CAT_EMB",
10: "CIN",
11: "ATTENTION",
}
_NAMES_TO_VALUES = {
"MLP_DENSE": 1,
"MLP_EMB": 2,
"CROSSNET": 3,
"FM_DENSE": 4,
"FM_EMB": 5,
"DOTPROCESSOR_DENSE": 6,
"DOTPROCESSOR_EMB": 7,
"CAT_DENSE": 8,
"CAT_EMB": 9,
"CIN": 10,
"ATTENTION": 11,
}
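# --- Hand-written note (not part of the Thrift codegen output) ---
# Generated Thrift enums are plain int class attributes; the two maps above
# give a name/value round trip:
if __name__ == "__main__":
    assert ExtendedBlockType._VALUES_TO_NAMES[ExtendedBlockType.CIN] == "CIN"
    assert ExtendedBlockType._NAMES_TO_VALUES["ATTENTION"] == ExtendedBlockType.ATTENTION == 11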
class FeatSelectionConfig:
"""
Attributes:
- block_id
- dense
- sparse
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.dense = []
(_etype3, _size0) = iprot.readListBegin()
if _size0 >= 0:
for _i4 in six.moves.range(_size0):
_elem5 = iprot.readI32()
self.dense.append(_elem5)
else:
while iprot.peekList():
_elem6 = iprot.readI32()
self.dense.append(_elem6)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.sparse = []
(_etype10, _size7) = iprot.readListBegin()
if _size7 >= 0:
for _i11 in six.moves.range(_size7):
_elem12 = iprot.readI32()
self.sparse.append(_elem12)
else:
while iprot.peekList():
_elem13 = iprot.readI32()
self.sparse.append(_elem13)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FeatSelectionConfig')
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 1)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.dense != None:
oprot.writeFieldBegin('dense', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.dense))
for iter14 in self.dense:
oprot.writeI32(iter14)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.sparse != None:
oprot.writeFieldBegin('sparse', TType.LIST, 3)
oprot.writeListBegin(TType.I32, len(self.sparse))
for iter15 in self.sparse:
oprot.writeI32(iter15)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.dense is not None:
value = pprint.pformat(self.dense, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dense=%s' % (value))
if self.sparse is not None:
value = pprint.pformat(self.sparse, indent=0)
value = padding.join(value.splitlines(True))
L.append(' sparse=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
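# --- Hand-written note (not part of the Thrift codegen output) ---
# Every generated read()/write() pair in this module follows the same
# dispatch: try the C-accelerated fastproto codec for the binary/compact
# protocols, otherwise fall back to the pure-Python field loop, which
# iprot.skip()s unknown field ids so data written by a newer schema stays
# readable by older readers.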
class DenseBlockType:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DenseBlockType')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class EmbedBlockType:
"""
Attributes:
- comm_embed_dim
- dense_as_sparse
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.comm_embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.dense_as_sparse = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('EmbedBlockType')
if self.comm_embed_dim != None:
oprot.writeFieldBegin('comm_embed_dim', TType.I32, 1)
oprot.writeI32(self.comm_embed_dim)
oprot.writeFieldEnd()
if self.dense_as_sparse != None:
oprot.writeFieldBegin('dense_as_sparse', TType.BOOL, 2)
oprot.writeBool(self.dense_as_sparse)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.comm_embed_dim is not None:
value = pprint.pformat(self.comm_embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' comm_embed_dim=%s' % (value))
if self.dense_as_sparse is not None:
value = pprint.pformat(self.dense_as_sparse, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dense_as_sparse=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class BlockType(object):
"""
Attributes:
- dense
- emb
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
DENSE = 1
EMB = 2
@staticmethod
def isUnion():
return True
def get_dense(self):
assert self.field == 1
return self.value
def get_emb(self):
assert self.field == 2
return self.value
def set_dense(self, value):
self.field = 1
self.value = value
def set_emb(self, value):
self.field = 2
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 6
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('dense', value)
if self.field == 2:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('emb', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
dense = DenseBlockType()
dense.read(iprot)
assert self.field == 0 and self.value is None
self.set_dense(dense)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
emb = EmbedBlockType()
emb.read(iprot)
assert self.field == 0 and self.value is None
self.set_emb(emb)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('BlockType')
if self.field == 1:
oprot.writeFieldBegin('dense', TType.STRUCT, 1)
dense = self.value
dense.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('emb', TType.STRUCT, 2)
emb = self.value
emb.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
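# --- Hand-written note (not part of the Thrift codegen output) ---
# BlockType.__init__ is still None at this point in the module (it is patched
# near the bottom of the generated file), so this sketch bypasses it with
# __new__ purely to show the union accessors:
if __name__ == "__main__":
    bt = BlockType.__new__(BlockType)
    bt.set_dense(DenseBlockType())
    assert bt.getType() == BlockType.DENSE == 1
    assert isinstance(bt.get_dense(), DenseBlockType)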
class MLPBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
- arc
- ly_act
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype19, _size16) = iprot.readListBegin()
if _size16 >= 0:
for _i20 in six.moves.range(_size16):
_elem21 = FeatSelectionConfig()
_elem21.read(iprot)
self.input_feat_config.append(_elem21)
else:
while iprot.peekList():
_elem22 = FeatSelectionConfig()
_elem22.read(iprot)
self.input_feat_config.append(_elem22)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.arc = []
(_etype26, _size23) = iprot.readListBegin()
if _size23 >= 0:
for _i27 in six.moves.range(_size23):
_elem28 = iprot.readI32()
self.arc.append(_elem28)
else:
while iprot.peekList():
_elem29 = iprot.readI32()
self.arc.append(_elem29)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.ly_act = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MLPBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter30 in self.input_feat_config:
iter30.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 5)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter31 in self.arc:
oprot.writeI32(iter31)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ly_act != None:
oprot.writeFieldBegin('ly_act', TType.BOOL, 6)
oprot.writeBool(self.ly_act)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
if self.ly_act is not None:
value = pprint.pformat(self.ly_act, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ly_act=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class CrossNetBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- num_of_layers
- cross_feat_config
- batchnorm
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype35, _size32) = iprot.readListBegin()
if _size32 >= 0:
for _i36 in six.moves.range(_size32):
_elem37 = FeatSelectionConfig()
_elem37.read(iprot)
self.input_feat_config.append(_elem37)
else:
while iprot.peekList():
_elem38 = FeatSelectionConfig()
_elem38.read(iprot)
self.input_feat_config.append(_elem38)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.num_of_layers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.cross_feat_config = []
(_etype42, _size39) = iprot.readListBegin()
if _size39 >= 0:
for _i43 in six.moves.range(_size39):
_elem44 = FeatSelectionConfig()
_elem44.read(iprot)
self.cross_feat_config.append(_elem44)
else:
while iprot.peekList():
_elem45 = FeatSelectionConfig()
_elem45.read(iprot)
self.cross_feat_config.append(_elem45)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.batchnorm = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CrossNetBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter46 in self.input_feat_config:
iter46.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.I32, 4)
oprot.writeI32(self.num_of_layers)
oprot.writeFieldEnd()
if self.cross_feat_config != None:
oprot.writeFieldBegin('cross_feat_config', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.cross_feat_config))
for iter47 in self.cross_feat_config:
iter47.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.batchnorm != None:
oprot.writeFieldBegin('batchnorm', TType.BOOL, 6)
oprot.writeBool(self.batchnorm)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
if self.cross_feat_config is not None:
value = pprint.pformat(self.cross_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' cross_feat_config=%s' % (value))
if self.batchnorm is not None:
value = pprint.pformat(self.batchnorm, indent=0)
value = padding.join(value.splitlines(True))
L.append(' batchnorm=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class FMBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype51, _size48) = iprot.readListBegin()
if _size48 >= 0:
for _i52 in six.moves.range(_size48):
_elem53 = FeatSelectionConfig()
_elem53.read(iprot)
self.input_feat_config.append(_elem53)
else:
while iprot.peekList():
_elem54 = FeatSelectionConfig()
_elem54.read(iprot)
self.input_feat_config.append(_elem54)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FMBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter55 in self.input_feat_config:
iter55.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class DotProcessorBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype59, _size56) = iprot.readListBegin()
if _size56 >= 0:
for _i60 in six.moves.range(_size56):
_elem61 = FeatSelectionConfig()
_elem61.read(iprot)
self.input_feat_config.append(_elem61)
else:
while iprot.peekList():
_elem62 = FeatSelectionConfig()
_elem62.read(iprot)
self.input_feat_config.append(_elem62)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DotProcessorBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter63 in self.input_feat_config:
iter63.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class CatBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype67, _size64) = iprot.readListBegin()
if _size64 >= 0:
for _i68 in six.moves.range(_size64):
_elem69 = FeatSelectionConfig()
_elem69.read(iprot)
self.input_feat_config.append(_elem69)
else:
while iprot.peekList():
_elem70 = FeatSelectionConfig()
_elem70.read(iprot)
self.input_feat_config.append(_elem70)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CatBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter71 in self.input_feat_config:
iter71.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class CINBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- emb_config
- arc
- split_half
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype75, _size72) = iprot.readListBegin()
if _size72 >= 0:
for _i76 in six.moves.range(_size72):
_elem77 = FeatSelectionConfig()
_elem77.read(iprot)
self.input_feat_config.append(_elem77)
else:
while iprot.peekList():
_elem78 = FeatSelectionConfig()
_elem78.read(iprot)
self.input_feat_config.append(_elem78)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.emb_config = EmbedBlockType()
self.emb_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.arc = []
(_etype82, _size79) = iprot.readListBegin()
if _size79 >= 0:
for _i83 in six.moves.range(_size79):
_elem84 = iprot.readI32()
self.arc.append(_elem84)
else:
while iprot.peekList():
_elem85 = iprot.readI32()
self.arc.append(_elem85)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.split_half = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CINBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter86 in self.input_feat_config:
iter86.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.emb_config != None:
oprot.writeFieldBegin('emb_config', TType.STRUCT, 4)
self.emb_config.write(oprot)
oprot.writeFieldEnd()
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 5)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter87 in self.arc:
oprot.writeI32(iter87)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.split_half != None:
oprot.writeFieldBegin('split_half', TType.BOOL, 6)
oprot.writeBool(self.split_half)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.emb_config is not None:
value = pprint.pformat(self.emb_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' emb_config=%s' % (value))
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
if self.split_half is not None:
value = pprint.pformat(self.split_half, indent=0)
value = padding.join(value.splitlines(True))
L.append(' split_half=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AttentionBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- emb_config
- att_embed_dim
- num_of_heads
- num_of_layers
- dropout_prob
- use_res
- batchnorm
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype91, _size88) = iprot.readListBegin()
if _size88 >= 0:
for _i92 in six.moves.range(_size88):
_elem93 = FeatSelectionConfig()
_elem93.read(iprot)
self.input_feat_config.append(_elem93)
else:
while iprot.peekList():
_elem94 = FeatSelectionConfig()
_elem94.read(iprot)
self.input_feat_config.append(_elem94)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.emb_config = EmbedBlockType()
self.emb_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.att_embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.num_of_heads = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.num_of_layers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.FLOAT:
self.dropout_prob = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.use_res = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.batchnorm = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AttentionBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter95 in self.input_feat_config:
iter95.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.emb_config != None:
oprot.writeFieldBegin('emb_config', TType.STRUCT, 4)
self.emb_config.write(oprot)
oprot.writeFieldEnd()
if self.att_embed_dim != None:
oprot.writeFieldBegin('att_embed_dim', TType.I32, 5)
oprot.writeI32(self.att_embed_dim)
oprot.writeFieldEnd()
if self.num_of_heads != None:
oprot.writeFieldBegin('num_of_heads', TType.I32, 6)
oprot.writeI32(self.num_of_heads)
oprot.writeFieldEnd()
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.I32, 7)
oprot.writeI32(self.num_of_layers)
oprot.writeFieldEnd()
if self.dropout_prob != None:
oprot.writeFieldBegin('dropout_prob', TType.FLOAT, 8)
oprot.writeFloat(self.dropout_prob)
oprot.writeFieldEnd()
if self.use_res != None:
oprot.writeFieldBegin('use_res', TType.BOOL, 9)
oprot.writeBool(self.use_res)
oprot.writeFieldEnd()
if self.batchnorm != None:
oprot.writeFieldBegin('batchnorm', TType.BOOL, 10)
oprot.writeBool(self.batchnorm)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.emb_config is not None:
value = pprint.pformat(self.emb_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' emb_config=%s' % (value))
if self.att_embed_dim is not None:
value = pprint.pformat(self.att_embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' att_embed_dim=%s' % (value))
if self.num_of_heads is not None:
value = pprint.pformat(self.num_of_heads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_heads=%s' % (value))
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
if self.dropout_prob is not None:
value = pprint.pformat(self.dropout_prob, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dropout_prob=%s' % (value))
if self.use_res is not None:
value = pprint.pformat(self.use_res, indent=0)
value = padding.join(value.splitlines(True))
L.append(' use_res=%s' % (value))
if self.batchnorm is not None:
value = pprint.pformat(self.batchnorm, indent=0)
value = padding.join(value.splitlines(True))
L.append(' batchnorm=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class BlockConfig(object):
"""
Attributes:
- mlp_block
- crossnet_block
- fm_block
- dotprocessor_block
- cat_block
- cin_block
- attention_block
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
MLP_BLOCK = 1
CROSSNET_BLOCK = 2
FM_BLOCK = 3
DOTPROCESSOR_BLOCK = 4
CAT_BLOCK = 5
CIN_BLOCK = 6
ATTENTION_BLOCK = 7
@staticmethod
def isUnion():
return True
def get_mlp_block(self):
assert self.field == 1
return self.value
def get_crossnet_block(self):
assert self.field == 2
return self.value
def get_fm_block(self):
assert self.field == 3
return self.value
def get_dotprocessor_block(self):
assert self.field == 4
return self.value
def get_cat_block(self):
assert self.field == 5
return self.value
def get_cin_block(self):
assert self.field == 6
return self.value
def get_attention_block(self):
assert self.field == 7
return self.value
def set_mlp_block(self, value):
self.field = 1
self.value = value
def set_crossnet_block(self, value):
self.field = 2
self.value = value
def set_fm_block(self, value):
self.field = 3
self.value = value
def set_dotprocessor_block(self, value):
self.field = 4
self.value = value
def set_cat_block(self, value):
self.field = 5
self.value = value
def set_cin_block(self, value):
self.field = 6
self.value = value
def set_attention_block(self, value):
self.field = 7
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('mlp_block', value)
if self.field == 2:
padding = ' ' * 15
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('crossnet_block', value)
if self.field == 3:
padding = ' ' * 9
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('fm_block', value)
if self.field == 4:
padding = ' ' * 19
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('dotprocessor_block', value)
if self.field == 5:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('cat_block', value)
if self.field == 6:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('cin_block', value)
if self.field == 7:
padding = ' ' * 16
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('attention_block', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
mlp_block = MLPBlockConfig()
mlp_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_mlp_block(mlp_block)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
crossnet_block = CrossNetBlockConfig()
crossnet_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_crossnet_block(crossnet_block)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
fm_block = FMBlockConfig()
fm_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_fm_block(fm_block)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
dotprocessor_block = DotProcessorBlockConfig()
dotprocessor_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_dotprocessor_block(dotprocessor_block)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
cat_block = CatBlockConfig()
cat_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_cat_block(cat_block)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
cin_block = CINBlockConfig()
cin_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_cin_block(cin_block)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
attention_block = AttentionBlockConfig()
attention_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_attention_block(attention_block)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('BlockConfig')
if self.field == 1:
oprot.writeFieldBegin('mlp_block', TType.STRUCT, 1)
mlp_block = self.value
mlp_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('crossnet_block', TType.STRUCT, 2)
crossnet_block = self.value
crossnet_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('fm_block', TType.STRUCT, 3)
fm_block = self.value
fm_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 4:
oprot.writeFieldBegin('dotprocessor_block', TType.STRUCT, 4)
dotprocessor_block = self.value
dotprocessor_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 5:
oprot.writeFieldBegin('cat_block', TType.STRUCT, 5)
cat_block = self.value
cat_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 6:
oprot.writeFieldBegin('cin_block', TType.STRUCT, 6)
cin_block = self.value
cin_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 7:
oprot.writeFieldBegin('attention_block', TType.STRUCT, 7)
attention_block = self.value
attention_block.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(FeatSelectionConfig)
FeatSelectionConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'block_id', None, None, 2, ), # 1
(2, TType.LIST, 'dense', (TType.I32,None), None, 2, ), # 2
(3, TType.LIST, 'sparse', (TType.I32,None), None, 2, ), # 3
)
FeatSelectionConfig.thrift_struct_annotations = {
}
FeatSelectionConfig.thrift_field_annotations = {
}
def FeatSelectionConfig__init__(self, block_id=None, dense=None, sparse=None,):
self.block_id = block_id
self.dense = dense
self.sparse = sparse
FeatSelectionConfig.__init__ = FeatSelectionConfig__init__
def FeatSelectionConfig__setstate__(self, state):
state.setdefault('block_id', None)
state.setdefault('dense', None)
state.setdefault('sparse', None)
self.__dict__ = state
FeatSelectionConfig.__getstate__ = lambda self: self.__dict__.copy()
FeatSelectionConfig.__setstate__ = FeatSelectionConfig__setstate__
all_structs.append(DenseBlockType)
DenseBlockType.thrift_spec = (
)
DenseBlockType.thrift_struct_annotations = {
}
DenseBlockType.thrift_field_annotations = {
}
all_structs.append(EmbedBlockType)
EmbedBlockType.thrift_spec = (
None, # 0
(1, TType.I32, 'comm_embed_dim', None, None, 2, ), # 1
(2, TType.BOOL, 'dense_as_sparse', None, False, 2, ), # 2
)
EmbedBlockType.thrift_struct_annotations = {
}
EmbedBlockType.thrift_field_annotations = {
}
def EmbedBlockType__init__(self, comm_embed_dim=None, dense_as_sparse=EmbedBlockType.thrift_spec[2][4],):
self.comm_embed_dim = comm_embed_dim
self.dense_as_sparse = dense_as_sparse
EmbedBlockType.__init__ = EmbedBlockType__init__
def EmbedBlockType__setstate__(self, state):
state.setdefault('comm_embed_dim', None)
state.setdefault('dense_as_sparse', False)
self.__dict__ = state
EmbedBlockType.__getstate__ = lambda self: self.__dict__.copy()
EmbedBlockType.__setstate__ = EmbedBlockType__setstate__
all_structs.append(BlockType)
BlockType.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'dense', [DenseBlockType, DenseBlockType.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'emb', [EmbedBlockType, EmbedBlockType.thrift_spec, False], None, 2, ), # 2
)
BlockType.thrift_struct_annotations = {
}
BlockType.thrift_field_annotations = {
}
def BlockType__init__(self, dense=None, emb=None,):
self.field = 0
self.value = None
if dense is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = dense
if emb is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = emb
BlockType.__init__ = BlockType__init__
all_structs.append(MLPBlockConfig)
MLPBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "MLPBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
(5, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 5
(6, TType.BOOL, 'ly_act', None, True, 2, ), # 6
)
MLPBlockConfig.thrift_struct_annotations = {
}
MLPBlockConfig.thrift_field_annotations = {
}
def MLPBlockConfig__init__(self, name=MLPBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None, arc=None, ly_act=MLPBlockConfig.thrift_spec[6][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
self.arc = arc
self.ly_act = ly_act
MLPBlockConfig.__init__ = MLPBlockConfig__init__
def MLPBlockConfig__setstate__(self, state):
state.setdefault('name', "MLPBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
state.setdefault('arc', None)
state.setdefault('ly_act', True)
self.__dict__ = state
MLPBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
MLPBlockConfig.__setstate__ = MLPBlockConfig__setstate__
all_structs.append(CrossNetBlockConfig)
CrossNetBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "CrossNetBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.I32, 'num_of_layers', None, 2, 2, ), # 4
(5, TType.LIST, 'cross_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 5
(6, TType.BOOL, 'batchnorm', None, False, 2, ), # 6
)
CrossNetBlockConfig.thrift_struct_annotations = {
}
CrossNetBlockConfig.thrift_field_annotations = {
}
def CrossNetBlockConfig__init__(self, name=CrossNetBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, num_of_layers=CrossNetBlockConfig.thrift_spec[4][4], cross_feat_config=None, batchnorm=CrossNetBlockConfig.thrift_spec[6][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.num_of_layers = num_of_layers
self.cross_feat_config = cross_feat_config
self.batchnorm = batchnorm
CrossNetBlockConfig.__init__ = CrossNetBlockConfig__init__
def CrossNetBlockConfig__setstate__(self, state):
state.setdefault('name', "CrossNetBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('num_of_layers', 2)
state.setdefault('cross_feat_config', None)
state.setdefault('batchnorm', False)
self.__dict__ = state
CrossNetBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
CrossNetBlockConfig.__setstate__ = CrossNetBlockConfig__setstate__
all_structs.append(FMBlockConfig)
FMBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "FMBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
)
FMBlockConfig.thrift_struct_annotations = {
}
FMBlockConfig.thrift_field_annotations = {
}
def FMBlockConfig__init__(self, name=FMBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None,):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
FMBlockConfig.__init__ = FMBlockConfig__init__
def FMBlockConfig__setstate__(self, state):
state.setdefault('name', "FMBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
self.__dict__ = state
FMBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
FMBlockConfig.__setstate__ = FMBlockConfig__setstate__
all_structs.append(DotProcessorBlockConfig)
DotProcessorBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "DotProcessorBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
)
DotProcessorBlockConfig.thrift_struct_annotations = {
}
DotProcessorBlockConfig.thrift_field_annotations = {
}
def DotProcessorBlockConfig__init__(self, name=DotProcessorBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None,):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
DotProcessorBlockConfig.__init__ = DotProcessorBlockConfig__init__
def DotProcessorBlockConfig__setstate__(self, state):
state.setdefault('name', "DotProcessorBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
self.__dict__ = state
DotProcessorBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
DotProcessorBlockConfig.__setstate__ = DotProcessorBlockConfig__setstate__
all_structs.append(CatBlockConfig)
CatBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "CatBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
)
CatBlockConfig.thrift_struct_annotations = {
}
CatBlockConfig.thrift_field_annotations = {
}
def CatBlockConfig__init__(self, name=CatBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None,):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
CatBlockConfig.__init__ = CatBlockConfig__init__
def CatBlockConfig__setstate__(self, state):
state.setdefault('name', "CatBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
self.__dict__ = state
CatBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
CatBlockConfig.__setstate__ = CatBlockConfig__setstate__
all_structs.append(CINBlockConfig)
CINBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "CINBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'emb_config', [EmbedBlockType, EmbedBlockType.thrift_spec, False], None, 2, ), # 4
(5, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 5
(6, TType.BOOL, 'split_half', None, True, 2, ), # 6
)
CINBlockConfig.thrift_struct_annotations = {
}
CINBlockConfig.thrift_field_annotations = {
}
def CINBlockConfig__init__(self, name=CINBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, emb_config=None, arc=None, split_half=CINBlockConfig.thrift_spec[6][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.emb_config = emb_config
self.arc = arc
self.split_half = split_half
CINBlockConfig.__init__ = CINBlockConfig__init__
def CINBlockConfig__setstate__(self, state):
state.setdefault('name', "CINBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('emb_config', None)
state.setdefault('arc', None)
state.setdefault('split_half', True)
self.__dict__ = state
CINBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
CINBlockConfig.__setstate__ = CINBlockConfig__setstate__
all_structs.append(AttentionBlockConfig)
AttentionBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "AttentionBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'emb_config', [EmbedBlockType, EmbedBlockType.thrift_spec, False], None, 2, ), # 4
(5, TType.I32, 'att_embed_dim', None, 10, 2, ), # 5
(6, TType.I32, 'num_of_heads', None, 2, 2, ), # 6
(7, TType.I32, 'num_of_layers', None, 1, 2, ), # 7
(8, TType.FLOAT, 'dropout_prob', None, 0.00000, 2, ), # 8
(9, TType.BOOL, 'use_res', None, True, 2, ), # 9
(10, TType.BOOL, 'batchnorm', None, False, 2, ), # 10
)
AttentionBlockConfig.thrift_struct_annotations = {
}
AttentionBlockConfig.thrift_field_annotations = {
}
def AttentionBlockConfig__init__(self, name=AttentionBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, emb_config=None, att_embed_dim=AttentionBlockConfig.thrift_spec[5][4], num_of_heads=AttentionBlockConfig.thrift_spec[6][4], num_of_layers=AttentionBlockConfig.thrift_spec[7][4], dropout_prob=AttentionBlockConfig.thrift_spec[8][4], use_res=AttentionBlockConfig.thrift_spec[9][4], batchnorm=AttentionBlockConfig.thrift_spec[10][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.emb_config = emb_config
self.att_embed_dim = att_embed_dim
self.num_of_heads = num_of_heads
self.num_of_layers = num_of_layers
self.dropout_prob = dropout_prob
self.use_res = use_res
self.batchnorm = batchnorm
AttentionBlockConfig.__init__ = AttentionBlockConfig__init__
def AttentionBlockConfig__setstate__(self, state):
state.setdefault('name', "AttentionBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('emb_config', None)
state.setdefault('att_embed_dim', 10)
state.setdefault('num_of_heads', 2)
state.setdefault('num_of_layers', 1)
state.setdefault('dropout_prob', 0.00000)
state.setdefault('use_res', True)
state.setdefault('batchnorm', False)
self.__dict__ = state
AttentionBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
AttentionBlockConfig.__setstate__ = AttentionBlockConfig__setstate__
all_structs.append(BlockConfig)
BlockConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'mlp_block', [MLPBlockConfig, MLPBlockConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'crossnet_block', [CrossNetBlockConfig, CrossNetBlockConfig.thrift_spec, False], None, 2, ), # 2
(3, TType.STRUCT, 'fm_block', [FMBlockConfig, FMBlockConfig.thrift_spec, False], None, 2, ), # 3
(4, TType.STRUCT, 'dotprocessor_block', [DotProcessorBlockConfig, DotProcessorBlockConfig.thrift_spec, False], None, 2, ), # 4
(5, TType.STRUCT, 'cat_block', [CatBlockConfig, CatBlockConfig.thrift_spec, False], None, 2, ), # 5
(6, TType.STRUCT, 'cin_block', [CINBlockConfig, CINBlockConfig.thrift_spec, False], None, 2, ), # 6
(7, TType.STRUCT, 'attention_block', [AttentionBlockConfig, AttentionBlockConfig.thrift_spec, False], None, 2, ), # 7
)
BlockConfig.thrift_struct_annotations = {
}
BlockConfig.thrift_field_annotations = {
}
def BlockConfig__init__(self, mlp_block=None, crossnet_block=None, fm_block=None, dotprocessor_block=None, cat_block=None, cin_block=None, attention_block=None,):
self.field = 0
self.value = None
if mlp_block is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = mlp_block
if crossnet_block is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = crossnet_block
if fm_block is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = fm_block
if dotprocessor_block is not None:
assert self.field == 0 and self.value is None
self.field = 4
self.value = dotprocessor_block
if cat_block is not None:
assert self.field == 0 and self.value is None
self.field = 5
self.value = cat_block
if cin_block is not None:
assert self.field == 0 and self.value is None
self.field = 6
self.value = cin_block
if attention_block is not None:
assert self.field == 0 and self.value is None
self.field = 7
self.value = attention_block
BlockConfig.__init__ = BlockConfig__init__
fix_spec(all_structs)
del all_structs
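# Editor's note: a minimal usage sketch (not part of the generated file) for the
# structs patched above, assuming only the constructors defined in this module.
# BlockConfig is a Thrift union, so exactly one member may be set; getType() and
# the get_*() accessors recover it. Field values here are illustrative.
if __name__ == "__main__":
    _mlp = MLPBlockConfig(
        block_id=1,
        input_feat_config=[FeatSelectionConfig(block_id=0, dense=[0, 1], sparse=[2])],
        type=BlockType(emb=EmbedBlockType(comm_embed_dim=16)),
        arc=[128, 64],
    )
    _block = BlockConfig(mlp_block=_mlp)
    assert _block.getType() == BlockConfig.MLP_BLOCK
    assert _block.get_mlp_block() is _mlp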
|
AutoCTR-main
|
gen-py/block_config/ttypes.py
|
AutoCTR-main
|
trainers/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import torch
from config import ttypes as config
logger = logging.getLogger(__name__)
def build_loss(model, loss_config):
if loss_config.getType() == config.LossConfig.BCEWITHLOGITS:
logger.warning(
"Creating BCEWithLogitsLoss: {}".format(loss_config.get_bcewithlogits())
)
return torch.nn.BCEWithLogitsLoss(reduction="none")
elif loss_config.getType() == config.LossConfig.MSE:
logger.warning("Creating MSELoss: {}".format(loss_config.get_mse()))
return torch.nn.MSELoss(reduction="none")
elif loss_config.getType() == config.LossConfig.BCE:
logger.warning("Creating BCELoss: {}".format(loss_config.get_bce()))
return torch.nn.BCELoss(reduction="none")
else:
raise ValueError("Unknown loss type.")
# TODO add equal weight training and calibration for ads data
def apply_loss(loss, pred, label, weight=None):
E = loss(pred, label)
return torch.mean(E) if weight is None else torch.mean(E * weight.view(-1))
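# Editor's note: a minimal sketch of the helpers above, assuming no thrift config
# machinery. build_loss() expects a LossConfig union, so a reduction="none" loss
# is constructed directly here; reduction="none" is the contract apply_loss()
# relies on, since it averages the (optionally weighted) per-sample losses itself.
if __name__ == "__main__":
    _loss = torch.nn.BCEWithLogitsLoss(reduction="none")
    _pred = torch.randn(8)                      # raw logits
    _label = torch.randint(0, 2, (8,)).float()  # binary targets
    _weight = torch.rand(8)                     # per-sample weights
    print(apply_loss(_loss, _pred, _label))            # unweighted mean
    print(apply_loss(_loss, _pred, _label, _weight))   # weighted mean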
|
AutoCTR-main
|
trainers/loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import re
import time
import numpy as np
import torch
logger = logging.getLogger(__name__)
def log_train_info(
start_time,
i_batch="N/A",
i_epoch="N/A",
trainer_id="N/A",
num_batches=1,
total_loss=0,
batch_size=None,
num_samples=None,
sample_weight_sum=None,
ctr=None,
lock=None,
on_gpu=False,
trainer_logger=None,
):
"""
Args:
total_loss, the sum of the averaged per batch loss
"""
if on_gpu:
torch.cuda.synchronize()
curr_time = time.time()
if trainer_logger is None:
trainer_logger = logger
if lock is not None:
lock.acquire()
try:
if num_samples is None:
assert (
batch_size is not None
), "batch_size and num_samples cannot both be None."
num_samples = num_batches * batch_size
if sample_weight_sum is None:
assert (
batch_size is not None
), "batch_size and sample_weight_sum cannot both be None."
sample_weight_sum = num_batches * batch_size
loss = total_loss / sample_weight_sum
ne = calculate_ne(loss, ctr) if ctr is not None else "N/A"
trainer_logger.warning(
"Trainer {} finished iteration {} of epoch {}, "
"{:.2f} qps, "
"window loss: {}, "
"window NE: {}".format(
trainer_id,
i_batch,
i_epoch,
num_samples / (curr_time - start_time),
loss,
ne,
)
)
finally:
if lock is not None:
lock.release()
return (loss, ne) if ctr is not None else loss
log_eval_info = log_train_info
def log_tb_info_batch(
writer,
model,
pred,
label,
optimizer, # not used
logging_options,
iter,
start_time,
trainer_id=None,
total_loss=0,
batch_size=-1, # not used
num_batches=-1, # not used
sample_weight_sum=None,
avg_loss=None,
ctr=None,
lock=None,
):
"""
Note that the reported value is the mean of per batch mean,
which is different from mean of the whole history
Args:
total_loss, the sum of the averaged per batch loss
"""
if writer is None:
return
if lock is not None:
lock.acquire()
try:
if avg_loss is None:
assert (
total_loss is not None and sample_weight_sum is not None
), "cannot compute avg_loss"
avg_loss = total_loss / sample_weight_sum
writer.add_scalar(
"{}batch/train_metric/loss".format(
"" if trainer_id is None else "trainer_{}/".format(trainer_id)
),
avg_loss,
iter,
)
if ctr is not None:
ne = calculate_ne(avg_loss, ctr)
writer.add_scalar(
"{}batch/train_metric/ne".format(
"" if trainer_id is None else "trainer_{}/".format(trainer_id)
),
ne,
iter,
)
if logging_options.tb_log_pr_curve_batch:
writer.add_pr_curve("PR Curve", label, pred, iter)
if logging_options.tb_log_model_weight_hist:
for name, param in model.named_parameters():
if any(
re.search(pattern, name)
for pattern in logging_options.tb_log_model_weight_filter_regex
):
continue
writer.add_histogram(name, param.clone().cpu().data.numpy(), iter)
finally:
if lock is not None:
lock.release()
def need_to_log_batch(counter, logging_options, batch_size):
return (
logging_options.log_freq > 0
and (counter + 1) % max(1, int(logging_options.log_freq / batch_size)) == 0
)
def need_to_log_tb(counter, logging_options, batch_size):
tb_log_freq = logging_options.tb_log_freq
return (
tb_log_freq > 0 and (counter + 1) % max(1, int(tb_log_freq / batch_size)) == 0
)
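# Editor's note: log_freq and tb_log_freq above are expressed in samples, not
# batches; e.g. log_freq=10000 with batch_size=1024 logs every
# max(1, int(10000 / 1024)) = 9 batches.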
def is_checkpoint(counter, ckp_interval, ckp_path):
return ckp_interval > 0 and ckp_path and (counter + 1) % ckp_interval == 0
def calculate_ne(logloss, ctr):
if ctr <= 0.0 or ctr >= 1.0:
logger.error("CTR should be between 0.0 and 1.0")
return 0.0 if logloss == 0.0 else np.inf
return -logloss / (ctr * np.log(ctr) + (1.0 - ctr) * np.log(1 - ctr))
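# Editor's note: a worked example of calculate_ne() above (numbers illustrative).
# NE normalizes logloss by the entropy of a constant predictor that always
# outputs the background CTR; values below 1.0 beat that baseline.
if __name__ == "__main__":
    _ctr = 0.2
    _baseline = -(_ctr * np.log(_ctr) + (1.0 - _ctr) * np.log(1.0 - _ctr))  # ~0.5004
    print(calculate_ne(0.45, _ctr))       # ~0.899, better than the baseline
    print(calculate_ne(_baseline, _ctr))  # exactly 1.0 by construction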
|
AutoCTR-main
|
trainers/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import time
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .loss import apply_loss, build_loss
from .utils import log_tb_info_batch, log_train_info, need_to_log_batch, need_to_log_tb
from models.builder import save_model
try:
from fblearner.flow.util.visualization_utils import summary_writer
except ImportError:
pass
logger = logging.getLogger(__name__)
np.set_printoptions(precision=5)
torch.set_printoptions(precision=5)
THRESHOLD = -1
VAL_THRESHOLD = -1
def train(
model,
train_options,
train_dataloader=None,
batch_processor=None,
device=None,
val_dataloader=None,
trainer_id=0,
send_end=None,
train_dataloader_batches=None,
val_dataloader_batches=None,
batch_size=1024,
eval_dataloader=None,
eval_dataloader_batches=None,
save_model_name=None,
):
try:
writer = summary_writer()
except Exception:
logger.error("Failed to create the tensorboard summary writer.")
writer = None
prev_avg_val_loss, is_improving, is_local_optimal = None, True, False
optimizer = model.get_optimizers()
loss = build_loss(model, loss_config=train_options.loss)
output = []
logging_options = train_options.logging_config
if train_dataloader_batches is None:
train_dataloader_batches = train_dataloader
is_train_dataloader = True
else:
is_train_dataloader = False
if val_dataloader_batches is None:
val_dataloader_batches = val_dataloader
is_val_dataloader = True
else:
is_val_dataloader = False
if eval_dataloader_batches is None:
eval_dataloader_batches = eval_dataloader
is_eval_dataloader = True
else:
is_eval_dataloader = False
for i_epoch in range(0, train_options.nepochs):
start_time_epoch = time.time()
num_batches, avg_loss_epoch, q1, q2 = train_epoch(
model=model,
loss=loss,
optimizer=optimizer,
batch_processor=batch_processor,
trainer_id=trainer_id,
i_epoch=i_epoch,
device=device,
logging_options=logging_options,
writer=writer,
train_dataloader_batches=train_dataloader_batches,
batch_size=batch_size,
is_dataloader=is_train_dataloader,
)
logger.warning("Epoch:{}, Time for training: {}".format(i_epoch, time.time() - start_time_epoch))
avg_loss_epoch = log_train_info(
start_time=start_time_epoch,
i_batch=num_batches,
i_epoch=i_epoch,
trainer_id=trainer_id,
total_loss=avg_loss_epoch * num_batches * batch_size,
num_batches=num_batches,
batch_size=batch_size,
)
if writer is not None:
writer.add_scalar("train_metric/loss_epoch", avg_loss_epoch, i_epoch)
output.append({"i_epoch": i_epoch, "avg_train_loss": avg_loss_epoch})
if val_dataloader_batches is not None:
avg_val_loss, _, _, avg_auc = evaluate(
model=model,
loss=loss,
dataloader=val_dataloader_batches,
batch_processor=batch_processor,
device=device,
batch_size=batch_size,
is_dataloader=is_val_dataloader,
i_epoch=i_epoch,
)
output[-1]["avg_val_loss"] = avg_val_loss
output[-1]["roc_auc_score"] = avg_auc
if eval_dataloader_batches is not None:
avg_eval_loss, _, _, avg_eval_auc = evaluate(
model=model,
loss=loss,
dataloader=eval_dataloader_batches,
batch_processor=batch_processor,
device=device,
batch_size=batch_size,
is_dataloader=is_eval_dataloader,
i_epoch=i_epoch,
)
output[-1]["avg_eval_loss"] = avg_eval_loss
output[-1]["eval_roc_auc_score"] = avg_eval_auc
# check if local optimal
(
is_local_optimal,
is_improving,
prev_avg_val_loss,
) = _check_local_optimal(
i_epoch, is_improving, avg_val_loss, prev_avg_val_loss
)
# break if is local optimal
if is_local_optimal and train_options.early_stop_on_val_loss:
break
if save_model_name:
save_model(save_model_name, model)
if writer is not None:
writer.add_scalar("val_metric/loss_epoch", avg_val_loss, i_epoch)
logger.warning("Epoch:{}, validation loss: {}, roc_auc_score: {}, time: {}, q1: {}, q2: {}".format(i_epoch,
avg_val_loss,
avg_auc,
time.time() - start_time_epoch,
np.sum(
q1),
np.sum(
q2)))
if writer is not None:
writer.close()
if send_end:
send_end.send(output)
return output
def _check_local_optimal(i_epoch, is_improving, avg_val_loss, prev_avg_val_loss):
is_local_optimal = i_epoch > 0 and is_improving and avg_val_loss > prev_avg_val_loss
is_improving = i_epoch == 0 or prev_avg_val_loss > avg_val_loss
prev_avg_val_loss = avg_val_loss
return is_local_optimal, is_improving, prev_avg_val_loss
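# Editor's note: a quick trace of the early-stop helper above with illustrative
# losses, starting from prev_avg_val_loss=None, is_improving=True:
#   epoch 0, val=0.50 -> local_optimal=False, improving=True,  prev=0.50
#   epoch 1, val=0.45 -> local_optimal=False, improving=True,  prev=0.45
#   epoch 2, val=0.47 -> local_optimal=True (first rise after improving), prev=0.47
# train() only breaks on this when train_options.early_stop_on_val_loss is set.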
def train_epoch(
model,
loss,
optimizer,
batch_processor,
logging_options,
device,
trainer_id,
i_epoch,
lock=None,
writer=None,
train_dataloader_batches=None,
batch_size=1024,
is_dataloader=True,
):
model.train()
start_time, loss_val, num_batches, sample_weight_sum = time.time(), 0.0, 0, 0.0
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
loss_val_epoch, total_num_batches, sample_weight_sum_epoch = (
0.0,
len(train_dataloader_batches),
0.0,
)
q1, q2 = [], []
qq3 = time.perf_counter()
for i_batch, sample_batched in enumerate(train_dataloader_batches):
if not is_dataloader and i_batch <= THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
try:
label, feats, weight = batch_processor(mini_batch=sample_batched)
            except Exception:  # retry with the alternate (reverse) batch layout
i_epoch += 1
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
# forward pass
z_pred = model(feats=feats)
# backward pass
E = apply_loss(loss, z_pred, label, weight)
optimizer.zero_grad()
E.backward()
qq1 = time.perf_counter()
dd3 = qq1 - qq3
# torch.cuda.synchronize() # wait for mm to finish
qq2 = time.perf_counter()
optimizer.step()
# torch.cuda.synchronize() # wait for mm to finish
qq3 = time.perf_counter()
loss_val_batch = E.detach().cpu().numpy() * batch_size
sample_weight_sum_batch = (
batch_size if weight is None else torch.sum(weight).detach()
)
num_batches += 1
loss_val += loss_val_batch
loss_val_tb += loss_val_batch
loss_val_epoch += loss_val_batch
sample_weight_sum += sample_weight_sum_batch
sample_weight_sum_tb += sample_weight_sum_batch
sample_weight_sum_epoch += sample_weight_sum_batch
if need_to_log_batch(i_batch, logging_options, batch_size):
log_train_info(
i_batch=i_batch,
i_epoch=i_epoch,
trainer_id=trainer_id,
start_time=start_time,
total_loss=loss_val,
num_batches=num_batches,
sample_weight_sum=sample_weight_sum,
batch_size=batch_size,
lock=lock,
)
start_time, loss_val, num_batches, sample_weight_sum = (
time.time(),
0.0,
0,
0.0,
)
if writer is not None and need_to_log_tb(i_batch, logging_options, batch_size):
log_tb_info_batch(
writer=writer,
model=model,
pred=z_pred,
label=label,
optimizer=optimizer,
logging_options=logging_options,
iter=total_num_batches * i_epoch + i_batch,
start_time=start_time_tb,
trainer_id=trainer_id,
avg_loss=loss_val_tb / sample_weight_sum_tb,
lock=lock,
)
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
        dd1 = qq2 - qq1  # unused: gap between backward() and optimizer.step()
        dd2 = qq3 - qq2  # optimizer.step() duration
        q1.append(dd2)
        q2.append(dd3)  # data loading + forward + backward since the previous step
if not is_dataloader and i_batch > THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
    avg_loss = loss_val_epoch / sample_weight_sum_epoch
    return i_batch + 1, avg_loss, q1, q2  # i_batch is zero-based, so +1 gives the batch count
def evaluate(model, loss, dataloader, batch_processor, device, batch_size=1024, is_dataloader=True, i_epoch=0):
model.eval()
preds = []
labels = []
loss_val, sample_weight_sum = 0.0, 0.0
for i_batch, sample_batched in enumerate(dataloader):
if not is_dataloader and i_batch <= VAL_THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > VAL_THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
            try:
                label, feats, weight = batch_processor(mini_batch=sample_batched)
            except Exception:
                i_epoch += 1
                label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
# forward pass
z_pred = model(feats=feats)
# preds.append(z_pred.detach().cpu().numpy())
# labels.append(label.detach().cpu().numpy())
preds += z_pred.detach().cpu().numpy().tolist()
labels += label.detach().cpu().numpy().tolist()
E = apply_loss(loss, z_pred, label, weight)
loss_val += E.detach().cpu().numpy() * batch_size
sample_weight_sum += (
batch_size if weight is None else torch.sum(weight).detach().cpu().numpy()
)
if not is_dataloader and i_batch > VAL_THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
# logger.warning("loss_val: {}, weight_sum {}".format(dataloader, is_dataloader))
avg_loss = loss_val / sample_weight_sum
# labels = np.asarray(labels).flatten()
# preds = np.asarray(preds).flatten()
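    # roc_auc_score fails on NaN/Inf predictions; retry on the finite subset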
try:
avg_auc = roc_auc_score(labels, preds)
except Exception:
idx = np.isfinite(preds)
avg_auc = roc_auc_score(np.array(labels)[idx], np.array(preds)[idx])
return avg_loss, labels, preds, avg_auc
|
AutoCTR-main
|
trainers/simple_final.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import time
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .loss import apply_loss, build_loss
from .utils import log_tb_info_batch, log_train_info, need_to_log_batch, need_to_log_tb
try:
from fblearner.flow.util.visualization_utils import summary_writer
except ImportError:
pass
logger = logging.getLogger(__name__)
np.set_printoptions(precision=5)
torch.set_printoptions(precision=5)
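# Batches with index <= THRESHOLD are assumed to have been pre-processed up front
# by the driver script (see scripts/final_fit.py); later batches are processed on
# the fly each epoch, where reverse=1 appears to apply the processing and
# reverse=2 to undo it (an assumption inferred from the call sites).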
THRESHOLD = 30000  # alternative values tried: 7500, -1
VAL_THRESHOLD = 10000
def train(
model,
train_options,
train_dataloader=None,
batch_processor=None,
device=None,
val_dataloader=None,
trainer_id=0,
send_end=None,
train_dataloader_batches=None,
val_dataloader_batches=None,
batch_size=1024,
):
try:
writer = summary_writer()
except Exception:
logger.error("Failed to create the tensorboard summary writer.")
writer = None
prev_avg_val_loss, is_improving, is_local_optimal = None, True, False
optimizer = model.get_optimizers()
loss = build_loss(model, loss_config=train_options.loss)
output = []
logging_options = train_options.logging_config
if train_dataloader_batches is None:
train_dataloader_batches = train_dataloader
is_train_dataloader = True
else:
is_train_dataloader = False
if val_dataloader_batches is None:
val_dataloader_batches = val_dataloader
is_val_dataloader = True
else:
is_val_dataloader = False
for i_epoch in range(0, train_options.nepochs):
start_time_epoch = time.time()
num_batches, avg_loss_epoch, q1, q2 = train_epoch(
model=model,
loss=loss,
optimizer=optimizer,
batch_processor=batch_processor,
trainer_id=trainer_id,
i_epoch=i_epoch,
device=device,
logging_options=logging_options,
writer=writer,
train_dataloader_batches=train_dataloader_batches,
batch_size=batch_size,
is_dataloader=is_train_dataloader,
)
logger.warning("Epoch:{}, Time for training: {}".format(i_epoch, time.time() - start_time_epoch))
avg_loss_epoch = log_train_info(
start_time=start_time_epoch,
i_batch=num_batches,
i_epoch=i_epoch,
trainer_id=trainer_id,
total_loss=avg_loss_epoch * num_batches * batch_size,
num_batches=num_batches,
batch_size=batch_size,
)
if writer is not None:
writer.add_scalar("train_metric/loss_epoch", avg_loss_epoch, i_epoch)
output.append({"i_epoch": i_epoch, "avg_train_loss": avg_loss_epoch})
if val_dataloader_batches is not None:
avg_val_loss, _, _, avg_auc = evaluate(
model=model,
loss=loss,
dataloader=val_dataloader_batches,
batch_processor=batch_processor,
device=device,
batch_size=batch_size,
is_dataloader=is_val_dataloader,
i_epoch=i_epoch,
)
output[-1]["avg_val_loss"] = avg_val_loss
output[-1]["roc_auc_score"] = avg_auc
# check if local optimal
(
is_local_optimal,
is_improving,
prev_avg_val_loss,
) = _check_local_optimal(
i_epoch, is_improving, avg_val_loss, prev_avg_val_loss
)
# break if is local optimal
if is_local_optimal and train_options.early_stop_on_val_loss:
break
if writer is not None:
writer.add_scalar("val_metric/loss_epoch", avg_val_loss, i_epoch)
logger.warning("Epoch:{}, validation loss: {}, roc_auc_score: {}, time: {}, q1: {}, q2: {}".format(i_epoch, avg_val_loss, avg_auc, time.time() - start_time_epoch, np.sum(q1), np.sum(q2)))
if writer is not None:
writer.close()
if send_end:
send_end.send(output)
return output
def _check_local_optimal(i_epoch, is_improving, avg_val_loss, prev_avg_val_loss):
is_local_optimal = i_epoch > 0 and is_improving and avg_val_loss > prev_avg_val_loss
is_improving = i_epoch == 0 or prev_avg_val_loss > avg_val_loss
prev_avg_val_loss = avg_val_loss
return is_local_optimal, is_improving, prev_avg_val_loss
def train_epoch(
model,
loss,
optimizer,
batch_processor,
logging_options,
device,
trainer_id,
i_epoch,
lock=None,
writer=None,
train_dataloader_batches=None,
batch_size=1024,
is_dataloader=True,
):
model.train()
start_time, loss_val, num_batches, sample_weight_sum = time.time(), 0.0, 0, 0.0
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
loss_val_epoch, total_num_batches, sample_weight_sum_epoch = (
0.0,
len(train_dataloader_batches),
0.0,
)
q1, q2 = [], []
qq3 = time.perf_counter()
for i_batch, sample_batched in enumerate(train_dataloader_batches):
if not is_dataloader and i_batch <= THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
label, feats, weight = batch_processor(mini_batch=sample_batched)
# forward pass
z_pred = model(feats=feats)
# backward pass
E = apply_loss(loss, z_pred, label, weight)
optimizer.zero_grad()
E.backward()
qq1 = time.perf_counter()
dd3 = qq1 - qq3
# torch.cuda.synchronize() # wait for mm to finish
qq2 = time.perf_counter()
optimizer.step()
# torch.cuda.synchronize() # wait for mm to finish
qq3 = time.perf_counter()
loss_val_batch = E.detach().cpu().numpy() * batch_size
sample_weight_sum_batch = (
batch_size if weight is None else torch.sum(weight).detach()
)
num_batches += 1
loss_val += loss_val_batch
loss_val_tb += loss_val_batch
loss_val_epoch += loss_val_batch
sample_weight_sum += sample_weight_sum_batch
sample_weight_sum_tb += sample_weight_sum_batch
sample_weight_sum_epoch += sample_weight_sum_batch
if need_to_log_batch(i_batch, logging_options, batch_size):
log_train_info(
i_batch=i_batch,
i_epoch=i_epoch,
trainer_id=trainer_id,
start_time=start_time,
total_loss=loss_val,
num_batches=num_batches,
sample_weight_sum=sample_weight_sum,
batch_size=batch_size,
lock=lock,
)
start_time, loss_val, num_batches, sample_weight_sum = (
time.time(),
0.0,
0,
0.0,
)
if writer is not None and need_to_log_tb(i_batch, logging_options, batch_size):
log_tb_info_batch(
writer=writer,
model=model,
pred=z_pred,
label=label,
optimizer=optimizer,
logging_options=logging_options,
iter=total_num_batches * i_epoch + i_batch,
start_time=start_time_tb,
trainer_id=trainer_id,
avg_loss=loss_val_tb / sample_weight_sum_tb,
lock=lock,
)
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
        dd1 = qq2 - qq1
        dd2 = qq3 - qq2
q1.append(dd2)
q2.append(dd3)
if not is_dataloader and i_batch > THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
avg_loss = loss_val_epoch / sample_weight_sum_epoch
return i_batch, avg_loss, q1, q2
def evaluate(model, loss, dataloader, batch_processor, device, batch_size=1024, is_dataloader=True, i_epoch=0):
model.eval()
preds = []
labels = []
loss_val, sample_weight_sum = 0.0, 0.0
for i_batch, sample_batched in enumerate(dataloader):
if not is_dataloader and i_batch <= VAL_THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > VAL_THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
label, feats, weight = batch_processor(mini_batch=sample_batched)
# forward pass
z_pred = model(feats=feats)
# preds.append(z_pred.detach().cpu().numpy())
# labels.append(label.detach().cpu().numpy())
preds += z_pred.detach().cpu().numpy().tolist()
labels += label.detach().cpu().numpy().tolist()
E = apply_loss(loss, z_pred, label, weight)
loss_val += E.detach().cpu().numpy() * batch_size
sample_weight_sum += (
batch_size if weight is None else torch.sum(weight).detach().cpu().numpy()
)
if not is_dataloader and i_batch > VAL_THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
avg_loss = loss_val / sample_weight_sum
# labels = np.asarray(labels).flatten()
# preds = np.asarray(preds).flatten()
try:
avg_auc = roc_auc_score(labels, preds)
except Exception:
idx = np.isfinite(preds)
if len(np.array(labels)[idx]) > 1:
logger.warning("Valid value for AUC: {}".format(idx))
avg_auc = roc_auc_score(np.array(labels)[idx], np.array(preds)[idx])
else:
avg_auc = np.nan
return avg_loss, labels, preds, avg_auc
|
AutoCTR-main
|
trainers/simple.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import json
import logging
import os
import pickle
import time
import numpy as np
import torch
# os.system(f"mount -o remount,size={60*1024*1024*1024} /dev/shm")
from thrift.protocol import TSimpleJSONProtocol
from thrift.util import Serializer
from config import ttypes as config
from models.nas_modules import NASRecNet
from trainers.simple_final import train as simple_train
from utils.data import prepare_data
from utils.search_utils import get_args, get_final_fit_trainer_config, get_phenotype
from torch.multiprocessing import Pipe, Process, set_start_method
set_start_method('spawn', force=True)
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (100000, rlimit[1]))
import GPUtil
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
jfactory = TSimpleJSONProtocol.TSimpleJSONProtocolFactory()
THRESHOLD = -1  # alternative value tried: 7500
VAL_THRESHOLD = -1
if __name__ == "__main__":
# get arguments
args = get_args()
logger.warning("All Args: {}".format(args))
# set seeds
np.random.seed(args.numpy_seed)
torch.manual_seed(args.torch_seed)
excludeID = [int(id) for id in args.excludeID.split(",")] if args.excludeID else []
# get model
filenames, model_config_dicts = get_phenotype(args)
# get trainer config
input_summary, args = get_final_fit_trainer_config(args)
# change dataset to small dataset
input_summary["data_options"]["from_file"]["data_file"] = args.data_file
input_summary["data_options"]["from_file"]["batch_size"] = args.batch_size
# change train_options
input_summary["train_options"]["nepochs"] = args.nepochs
input_summary["train_options"]["logging_config"]["log_freq"] = 100000
input_summary["train_options"]["logging_config"]["tb_log_freq"] = 100000
# change performance_options
input_summary["performance_options"]["num_readers"] = args.num_workers
input_summary["performance_options"]["num_trainers"] = args.num_trainers
input_summary["performance_options"]["use_gpu"] = args.use_gpu
# change optimizer
input_summary["feature_options"]["dense"]["optim"]["adam"]["lr"] = args.learning_rate
input_summary["feature_options"]["sparse"]["optim"]["sparse_adam"]["lr"] = args.learning_rate
# # change feature hashing size
# for i, feature in enumerate(input_summary["feature_options"]["sparse"]["features"]):
# if feature["hash_size"] > args.hash_size:
# input_summary["feature_options"]["sparse"]["features"][i]["hash_size"] = args.hash_size
# data_options
splits = [float(p) for p in args.splits.split(":")]
input_summary["data_options"]["from_file"]["splits"] = splits
# extract feature config for searcher construction and trainer
train_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["train_options"]),
config.TrainConfig(),
)
# extract feature config for searcher construction and trainer
feature_config = Serializer.deserialize(
jfactory,
json.dumps(input_summary["feature_options"]),
config.FeatureConfig(),
)
data_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["data_options"]),
config.DataConfig(),
)
performance_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["performance_options"]),
config.PerformanceConfig(),
)
# for datasaving purpose
batch_processor, train_dataloader, val_dataloader, eval_dataloader, \
train_dataloader_batches, val_dataloader_batches, eval_dataloader_batches \
= {}, {}, {}, {}, {}, {}, {}
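    # build one batch_processor per usable GPU; the dataloaders themselves are only
    # created for the first GPU, and their batches are optionally cached in memory
    # when args.save_batches is set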
for id in range(args.total_gpus):
if id not in excludeID:
CUDA = 'cuda:' + str(id)
if len(batch_processor) == 0:
(
_, # datasets
batch_processor[CUDA],
train_dataloader,
val_dataloader,
eval_dataloader,
) = prepare_data(data_options, performance_options, CUDA, pin_memory=False)
else:
(
_, # datasets
batch_processor[CUDA],
_,
_,
_, # eval_dataloader
) = prepare_data(data_options, performance_options, CUDA, pin_memory=True)
train_dataloader = None
val_dataloader = None
eval_dataloader = None
train_dataloader_batches[CUDA] = None
val_dataloader_batches[CUDA] = None
eval_dataloader_batches[CUDA] = None
if args.save_batches:
train_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
train_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
# if args.save_val_batches:
val_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(val_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
val_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
# if args.save_val_batches:
eval_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(eval_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
eval_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
if args.save_batches:
for i_batch, sample_batched in enumerate(train_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
if i_batch <= THRESHOLD:
train_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
if args.save_val_batches:
for i_batch, sample_batched in enumerate(val_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
if i_batch <= VAL_THRESHOLD:
val_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
for i_batch, sample_batched in enumerate(eval_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
if i_batch <= VAL_THRESHOLD:
eval_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
try:
deviceIDs = GPUtil.getAvailable(order='random',
limit=1,
maxLoad=args.maxLoad,
maxMemory=args.maxMemory,
excludeID=excludeID)
CUDA = 'cuda:' + str(deviceIDs[0])
except Exception:
logger.warning("No available device!")
for model_id, model_config_dict in enumerate(model_config_dicts):
nasrec_net = Serializer.deserialize(
jfactory,
json.dumps(model_config_dict),
config.ModelConfig(),
)
tmp_model = NASRecNet(nasrec_net, feature_config)
tmp_model.to(device=CUDA)
svfolder = os.path.join(args.save_model_path, "results", "final_fit")
svname = os.path.join(svfolder, filenames[model_id].split("/")[-1][:-5] + ".ckp")
if not os.path.exists(svfolder):
os.makedirs(svfolder)
output = simple_train(tmp_model,
train_options,
train_dataloader,
batch_processor[CUDA],
CUDA,
val_dataloader,
0,
None, # send_end,
train_dataloader_batches[CUDA],
val_dataloader_batches[CUDA],
args.batch_size,
eval_dataloader,
eval_dataloader_batches[CUDA],
                          save_model_name=svname if args.save_model else None,
)
logger.warning("Outputs of Model {} is: {}".format(filenames[model_id], output))
|
AutoCTR-main
|
scripts/final_fit.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import pandas as pd
import math
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
def preprocess_criteo(datafile):
train_path="train.txt"
# train_path="train.txt"
train_path = os.path.join(datafile, train_path)
f1 = open(train_path,'r')
dic= {}
# generate three fold.
# train_x: value
# train_i: index
# train_y: label
f_train_value = open(os.path.join(datafile, 'train_x.txt'),'w')
f_train_index = open(os.path.join(datafile, 'train_i.txt'),'w')
f_train_label = open(os.path.join(datafile, 'train_y.txt'),'w')
num_dense, num_sparse = 13, 26
num_feature = num_dense + num_sparse
for i in range(num_feature):
dic[i] = {}
cnt_train = 0
#for debug
#limits = 10000
index = [1] * num_sparse
for line in f1:
cnt_train +=1
if cnt_train % 100000 ==0:
print('now train cnt : %d\n' % cnt_train)
#if cnt_train > limits:
# break
split = line.strip('\n').split('\t')
# 0-label, 1-13 numerical, 14-39 category
for i in range(num_dense, num_feature):
#dic_len = len(dic[i])
            if split[i+1] not in dic[i]:
                # dic[i][value] = [index, count]: index 1 is shared by all values
                # seen at most 10 times; count tracks how often the value appears
                dic[i][split[i+1]] = [1, 0]
dic[i][split[i+1]][1] += 1
if dic[i][split[i+1]][0] == 1 and dic[i][split[i+1]][1] > 10:
index[i-num_dense] += 1
dic[i][split[i+1]][0] = index[i-num_dense]
f1.close()
print('total entries :%d\n' % (cnt_train - 1))
# calculate number of category features of every dimension
kinds = [num_dense]
for i in range(num_dense, num_feature):
kinds.append(index[i-num_dense])
print('number of dimensions : %d' % (len(kinds)-1))
print(kinds)
for i in range(1,len(kinds)):
kinds[i] += kinds[i-1]
print(kinds)
# make new data
f1 = open(train_path,'r')
cnt_train = 0
print('remake training data...\n')
for line in f1:
cnt_train +=1
if cnt_train % 100000 ==0:
print('now train cnt : %d\n' % cnt_train)
#if cnt_train > limits:
# break
entry = ['0'] * num_feature
index = [None] * num_feature
split = line.strip('\n').split('\t')
label = str(split[0])
for i in range(num_dense):
if split[i+1] != '':
entry[i] = (split[i+1])
index[i] = (i+1)
for i in range(num_dense, num_feature):
if split[i+1] != '':
entry[i] = '1'
index[i] = (dic[i][split[i+1]][0])
for j in range(num_sparse):
index[num_dense+j] += kinds[j]
index = [str(item) for item in index]
f_train_value.write(' '.join(entry)+'\n')
f_train_index.write(' '.join(index)+'\n')
f_train_label.write(label+'\n')
f1.close()
f_train_value.close()
f_train_index.close()
f_train_label.close()
def preprocess_avazu(datafile):
train_path = './train.csv'
f1 = open(train_path, 'r')
dic = {}
f_train_value = open('./train_x.txt', 'w')
f_train_index = open('./train_i.txt', 'w')
f_train_label = open('./train_y.txt', 'w')
debug = False
tune = False
Bound = [5] * 24
label_index = 1
Column = 24
numr_feat = []
numerical = [0] * Column
numerical[label_index] = -1
cate_feat = []
for i in range(Column):
if (numerical[i] == 0):
cate_feat.extend([i])
index_cnt = 0
index_others = [0] * Column
Max = [0] * Column
for i in numr_feat:
index_others[i] = index_cnt
index_cnt += 1
numerical[i] = 1
for i in cate_feat:
index_others[i] = index_cnt
index_cnt += 1
for i in range(Column):
dic[i] = dict()
cnt_line = 0
for line in f1:
cnt_line += 1
if (cnt_line == 1): continue # header
if (cnt_line % 1000000 == 0):
print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
if (debug == True):
if (cnt_line >= 10000):
break
split = line.strip('\n').split(',')
for i in cate_feat:
if (split[i] != ''):
if split[i] not in dic[i]:
dic[i][split[i]] = [index_others[i], 0]
dic[i][split[i]][1] += 1
if (dic[i][split[i]][0] == index_others[i] and dic[i][split[i]][1] == Bound[i]):
dic[i][split[i]][0] = index_cnt
index_cnt += 1
if (tune == False):
label = split[label_index]
if (label != '0'): label = '1'
index = [0] * (Column - 1)
value = ['0'] * (Column - 1)
for i in range(Column):
cur = i
if (i == label_index): continue
if (i > label_index): cur = i - 1
if (numerical[i] == 1):
index[cur] = index_others[i]
if (split[i] != ''):
value[cur] = split[i]
# Max[i] = max(int(split[i]), Max[i])
else:
if (split[i] != ''):
index[cur] = dic[i][split[i]][0]
value[cur] = '1'
if (split[i] == ''):
value[cur] = '0'
f_train_index.write(' '.join(str(i) for i in index) + '\n')
f_train_value.write(' '.join(value) + '\n')
f_train_label.write(label + '\n')
f1.close()
f_train_index.close()
f_train_value.close()
f_train_label.close()
print ("Finished!")
print ("index_cnt = %d" % index_cnt)
# print ("max number for numerical features:")
# for i in numr_feat:
# print ("no.:%d max: %d" % (i, Max[i]))
def preprocess_kdd(datafile):
#coding=utf-8
#Email of the author: zjduan@pku.edu.cn
'''
0. Click:
1. Impression(numerical)
2. DisplayURL: (categorical)
3. AdID:(categorical)
4. AdvertiserID:(categorical)
5. Depth:(numerical)
6. Position:(numerical)
7. QueryID: (categorical) the key of the data file 'queryid_tokensid.txt'.
8. KeywordID: (categorical)the key of 'purchasedkeyword_tokensid.txt'.
9. TitleID: (categorical)the key of 'titleid_tokensid.txt'.
10. DescriptionID: (categorical)the key of 'descriptionid_tokensid.txt'.
11. UserID: (categorical)the key of 'userid_profile.txt'
12. User's Gender: (categorical)
13. User's Age: (categorical)
'''
train_path = './training.txt'
f1 = open(train_path, 'r')
f2 = open('./userid_profile.txt', 'r')
dic = {}
f_train_value = open('./train_x.txt', 'w')
f_train_index = open('./train_i.txt', 'w')
f_train_label = open('./train_y.txt', 'w')
debug = False
tune = False
Column = 12
Field = 13
numr_feat = [1,5,6]
numerical = [0] * Column
cate_feat = [2,3,4,7,8,9,10,11]
index_cnt = 0
index_others = [0] * (Field + 1)
Max = [0] * 12
numerical[0] = -1
for i in numr_feat:
index_others[i] = index_cnt
index_cnt += 1
numerical[i] = 1
for i in cate_feat:
index_others[i] = index_cnt
index_cnt += 1
for i in range(Field + 1):
dic[i] = dict()
###init user_dic
user_dic = dict()
cnt_line = 0
for line in f2:
cnt_line += 1
if (cnt_line % 1000000 == 0):
print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
# if (debug == True):
# if (cnt_line >= 10000):
# break
split = line.strip('\n').split('\t')
user_dic[split[0]] = [split[1], split[2]]
if (split[1] not in dic[12]):
dic[12][split[1]] = [index_cnt, 0]
index_cnt += 1
if (split[2] not in dic[13]):
dic[13][split[2]] = [index_cnt, 0]
index_cnt += 1
cnt_line = 0
for line in f1:
cnt_line += 1
if (cnt_line % 1000000 == 0):
print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
if (debug == True):
if (cnt_line >= 10000):
break
split = line.strip('\n').split('\t')
for i in cate_feat:
if (split[i] != ''):
if split[i] not in dic[i]:
dic[i][split[i]] = [index_others[i], 0]
dic[i][split[i]][1] += 1
if (dic[i][split[i]][0] == index_others[i] and dic[i][split[i]][1] == 10):
dic[i][split[i]][0] = index_cnt
index_cnt += 1
if (tune == False):
label = split[0]
if (label != '0'): label = '1'
index = [0] * Field
value = ['0'] * Field
for i in range(1, 12):
if (numerical[i] == 1):
index[i - 1] = index_others[i]
if (split[i] != ''):
value[i - 1] = split[i]
Max[i] = max(int(split[i]), Max[i])
else:
if (split[i] != ''):
index[i - 1] = dic[i][split[i]][0]
value[i - 1] = '1'
if (split[i] == ''):
value[i - 1] = '0'
if (i == 11 and split[i] == '0'):
value[i - 1] = '0'
### gender and age
if (split[11] == '' or (split[11] not in user_dic)):
index[12 - 1] = index_others[12]
value[12 - 1] = '0'
index[13 - 1] = index_others[13]
value[13 - 1] = '0'
else:
index[12 - 1] = dic[12][user_dic[split[11]][0]][0]
value[12 - 1] = '1'
index[13 - 1] = dic[13][user_dic[split[11]][1]][0]
value[13 - 1] = '1'
f_train_index.write(' '.join(str(i) for i in index) + '\n')
f_train_value.write(' '.join(value) + '\n')
f_train_label.write(label + '\n')
f1.close()
f_train_index.close()
f_train_value.close()
f_train_label.close()
print ("Finished!")
print ("index_cnt = %d" % index_cnt)
print ("max number for numerical features:")
for i in numr_feat:
print ("no.:%d max: %d" % (i, Max[i]))
def _load_data(_nrows=None, debug = False, datafile=""):
TRAIN_X = os.path.join(datafile, 'train_x.txt')
TRAIN_Y = os.path.join(datafile, 'train_y.txt')
print(TRAIN_X)
print(TRAIN_Y)
    train_x = pd.read_csv(TRAIN_X, header=None, sep=' ', nrows=_nrows, dtype=np.float64)
    train_y = pd.read_csv(TRAIN_Y, header=None, sep=' ', nrows=_nrows, dtype=np.int32)
train_x = train_x.values
train_y = train_y.values.reshape([-1])
print('data loading done!')
print('training data : %d' % train_y.shape[0])
assert train_x.shape[0]==train_y.shape[0]
return train_x, train_y
def save_x_y(fold_index, train_x, train_y, datafile):
train_x_name = "train_x.npy"
train_y_name = "train_y.npy"
_get = lambda x, l: [x[i] for i in l]
for i in range(len(fold_index)):
print("now part %d" % (i+1))
part_index = fold_index[i]
Xv_train_, y_train_ = _get(train_x, part_index), _get(train_y, part_index)
save_dir_Xv = os.path.join(datafile, "part" + str(i+1))
save_dir_y = os.path.join(datafile, "part" + str(i+1))
if (os.path.exists(save_dir_Xv) == False):
os.makedirs(save_dir_Xv)
if (os.path.exists(save_dir_y) == False):
os.makedirs(save_dir_y)
save_path_Xv = os.path.join(save_dir_Xv, train_x_name)
save_path_y = os.path.join(save_dir_y, train_y_name)
np.save(save_path_Xv, Xv_train_)
np.save(save_path_y, y_train_)
def save_i(fold_index, datafile):
_get = lambda x, l: [x[i] for i in l]
TRAIN_I = os.path.join(datafile, 'train_i.txt')
train_i = pd.read_csv(TRAIN_I,header=None,sep=' ',nrows=None, dtype=np.int32)
train_i = train_i.values
feature_size = train_i.max() + 1
print ("feature_size = %d" % feature_size)
feature_size = [feature_size]
feature_size = np.array(feature_size)
np.save(os.path.join(datafile, "feature_size.npy"), feature_size)
# pivot = 40000000
# test_i = train_i[pivot:]
# train_i = train_i[:pivot]
# print("test_i size: %d" % len(test_i))
print("train_i size: %d" % len(train_i))
# np.save("../data/test/test_i.npy", test_i)
for i in range(len(fold_index)):
print("now part %d" % (i+1))
part_index = fold_index[i]
Xi_train_ = _get(train_i, part_index)
save_path_Xi = os.path.join(datafile, "part" + str(i+1), 'train_i.npy')
np.save(save_path_Xi, Xi_train_)
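# Split the data into 10 stratified folds and save each fold to partN/ as .npy files.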
def stratifiedKfold(datafile):
train_x, train_y = _load_data(datafile=datafile)
print('loading data done!')
folds = list(StratifiedKFold(n_splits=10, shuffle=True,
random_state=2018).split(train_x, train_y))
fold_index = []
for i,(train_id, valid_id) in enumerate(folds):
fold_index.append(valid_id)
print("fold num: %d" % (len(fold_index)))
fold_index = np.array(fold_index)
np.save(os.path.join(datafile, "fold_index.npy"), fold_index)
save_x_y(fold_index, train_x, train_y, datafile=datafile)
print("save train_x_y done!")
fold_index = np.load(os.path.join(datafile, "fold_index.npy"), allow_pickle=True)
save_i(fold_index, datafile=datafile)
print("save index done!")
def scale(x):
if x > 2:
x = int(math.log(float(x))**2)
return x
def scale_dense_feat(datafile, dataset_name):
if args.dataset_name == "criteo":
num_dense = 13
elif args.dataset_name == "avazu":
return True
elif args.dataset_name == "kdd":
num_dense = 3
for i in range(1,11):
print('now part %d' % i)
data = np.load(os.path.join(datafile, 'part'+str(i), 'train_x.npy'), allow_pickle=True)
part = data[:,:num_dense]
for j in range(part.shape[0]):
if j % 100000 ==0:
print(j)
part[j] = list(map(scale, part[j]))
np.save(os.path.join(datafile, 'part' + str(i), 'train_x2.npy'), data)
def print_shape(name, var):
print("Shape of {}: {}".format(name, var.shape))
def check_existing_file(filename, force):
if os.path.isfile(filename):
print("file {} already exists!".format(filename))
if not force:
raise ValueError("aborting, use --force if you want to processed")
else:
print("Will override the file!")
def sample_data(args):
output_data_file = "{}{}.npz".format(args.data_file, args.save_filename)
check_existing_file(output_data_file, args.force)
data = np.load(args.sample_data_file, allow_pickle=True)
X_cat, X_int, y = data["X_cat"], data["X_int"], data["y"]
print_shape("X_cat", X_cat)
print_shape("X_int", X_int)
print_shape("y", y)
print("total number of data points: {}".format(len(y)))
print(
"saving first {} data points to {}{}.npz".format(
args.num_samples, args.data_file, args.save_filename
)
)
np.savez_compressed(
"{}{}.npz".format(args.data_file, args.save_filename),
X_int=X_int[0 : args.num_samples, :],
X_cat=X_cat[0 : args.num_samples, :],
y=y[0 : args.num_samples],
)
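# Remap raw categorical ids to a consecutive id space, keeping id 0 reserved for
# missing values (either pre-seeded via raw_to_new={missing_raw_id: 0}, or
# implicitly when raw_to_new is None and new ids start at 1).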
def compress_ids(feature, raw_to_new=None):
    # default to None rather than a mutable {} so repeated calls never share state
    if raw_to_new is None:
        start_idx = 1
        raw_to_new = {}
    else:
        start_idx = 0
for i in range(len(feature)):
if feature[i] not in raw_to_new:
raw_to_new[feature[i]] = len(raw_to_new) + start_idx
feature[i] = raw_to_new[feature[i]]
return raw_to_new
def final_preprocess(datafile):
X_int = []
X_cat = []
y = []
missing_sparse = []
if args.dataset_name == "criteo":
num_dense, num_sparse = 13, 26
TRAIN_X = "train_x2.npy"
elif args.dataset_name == "avazu":
num_dense, num_sparse = 0, 23
TRAIN_X = "train_x.npy"
elif args.dataset_name == "kdd":
num_dense, num_sparse = 3, 10
TRAIN_X = "train_x2.npy"
TRAIN_Y = "train_y.npy"
TRAIN_I = "train_i.npy"
    for i in [3, 4, 5, 6, 7, 8, 9, 10, 2, 1]:  # part order; originally range(1, 11)
f = np.load(os.path.join(datafile, "part" + str(i), TRAIN_I), "r", allow_pickle=True)
g = np.load(os.path.join(datafile, "part" + str(i), TRAIN_X), "r", allow_pickle=True)
h = np.load(os.path.join(datafile, "part" + str(i), TRAIN_Y), "r", allow_pickle=True)
X_int_split = np.array(g[:, 0:num_dense])
X_cat_split = np.array(f[:, num_dense:])
y_split = h
missing_sparse_split = np.array(g[:,0:])
indices = np.arange(len(y_split))
indices = np.random.permutation(indices)
# shuffle data
X_cat_split = X_cat_split[indices]
X_int_split = X_int_split[indices]
y_split = y_split[indices].astype(np.float32)
missing_sparse_split = missing_sparse_split[indices]
X_int.append(X_int_split)
X_cat.append(X_cat_split)
y.append(y_split)
missing_sparse.append(missing_sparse_split)
X_int = np.concatenate(X_int)
X_cat = np.concatenate(X_cat)
y = np.concatenate(y)
missing_sparse = np.concatenate(missing_sparse)
print("expected feature size", X_cat.max() + 1)
flat = X_cat.flatten()
fset = set(flat)
print("expected size", len(fset))
missing_sparse_maps = []
for i in range(num_sparse):
missing_slice = missing_sparse[:,i]
if 0 in missing_slice:
locs = np.where(missing_slice==0)[0]
missing_sparse_maps.append({X_cat[locs[0],i]:0})
else:
missing_sparse_maps.append(None)
raw_to_new_ids = []
for i in range(X_cat.shape[1]):
print("compressing the ids for the {}-th feature.".format(i))
raw_to_new_ids.append(compress_ids(X_cat[:, i], missing_sparse_maps[i]))
total = 0
hashsizes = []
for i in range(len(raw_to_new_ids)):
        hashsize = max(raw_to_new_ids[i].values()) + 1  # +1 reserves id 0 for missing
hashsizes.append(hashsize)
print("sparse_" + str(i),"\t", hashsize)
total += hashsize
if args.dataset_name == "criteo":
hashsize_filename = "criteo_hashsizes.npy"
finaldata_filename = "criteo_processed.npz"
elif args.dataset_name == "avazu":
hashsize_filename = "avazu_hashsizes.npy"
finaldata_filename = "avazu_processed.npz"
elif args.dataset_name == "kdd":
hashsize_filename = "kdd2012_hashsizes.npy"
finaldata_filename = "kdd2012_processed.npz"
np.save(os.path.join(datafile, hashsize_filename), np.array(hashsizes))
np.savez_compressed(os.path.join(datafile, finaldata_filename), X_int=X_int, X_cat=X_cat, y=y)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Data")
parser.add_argument("--dataset-name", default="criteo", choices=["criteo", "avazu", "kdd"])
parser.add_argument("--data-file", type=str, default="")
parser.add_argument("--sample-data-file", type=str, default="")
parser.add_argument("--save-filename", type=str, default="")
parser.add_argument("--mode", type=str, default="raw")
parser.add_argument("--num-samples", type=int, default=1000)
parser.add_argument("--force", action="store_true", default=False)
args = parser.parse_args()
if args.mode == "raw":
print(
"Load raw data and parse (compress id to consecutive space, "
"shuffle within ds) and save it."
)
if args.dataset_name == "criteo":
preprocess_criteo(datafile=args.data_file)
elif args.dataset_name == "avazu":
preprocess_avazu(datafile=args.data_file)
elif args.dataset_name == "kdd":
preprocess_kdd(datafile=args.data_file)
print("Start stratifiedKfold!")
stratifiedKfold(datafile=args.data_file)
print("Start scaling!")
scale_dense_feat(datafile=args.data_file, dataset_name=args.dataset_name)
print("Final preprocessing stage!")
final_preprocess(datafile=args.data_file)
print("Finish data preprocessing!")
elif args.mode == "sample":
print("Load processed data and take the first K data points and save it.")
sample_data(args)
else:
raise ValueError("Unknown mode: {}".format(args.mode))
|
AutoCTR-main
|
scripts/preprocess.py
|
AutoCTR-main
|
scripts/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import json
import logging
import os
import pickle
import time
import copy
import numpy as np
import torch
from thrift.protocol import TSimpleJSONProtocol
from thrift.util import Serializer
from config import ttypes as config
from models.nas_modules import NASRecNet
from models.builder import load_model
from nasrec.builder import build_searcher, load_searcher, save_searcher
from nasrec.utils import reward_normalization
from trainers.simple import train as simple_train
from utils.data import prepare_data
from utils.search_utils import get_args, get_trainer_config, get_searcher_config
from torch.multiprocessing import Pipe, Process, set_start_method
from thop import profile
set_start_method('spawn', force=True)
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (100000, rlimit[1]))
import GPUtil
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
jfactory = TSimpleJSONProtocol.TSimpleJSONProtocolFactory()
if __name__ == "__main__":
# get arguments
args = get_args()
logger.warning("All Args: {}".format(args))
# set seeds
np.random.seed(args.numpy_seed)
torch.manual_seed(args.torch_seed)
excludeID = [int(id) for id in args.excludeID.split(",")] if args.excludeID else []
deviceIDs = GPUtil.getAvailable(order='first', limit=1, maxLoad=0.9, maxMemory=0.8, excludeID=excludeID)
CUDA = 'cuda:' + str(deviceIDs[0])
device = torch.device("cpu")
if args.use_gpu:
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
device = torch.device(CUDA)
else:
print("WARNING: CUDA is not available on this machine, proceed with CPU")
# load warm start emb dict
if args.warm_start_emb:
if args.data_set_name == "criteo":
ckp_name = "warm_start_criteo.ckp"
elif args.data_set_name == "avazu":
ckp_name = "warm_start_avazu.ckp"
elif args.data_set_name == "kdd2012":
ckp_name = "warm_start_kdd2012.ckp"
warm_start_filename = os.path.join(args.save_model_path, "models", ckp_name)
warm_start_model = load_model(warm_start_filename)
warm_start_emb_dict = warm_start_model.emb_dict
# get trainer config
input_summary, args = get_trainer_config(args)
# change dataset to small dataset
input_summary["data_options"]["from_file"]["data_file"] = args.data_file
input_summary["data_options"]["from_file"]["batch_size"] = args.batch_size
# change train_options
input_summary["train_options"]["nepochs"] = args.nepochs
input_summary["train_options"]["logging_config"]["log_freq"] = 100000
input_summary["train_options"]["logging_config"]["tb_log_freq"] = 100000
# change performance_options
input_summary["performance_options"]["num_readers"] = args.num_workers
input_summary["performance_options"]["num_trainers"] = args.num_trainers
input_summary["performance_options"]["use_gpu"] = args.use_gpu
# change optimizer
input_summary["feature_options"]["dense"]["optim"]["adam"]["lr"] = args.learning_rate
input_summary["feature_options"]["sparse"]["optim"]["sparse_adam"]["lr"] = args.learning_rate
# change feature hashing size
for i, feature in enumerate(input_summary["feature_options"]["sparse"]["features"]):
if feature["hash_size"] > args.hash_size:
input_summary["feature_options"]["sparse"]["features"][i]["hash_size"] = args.hash_size
# extract feature config for searcher construction and trainer
train_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["train_options"]),
config.TrainConfig(),
)
# extract feature config for searcher construction and trainer
feature_config = Serializer.deserialize(
jfactory,
json.dumps(input_summary["feature_options"]),
config.FeatureConfig(),
)
data_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["data_options"]),
config.DataConfig(),
)
performance_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["performance_options"]),
config.PerformanceConfig(),
)
# construct temporal directory to save models
if args.resume_file:
temp_dir = os.path.join(
args.save_model_path,
args.searcher_type,
args.data_set_name,
args.resume_file,
)
rewards = np.load(os.path.join(temp_dir, "rewards.npy"), allow_pickle=True).tolist()
all_roc_aucs = np.load(os.path.join(temp_dir, "all_roc_aucs.npy"), allow_pickle=True).tolist()
all_arc_vecs = np.load(os.path.join(temp_dir, "all_arc_vecs.npy"), allow_pickle=True).tolist()
all_actions = np.load(os.path.join(temp_dir, "all_actions.npy"), allow_pickle=True).tolist()
all_params = np.load(os.path.join(temp_dir, "all_params.npy"), allow_pickle=True).tolist()
all_flops = np.load(os.path.join(temp_dir, "all_flops.npy"), allow_pickle=True).tolist()
finished_model = np.load(os.path.join(temp_dir, "finished_model.npy"), allow_pickle=True).tolist()
fbl_meta = np.load(os.path.join(temp_dir, "fbl_meta.npy"), allow_pickle=True).tolist()
# unpickling meta data
with open(os.path.join(temp_dir, "meta.txt"), "rb") as fp:
[
best_val_loss,
best_model,
best_name,
best_fbl_id,
total_model,
epoch,
] = pickle.load(fp)
fp.close()
searcher = load_searcher(os.path.join(temp_dir, "searcher.ckp"))
if args.searcher_type == "evo":
is_initial = np.load(os.path.join(temp_dir, "is_initial.npy"), allow_pickle=True).tolist()
if args.searcher_type == "evo":
searcher.all_arc_vecs = all_arc_vecs
searcher.all_actions = all_actions
searcher.all_params = all_params
searcher.all_flops = all_flops
searcher.all_rewards = rewards
searcher.all_roc_aucs = all_roc_aucs
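            # survival strategies: "age" keeps the newest population_size archs,
            # "fit" keeps the lowest-reward (best) ones, "mix" keeps half best and
            # half newest, and "comb" delegates to searcher.comb()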
if args.survival_type == "age":
searcher.population_arc_queue = all_actions[-searcher.population_size:]
searcher.population_val_queue = rewards[-searcher.population_size:]
elif args.survival_type == "comb":
searcher.comb()
else:
if args.survival_type == "fit":
idx = sorted(range(len(rewards)), key=lambda i: rewards[i], reverse=True)[
-searcher.population_size:]
elif args.survival_type == "mix":
division = int(0.5 * searcher.population_size)
tmp_rewards = rewards[:-division]
idx = sorted(range(len(tmp_rewards)), key=lambda i: tmp_rewards[i], reverse=True)[-division:]
searcher.population_arc_queue = np.array(all_actions)[idx].tolist()
searcher.population_val_queue = np.array(rewards)[idx].tolist()
if args.survival_type == "mix":
searcher.population_arc_queue += all_actions[-division:]
searcher.population_val_queue += rewards[-division:]
logger.warning("Total_resume_length: arc_{}, val_{}".format(
len(searcher.population_arc_queue),
len(searcher.population_val_queue)
))
searcher.sampler_type = args.sampler_type
searcher.update_GBDT()
else:
if args.save_model_path:
temp_dir = os.path.join(
args.save_model_path,
args.searcher_type,
args.data_set_name,
time.strftime("%Y%m%d-%H%M%S"),
)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
# construct searcher
searcher_config = get_searcher_config(args)
searcher = build_searcher(searcher_config, feature_config)
searcher.to(device=device)
best_val_loss = np.Inf
best_model = None
best_name = None
best_fbl_id = None
fbl_meta = []
rewards = []
all_roc_aucs = []
finished_model = []
total_model = -1
epoch = 0
        # for checking repeated architectures
all_arc_vecs = []
# mark all actions (block_configs)
all_actions = []
all_params = []
all_flops = []
if args.searcher_type == "evo":
is_initial = True
all_forward_node_ids = []
all_virtual_losses = []
logger.warning("The running history is save in {}".format(temp_dir))
fbl_run_queue = []
fbl_result_queue = []
fbl_device_queue = []
fbl_time_queue = []
fbl_name_queue = []
fbl_id_queue = []
nasrec_net_queue = []
nasrec_arc_vec_queue = []
action_queue = []
params_queue = []
flops_queue = []
# for datasaving purpose
batch_processor, train_dataloader, val_dataloader, \
val_dataloader_batches, train_dataloader_batches = {}, {}, {}, {}, {}
for id in range(args.total_gpus):
if id not in excludeID:
CUDA = 'cuda:' + str(id)
if len(batch_processor) == 0:
(
_, # datasets
batch_processor[CUDA],
train_dataloader,
val_dataloader,
_, # eval_dataloader
) = prepare_data(data_options, performance_options, CUDA)
else:
(
_, # datasets
batch_processor[CUDA],
_,
_,
_, # eval_dataloader
) = prepare_data(data_options, performance_options, CUDA)
if args.save_batches:
train_dataloader_batches[CUDA] = []
val_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
train_dataloader_batches[CUDA].append(sample_batched)
for i_batch, sample_batched in enumerate(val_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
val_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
else:
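                    # placeholders of matching length; every element is replaced
                    # below with a per-device clone of the first GPU's cached batches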
train_dataloader_batches[CUDA] = [[]] * len(train_dataloader_batches[mark])
val_dataloader_batches[CUDA] = [[]] * len(val_dataloader_batches[mark])
for i_batch, sample_batched in enumerate(train_dataloader_batches[mark]):
train_dataloader_batches[CUDA][i_batch] = {}
if i_batch % 100 == 0:
logger.warning("copy i_batch {}".format(i_batch))
for k, v in sample_batched.items():
train_dataloader_batches[CUDA][i_batch][k] = v.clone().detach()
# train_dataloader_batches[CUDA] = train_dataloader_batches[mark]
for i_batch, sample_batched in enumerate(train_dataloader_batches[CUDA]):
if i_batch % 100 == 0:
logger.warning("process_i_batch {}".format(i_batch))
train_dataloader_batches[CUDA][i_batch] = batch_processor[CUDA](
mini_batch=sample_batched)
for i_batch, sample_batched in enumerate(val_dataloader_batches[mark]):
val_dataloader_batches[CUDA][i_batch] = {}
if i_batch % 100 == 0:
logger.warning("copy i_batch {}".format(i_batch))
for k, v in sample_batched.items():
val_dataloader_batches[CUDA][i_batch][k] = v.clone().detach()
# val_dataloader_batches[CUDA] = val_dataloader_batches[mark]
for i_batch, sample_batched in enumerate(val_dataloader_batches[CUDA]):
if i_batch % 100 == 0:
logger.warning("process_i_batch {}".format(i_batch))
val_dataloader_batches[CUDA][i_batch] = batch_processor[CUDA](
mini_batch=sample_batched)
else:
train_dataloader_batches[CUDA] = None
val_dataloader_batches[CUDA] = None
if args.save_batches:
for i_batch, sample_batched in enumerate(train_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
train_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
for i_batch, sample_batched in enumerate(val_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
val_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
logger.warning("batch_processor {}".format(batch_processor))
# load historical samples (could from other searchers)
if args.historical_sample_path and args.historical_sample_num:
hist_dir = args.historical_sample_path
rewards = np.load(os.path.join(hist_dir, "rewards.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
all_actions = np.load(os.path.join(hist_dir, "all_actions.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
# TODO: all_params, all_flops
try:
all_params = np.load(os.path.join(hist_dir, "all_params.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
all_flops = np.load(os.path.join(hist_dir, "all_flops.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
        except Exception:
            # all_params.npy / all_flops.npy are missing; recompute them below
            # from the saved model configs
finished_model = np.load(os.path.join(hist_dir, "finished_model.npy"), allow_pickle=True).tolist()
all_params, all_flops = [], []
# Get the flops and params of the model
for i_batch, sample_batched in enumerate(train_dataloader_batches[CUDA]):
_, feats, _ = sample_batched
break
for nasrec_net_fp in finished_model:
with open(nasrec_net_fp, "r") as fp:
nasrec_net_config = json.load(fp)
nasrec_net = Serializer.deserialize(
jfactory,
json.dumps(nasrec_net_config),
config.ModelConfig(),
)
tmp_model = NASRecNet(nasrec_net, feature_config)
tmp_model.to(device=CUDA)
flops, params = profile(tmp_model, inputs=(feats, ), verbose=False)
flops = flops * 1.0 / args.batch_size
all_params.append(params)
all_flops.append(flops)
np.save(os.path.join(hist_dir, "all_params.npy"), np.array(all_params))
np.save(os.path.join(hist_dir, "all_flops.npy"), np.array(all_flops))
all_params = np.load(os.path.join(hist_dir, "all_params.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
all_flops = np.load(os.path.join(hist_dir, "all_flops.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
logger.warning(
"resume_all_params: {} all_flops: {}".format(all_params, all_flops)
)
        # convert actions to vecs (we do not directly read the vecs
        # since we may change the vectorized expression of an arc)
all_arc_vecs = [
np.concatenate(searcher.dicts_to_vecs(action)) for action in all_actions
]
finished_model = np.load(os.path.join(hist_dir, "finished_model.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
fbl_meta = np.load(os.path.join(hist_dir, "fbl_meta.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
for mp_old in finished_model:
with open(mp_old, "r") as fp:
nasrec_net_old = json.load(fp)
fp.close()
mp_new = os.path.join(temp_dir, mp_old.split("/")[-1])
with open(mp_new, "w") as fp:
json.dump(nasrec_net_old, fp)
fp.close()
# unpickling meta data
best_idx = np.argmin(rewards)
best_val_loss = rewards[best_idx]
best_name, best_fbl_id = fbl_meta[best_idx]
logger.warning(
"resume_best_val_loss: {} best_idx: {} best_name {}, best_fbl_id {}".format(
best_val_loss, best_idx, best_name, best_fbl_id
)
)
best_model_filename = os.path.join(
hist_dir, finished_model[best_idx].split("/")[-1]
)
with open(best_model_filename, "r") as fp:
best_model = json.load(fp)
fp.close()
total_model = args.historical_sample_num
epoch = args.historical_sample_num
if args.searcher_type == "evo":
searcher.all_arc_vecs = all_arc_vecs
searcher.all_actions = all_actions
searcher.all_params = all_params
searcher.all_flops = all_flops
searcher.all_rewards = rewards
# searcher.all_roc_aucs = all_roc_aucs
if args.survival_type == "age":
searcher.population_arc_queue = all_actions[-searcher.population_size:]
searcher.population_val_queue = rewards[-searcher.population_size:]
elif args.survival_type == "comb":
searcher.comb()
else:
if args.survival_type == "fit":
idx = sorted(range(len(rewards)), key=lambda i: rewards[i], reverse=True)[
-searcher.population_size:]
elif args.survival_type == "mix":
division = int(0.5 * searcher.population_size)
tmp_rewards = rewards[:-division]
idx = sorted(range(len(tmp_rewards)), key=lambda i: tmp_rewards[i], reverse=True)[-division:]
searcher.population_arc_queue = np.array(all_actions)[idx].tolist()
searcher.population_val_queue = np.array(rewards)[idx].tolist()
if args.survival_type == "mix":
searcher.population_arc_queue += all_actions[-division:]
searcher.population_val_queue += rewards[-division:]
logger.warning("Total_hist_length: arc_{}, val_{}".format(
len(searcher.population_arc_queue),
len(searcher.population_val_queue)
))
if len(searcher.population_arc_queue) == searcher.population_size:
is_initial = False
searcher.sampler_type = args.sampler_type
searcher.update_GBDT()
while epoch < args.search_nepochs:
while len(fbl_run_queue) < args.num_machines:
logger.info(
"Using fblearner training with {} trainers.".format(args.num_trainers)
)
# Three steps NAS
# 1. generate arcs
if args.searcher_type == "evo":
nasrec_net, _, actions, nasrec_arc_vecs = searcher.sample(
batch_size=1, return_config=True, is_initial=is_initial
)
else:
nasrec_net, log_prob, actions, nasrec_arc_vecs = searcher.sample(
batch_size=1, return_config=True
)
nasrec_net = nasrec_net[0]
action = actions[0]
nasrec_arc_vec = nasrec_arc_vecs[0]
total_model += 1
# check if an arch has already been searched before
repeat_idx = (
[]
if not all_arc_vecs or args.repeat_checker_off
else np.where(
np.sum(abs(np.array(all_arc_vecs) - nasrec_arc_vec), 1) == 0
)[0]
)
if len(repeat_idx) != 0:
logger.warning("The architecture is same with: {}.".format(repeat_idx))
continue
repeat_idx_1 = (
[]
if not nasrec_arc_vec_queue or args.repeat_checker_off
else np.where(
np.sum(abs(np.array(nasrec_arc_vec_queue) - nasrec_arc_vec), 1) == 0
)[0]
)
# TODO: check correctness
if len(repeat_idx_1) != 0:
logger.warning("The architecture is same with the current running: {}.".format(repeat_idx_1))
continue
# 2. put on fblearner to get performance
model_option = Serializer.serialize(jfactory, nasrec_net)
model_option = json.loads(model_option)
input_summary["model_option"] = model_option
basename = (
"[exp autoctr] nasnet_model_search_"
+ args.searcher_type
+ "_macro_space_type_"
+ str(args.macro_space_type)
+ "_"
+ str(total_model)
+ "_updated_model_"
+ str(epoch)
)
try:
if len(repeat_idx) != 0:
break
# TODO: device
deviceIDs = GPUtil.getAvailable(order='random',
limit=1,
maxLoad=args.maxLoad,
maxMemory=args.maxMemory,
excludeID=excludeID)
CUDA = 'cuda:' + str(deviceIDs[0])
except Exception:
logger.warning("No available device!")
try:
recv_end, send_end = Pipe(False)
tmp_model = NASRecNet(nasrec_net, feature_config)
if args.warm_start_emb:
tmp_model.emb_dict = copy.deepcopy(warm_start_emb_dict)
tmp_model.to(device=CUDA)
# Get the flops and params of the model
for i_batch, sample_batched in enumerate(train_dataloader_batches[CUDA]):
_, feats, _ = sample_batched
break
flops, params = profile(tmp_model, inputs=(feats, ), verbose=False)
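                # the profiled cost scales with the input batch, so normalize to
                # per-sample FLOPs before recording it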
flops = flops * 1.0 / args.batch_size
logger.warning("The current flops {}, params {}".format(flops, params))
# launch a subprocess for model training
new_fbl_run = Process(target=simple_train,
args=(tmp_model,
train_options,
None, # train_dataloader[CUDA],
batch_processor[CUDA],
CUDA,
None,
0,
send_end,
train_dataloader_batches[CUDA],
val_dataloader_batches[CUDA],
args.batch_size
# args.save_batches,
))
new_fbl_run.start()
fbl_id_queue.append(total_model)
fbl_run_queue.append(new_fbl_run)
fbl_result_queue.append(recv_end)
fbl_device_queue.append(CUDA)
fbl_time_queue.append(0)
fbl_name_queue.append(basename)
nasrec_net_queue.append(model_option)
nasrec_arc_vec_queue.append(nasrec_arc_vec)
action_queue.append(action)
params_queue.append(params)
flops_queue.append(flops)
except Exception:
logger.warning("Model are cannot be registered now!!")
if len(repeat_idx) != 0:
# has repeated arch
(fbl_name, fbl_id) = fbl_meta[repeat_idx[0]]
rewards.append(rewards[repeat_idx[0]])
model_filename = finished_model[repeat_idx[0]]
with open(model_filename, "r") as fp:
nasrec_net = json.load(fp)
fp.close()
nasrec_arc_vec = all_arc_vecs[repeat_idx[0]]
action = all_actions[repeat_idx[0]]
params = all_params[repeat_idx[0]]
flops = all_flops[repeat_idx[0]]
else:
# check the status of all the current models
mark = args.num_machines
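            # poll the workers until one has exited or exceeded fbl_kill_time;
            # `mark` ends up as that worker's index in the queues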
while mark == args.num_machines:
fbl_time_queue = [t + args.waiting_time for t in fbl_time_queue]
mark = 0
for i, fbl_run in enumerate(fbl_run_queue):
if (
fbl_run.exitcode is None
and fbl_time_queue[i] <= args.fbl_kill_time
):
mark += 1
else:
break
logger.warning("All model are currently running!")
time.sleep(args.waiting_time)
# get the terminated workflow
fbl_run = fbl_run_queue.pop(mark)
fbl_result = fbl_result_queue.pop(mark)
fbl_device = fbl_device_queue.pop(mark)
fbl_time = fbl_time_queue.pop(mark)
fbl_name = fbl_name_queue.pop(mark).split("_")
fbl_id = fbl_id_queue.pop(mark)
nasrec_net = nasrec_net_queue.pop(mark)
nasrec_arc_vec = nasrec_arc_vec_queue.pop(mark)
action = action_queue.pop(mark)
params = params_queue.pop(mark)
flops = flops_queue.pop(mark)
if fbl_time > args.fbl_kill_time:
fbl_run.terminate()
logger.warning(
"Model #_{} training Failed. ID: {}".format(fbl_name[-4], fbl_id)
)
epoch -= 1
continue
            # a worker in the queue has finished within the time limit; collect its results
logger.warning("mark {}, len(fbl_run_queue) {}".format(mark, len(fbl_run_queue)))
try:
output = fbl_result.recv()
except Exception:
# Failed to extract results due to some transient issue.
logger.warning(
"The results of model #_{} are failed to be obtained. ID: {}. DeviceID: {}".format(
fbl_name[-4], fbl_id, fbl_device
)
)
epoch -= 1
continue
logger.warning(
"Outputs of Model f{}_M_{}_S_{}: {}".format(
fbl_id, fbl_name[-4], fbl_name[-1], output
)
)
if output[-2]["avg_val_loss"] is None or np.isnan(output[-2]["avg_val_loss"]) \
or output[-2]["roc_auc_score"] is None or np.isnan(output[-2]["roc_auc_score"]):
# Output is NaN sometimes.
logger.warning(
"Model #_{} validation output is Invalid (None)! ID: {}".format(
fbl_name[-4], fbl_id
)
)
epoch -= 1
continue
all_roc_aucs.append([output[-2]["avg_val_loss"], output[-2]["roc_auc_score"]])
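            # rewards are minimized: use validation logloss directly, or 1 - AUC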
if args.reward_type == "logloss":
rewards.append(output[-2]["avg_val_loss"])
elif args.reward_type == "auc":
rewards.append(1 - output[-2]["roc_auc_score"])
model_filename = os.path.join(
temp_dir, "M_" + str(fbl_name[-4]) + "_S_" + str(fbl_name[-1]) + ".json"
)
finished_model.append(model_filename)
fbl_meta.append((fbl_name, fbl_id))
all_arc_vecs.append(nasrec_arc_vec)
all_actions.append(action)
all_params.append(params)
all_flops.append(flops)
if args.save_model_path:
try:
logger.info("Saving model to {}".format(temp_dir))
with open(model_filename, "w") as fp:
json.dump(nasrec_net, fp)
fp.close()
np.save(os.path.join(temp_dir, "rewards.npy"), np.array(rewards))
np.save(os.path.join(temp_dir, "all_roc_aucs.npy"), np.array(all_roc_aucs))
np.save(
os.path.join(temp_dir, "all_arc_vecs.npy"), np.array(all_arc_vecs)
)
np.save(
os.path.join(temp_dir, "all_actions.npy"), np.array(all_actions)
)
np.save(
os.path.join(temp_dir, "all_params.npy"), np.array(all_params)
)
np.save(
os.path.join(temp_dir, "all_flops.npy"), np.array(all_flops)
)
np.save(
os.path.join(temp_dir, "finished_model.npy"),
np.array(finished_model),
)
np.save(os.path.join(temp_dir, "fbl_meta.npy"), np.array(fbl_meta))
if args.searcher_type == "evo":
np.save(os.path.join(temp_dir, "is_initial.npy"), np.array(is_initial))
except Exception:
logger.warning("Failed to save the model")
# update best arc
if rewards[-1] < best_val_loss:
best_fbl_id, best_model, best_val_loss, best_name = (
fbl_id,
nasrec_net,
rewards[-1],
fbl_name,
)
if args.save_model_path:
try:
logger.warning("Saving the best model to {}".format(temp_dir))
model_filename = os.path.join(temp_dir, "Best_Model" + ".json")
with open(model_filename, "w") as fp:
json.dump(best_model, fp)
fp.close()
with open(os.path.join(temp_dir, "best_model_id.txt"), "w") as fp:
fp.write(
"M_"
+ str(fbl_name[-4])
+ "_S_"
+ str(fbl_name[-1])
+ ".json"
+ "\n"
)
fp.close()
except Exception:
logger.warning("Failed to save the best model")
# pickling meta data for resume purpose
if args.save_model_path:
with open(os.path.join(temp_dir, "meta.txt"), "wb") as fp:
pickle.dump(
[
best_val_loss,
best_model,
best_name,
best_fbl_id,
total_model,
epoch,
],
fp,
)
fp.close()
logger.warning(
"{} model has been finished. The current best arc is: f{}_M_{}_S_{}. Its avg_val_loss is {}.".format(
len(rewards), best_fbl_id, best_name[-4], best_name[-1], best_val_loss
)
)
# 3. update searcher
epoch = len(rewards)
# epoch += 1
logger.warning("Searcher update epoch {}.".format(epoch))
if args.searcher_type == "evo":
searcher.all_arc_vecs = all_arc_vecs
searcher.all_actions = all_actions
searcher.all_params = all_params
            searcher.all_flops = all_flops
searcher.all_rewards = rewards
searcher.all_roc_aucs = all_roc_aucs
searcher.update([action], [rewards[-1]], survival_type=args.survival_type)
logger.warning("Total_length update: arc_{}, val_{}".format(
len(searcher.population_arc_queue),
len(searcher.population_val_queue)
))
if (
is_initial
and len(searcher.population_arc_queue) == args.population_size
):
is_initial = False
for proc in fbl_run_queue:
proc.terminate()
fbl_run_queue = []
fbl_time_queue = []
fbl_name_queue = []
fbl_id_queue = []
nasrec_net_queue = []
nasrec_arc_vec_queue = []
action_queue = []
params_queue = []
flops_queue = []
# save searcher
save_searcher(os.path.join(temp_dir, "searcher.ckp"), searcher)
# Kill all remaining workflows on fblearner
for proc in fbl_run_queue:
proc.terminate()
logger.warning(
"The best arc is: f{}_M_{}_S_{}. Its avg_val_loss is {}.".format(
best_fbl_id, best_name[-4], best_name[-1], best_val_loss
)
)
logger.warning("\nAll avg_val_loss are {}.".format(rewards))
if args.save_model_path:
try:
logger.warning("Saving the best model to {}".format(temp_dir))
model_filename = os.path.join(
temp_dir,
"Best_Model_M_"
+ str(fbl_name[-4])
+ "_S_"
+ str(fbl_name[-1])
+ ".json",
)
with open(model_filename, "w") as fp:
json.dump(best_model, fp)
except Exception:
logger.warning("Failed to save the best model")
|
AutoCTR-main
|
scripts/search.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE_CC-BY-NC4.0 file in the root directory of this source tree.
import argparse
import glob, json, os, re
import tarfile, zipfile
import urllib.request
import xml.etree.ElementTree as et
import numpy as np
import pandas as pd
from nltk.tokenize import sent_tokenize
from nltk_data.init import init_nltk_data
def download_files(directory, urls, unzipped_filename):
"""download files from the given URLs to a local directory"""
# Create a directory to store the downloaded files
download_directory = os.path.join(directory, "downloaded_files")
if not os.path.exists(download_directory):
os.mkdir(download_directory)
# Loop through the URLs and download each file
for dataset_name, url in urls.items():
filename = url.split("/")[-1]
filepath = os.path.join(download_directory, filename)
# Download the file
if os.path.exists(filepath):
print(f"Skipping downloading {dataset_name} as it already exists.")
else:
urllib.request.urlretrieve(url, filepath)
print(f"Successfully downloaded {dataset_name}")
if os.path.exists(
os.path.join(download_directory, unzipped_filename[dataset_name])
):
print(f"Skipping extracting {dataset_name} as it already has been done.")
else:
if dataset_name == "ReClor":
# Unzip the password-protected file
with zipfile.ZipFile(filepath, "r") as z:
z.extractall(
os.path.join(download_directory, "reclor"),
pwd=bytes("for_non-commercial_research_purpose_only", "utf-8"),
)
elif dataset_name == "MCScript2.0":
with zipfile.ZipFile(filepath, "r") as z:
z.extractall(os.path.join(download_directory, "mcscript"))
elif url[-3:] == "zip":
# Unzip the file
with zipfile.ZipFile(filepath, "r") as z:
z.extractall(download_directory)
elif url[-3:] == ".gz":
# Extract the archive to the same folder
with tarfile.open(filepath, "r") as t:
t.extractall(download_directory)
print(f"Successfully extracted {dataset_name}")
return download_directory
def process_sciq(download_directory):
"""process the SciQ json files and return Pandas df"""
train = pd.read_json(
os.path.join(download_directory, "SciQ dataset-2 3/train.json")
)
val = pd.read_json(os.path.join(download_directory, "SciQ dataset-2 3/valid.json"))
joined = pd.concat([train, val], keys=["train", "val"])
# remove fill-in-the-blank
sciQ = joined.loc[~joined.question.str.contains("_")]
# use NLTK sent tokenizer to count the number of sentences in the passage
sciQ["num_sentences"] = sciQ.support.apply(lambda x: sent_tokenize(x)).str.len()
sciQ["passage_id"] = sciQ.support.apply(hash)
# randomly shuffle answers
newcolnames = ["answer1", "answer2", "answer3", "answer4"]
np.random.seed(0)
sciQ[newcolnames] = sciQ.apply(
lambda x: pd.Series(
np.random.choice(
x[["distractor1", "distractor2", "distractor3", "correct_answer"]],
4,
replace=False,
),
index=newcolnames,
),
axis=1,
)
# retrieve correct answer
def get_correct_answer_num(row):
for i in [1, 2, 3, 4]:
if row["correct_answer"] == row["answer" + str(i)]:
return i
# finalize format and filter out long passages
sciQ["correct_answer_num"] = sciQ.apply(get_correct_answer_num, axis=1)
sciQ["passage_id"] = sciQ.groupby("support").ngroup()
sciQ_reset = (
sciQ.loc[sciQ.support.str.len() >= 1]
.reset_index()
.rename(columns={"support": "passage", "level_1": "question_id"})
)
sciQ_reset["split"] = sciQ_reset.level_0.apply(lambda x: "dev" if x == "val" else x)
sciQ_reset["dataset"] = "sciQ"
return sciQ_reset.loc[sciQ_reset.num_sentences <= 25][final_table_columns]
def process_multirc(download_directory):
"""process the MultiRC json files and return Pandas df"""
with open(os.path.join(download_directory, "splitv2/dev_83-fixedIds.json")) as f:
multirc_dev = json.load(f)["data"]
with open(os.path.join(download_directory, "splitv2/train_456-fixedIds.json")) as f:
multirc_train = json.load(f)["data"]
# unpack json format to pandas table
i = 0
multirc_dict = {}
reg_str = "</b>(.*?)<br>"
for split, data in {"dev": multirc_dev, "train": multirc_train}.items():
for para in data:
res = re.findall(reg_str, para["paragraph"]["text"])
para_text = " ".join(res)
num_sents = len(res)
for q in para["paragraph"]["questions"]:
multirc_dict[i] = {
"split": split,
"passage_id": para["id"],
"passage": para_text,
"num_sentences": num_sents,
"question_dict": q,
}
i += 1
unpacked = pd.DataFrame.from_dict(multirc_dict, orient="index")
# get number of answers and correct answers
def get_num_correct(q):
return sum(a["isAnswer"] for a in q["answers"])
unpacked["num_correct_answers"] = unpacked.question_dict.apply(get_num_correct)
unpacked["num_answers"] = unpacked.apply(
lambda x: len(x["question_dict"]["answers"]), axis=1
)
# filter questions that match Belebele format and where passages aren't too long
one_answer = unpacked.loc[
(unpacked.num_correct_answers == 1)
& (unpacked.num_answers >= 4)
& (unpacked.num_sentences <= 25)
].copy()
# randomly shuffle answers and reformat
np.random.seed(0)
newcolnames = [
"question",
"question_id",
"answer1",
"answer2",
"answer3",
"answer4",
"correct_answer",
"correct_answer_num",
]
def process_question(question):
newcols = {"question": question["question"], "question_id": question["idx"]}
answers = question["answers"]
while len(answers) != 4 or (not any(a["isAnswer"] for a in answers)):
answers = np.random.choice(question["answers"], 4, replace=False)
for i in [1, 2, 3, 4]:
newcols["answer" + str(i)] = answers[i - 1]["text"]
if answers[i - 1]["isAnswer"]:
newcols["correct_answer"] = answers[i - 1]["text"]
newcols["correct_answer_num"] = i
return pd.Series(newcols)
one_answer[newcolnames] = one_answer.question_dict.apply(process_question)
one_answer["dataset"] = "MultiRC"
return one_answer[final_table_columns]
def process_mcscript(download_directory):
"""process the MCScript xml files and return Pandas df"""
# unpack xml format to pandas table
mc_script_dict = {}
i = 0
# only using train data, not taking dev or test set.
    xtree = et.parse(os.path.join(download_directory, "mcscript/train-data.xml"))
xroot = xtree.getroot()
for node in xroot:
passage_id = node.attrib.get("id")
text = node.find("text").text
# use NLTK sent tokenizer to count the number of sentences in the passage
num_sentences = len(sent_tokenize(text))
for q in node.find("questions"):
mc_script_dict[i] = {
"split": "train",
"passage_id": passage_id,
"passage": text,
"question_id": q.attrib.get("id"),
"question": q.attrib.get("text"),
"num_sentences": num_sentences,
}
correct_answer = ""
correct_ans_id = -1
for ans in q:
ans_id = ans.attrib.get("id")
mc_script_dict[i]["answer_" + ans_id] = ans.attrib.get("text")
if ans.attrib.get("correct") == "True":
correct_answer = mc_script_dict[i]["answer_" + ans_id]
correct_ans_id = ans_id
if correct_ans_id == -1:
print(mc_script_dict[i])
mc_script_dict[i]["correct_answer"] = correct_answer
mc_script_dict[i]["correct_answer_id"] = "answer_" + correct_ans_id
i += 1
mc_script_unpacked = pd.DataFrame.from_dict(mc_script_dict, orient="index")
mc_script_unpacked = mc_script_unpacked.loc[mc_script_unpacked.num_sentences <= 25]
# shuffle and reformat questions
newcols = ["answer1", "answer2", "answer3", "answer4", "correct_answer_num"]
def process_mcscript_row(row):
new_dict = {}
similar_rows = mc_script_unpacked.loc[
(mc_script_unpacked.split == row.split)
& (mc_script_unpacked.passage_id == row.passage_id)
& (mc_script_unpacked.question_id != row.question_id)
]
similar_answers = similar_rows[["answer_0", "answer_1"]].to_numpy().flatten()
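        # Each MCScript question only has two original options; pad to four by
        # drawing two distractors, preferring answers from other questions on the
        # same passage, and retrying until the distractors do not duplicate the
        # question's own options.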
while len(new_dict.keys()) == 0:
if len(similar_rows) == 0:
two_ans = np.random.choice(
mc_script_unpacked.correct_answer, 2, replace=False
)
else:
two_ans = np.random.choice(similar_answers, 2, replace=False)
if (two_ans[0] in row[["answer_0", "answer_1"]]) or (
two_ans[1] in row[["answer_0", "answer_1"]]
):
continue
new_ans = np.random.choice(
np.concatenate([two_ans, row[["answer_0", "answer_1"]]]),
4,
replace=False,
)
for i in [1, 2, 3, 4]:
new_dict["answer" + str(i)] = new_ans[i - 1]
if new_ans[i - 1] == row["correct_answer"]:
new_dict["correct_answer_num"] = i
return pd.Series(new_dict)
np.random.seed(0)
mc_script_unpacked[newcols] = mc_script_unpacked.apply(process_mcscript_row, axis=1)
mc_script_unpacked["dataset"] = "MCScript2.0"
return mc_script_unpacked[final_table_columns]
def process_mctest(download_directory):
"""process the MCTest tsv files and return Pandas df"""
mc500_raw = {}
# not using test split
for split in ["train", "dev"]:
raw_df = pd.read_csv(
os.path.join(download_directory, f"MCTest/mc500.{split}.tsv"),
sep="\t",
names=[
"mc500_id",
"metadata",
"passage",
"question1",
"MC_answer1.1",
"MC_answer1.2",
"MC_answer1.3",
"MC_answer1.4",
"question2",
"MC_answer2.1",
"MC_answer2.2",
"MC_answer2.3",
"MC_answer2.4",
"question3",
"MC_answer3.1",
"MC_answer3.2",
"MC_answer3.3",
"MC_answer3.4",
"question4",
"MC_answer4.1",
"MC_answer4.2",
"MC_answer4.3",
"MC_answer4.4",
],
)
ans_df = pd.read_csv(
os.path.join(download_directory, f"MCTest/mc500.{split}.ans"),
sep="\t",
names=[
"question1_answer",
"question2_answer",
"question3_answer",
"question4_answer",
],
)
joined_df = raw_df.merge(ans_df, left_index=True, right_index=True)
mc500_raw[split] = joined_df
mc500_all_raw = pd.concat(mc500_raw.values())
# extract answer values to correct format
def get_answer_values(row, num):
conversion = {"A": "1", "B": "2", "C": "3", "D": "4"}
answer_column = (
"MC_answer" + str(num) + "." + conversion[row[f"question{str(num)}_answer"]]
)
return row[answer_column]
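    # Illustrative mapping: a letter answer "B" for question 2 is converted to
    # "2", so the answer text is read from column "MC_answer2.2".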
for num in [1, 2, 3, 4]:
mc500_all_raw[f"question{str(num)}_answer"] = mc500_all_raw.apply(
get_answer_values, args=[num], axis=1
)
# melt to get question and answer columns in one dataframe
dfq = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["question1", "question2", "question3", "question4"],
value_name="question",
var_name="question_number",
)
dfa1 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.1", "MC_answer2.1", "MC_answer3.1", "MC_answer4.1"],
value_name="MC_answer1",
)
dfa2 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.2", "MC_answer2.2", "MC_answer3.2", "MC_answer4.2"],
value_name="MC_answer2",
)
dfa3 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.3", "MC_answer2.3", "MC_answer3.3", "MC_answer4.3"],
value_name="MC_answer3",
)
dfa4 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.4", "MC_answer2.4", "MC_answer3.4", "MC_answer4.4"],
value_name="MC_answer4",
)
dfca = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=[
"question1_answer",
"question2_answer",
"question3_answer",
"question4_answer",
],
value_name="correct_answer",
)
mc500_all = pd.concat(
[
dfq,
dfa1.drop(["mc500_id", "passage", "variable"], axis=1),
dfa2.drop(["mc500_id", "passage", "variable"], axis=1),
dfa3.drop(["mc500_id", "passage", "variable"], axis=1),
dfa4.drop(["mc500_id", "passage", "variable"], axis=1),
dfca.drop(["mc500_id", "passage", "variable"], axis=1),
],
axis=1,
)
    # extract the prefix of each question, which indicates how many passage sentences are needed to answer it
mc500_all["sent_required"] = mc500_all.question.str.split(":").str[0].str.strip()
mc500_all["question"] = mc500_all.question.str.split(":").str[1].str.strip()
# use NLTK sent tokenizer to count the number of sentences in the passage
mc500_all["num_sentences"] = mc500_all.passage.apply(
lambda x: sent_tokenize(x)
).str.len()
def get_correct_answer_num(row):
for i in [1, 2, 3, 4]:
if row["MC_answer" + str(i)] == row["correct_answer"]:
return i
mc500_all["correct_answer_num"] = mc500_all.apply(get_correct_answer_num, axis=1)
mc500_all["passage_id"] = mc500_all.mc500_id.apply(lambda x: x.split(".")[-1])
mc500_all["question_id"] = mc500_all.question_number.str.replace("question", "")
mc500_all["dataset"] = "MCTest_500"
mc500_all["split"] = [a[1] for a in mc500_all.mc500_id.str.split(".")]
return mc500_all.loc[mc500_all.num_sentences <= 25].rename(
mapper=(lambda x: x.replace("MC_", "")), axis=1
)[final_table_columns]
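# Illustrative sketch (not called anywhere): how pd.melt reshapes the wide
# MCTest layout into one row per question, as done in process_mctest above.
# The column names below are hypothetical stand-ins.
def _melt_example():
    wide = pd.DataFrame(
        {
            "passage": ["p1"],
            "question1": ["Who?"],
            "question2": ["Where?"],
        }
    )
    long_format = wide.melt(
        id_vars=["passage"],
        value_vars=["question1", "question2"],
        var_name="question_number",
        value_name="question",
    )
    # long_format now has one row per (passage, question) pair
    return long_format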
def process_race(download_directory):
"""process the RACE txt files and return Pandas df"""
# unpack all the .txt files of the dataset into a single pandas table
race_dict = {}
i = 0
for split in ["dev", "train"]:
for level in ["middle", "high"]:
for file in glob.glob(
os.path.join(download_directory, f"RACE/{split}/{level}/*.txt")
):
with open(file) as f:
file_str = f.read()
file_dict = json.loads(file_str)
num_sentences = len(sent_tokenize(file_dict["article"]))
num_qs = len(file_dict["answers"])
for q in range(num_qs):
race_dict[i] = {
"split": split,
"level": level,
"passage_id": file_dict["id"],
"passage": file_dict["article"],
"question_id": q,
"question": file_dict["questions"][q],
"num_sentences": num_sentences,
}
# rename answer columns
for j in range(len(file_dict["options"][q])):
race_dict[i]["answer" + str(j + 1)] = file_dict["options"][q][j]
race_dict[i]["correct_answer_num"] = (
ord(file_dict["answers"][q]) - 64
)
race_dict[i]["correct_answer"] = file_dict["options"][q][
race_dict[i]["correct_answer_num"] - 1
]
i += 1
race_unpacked = pd.DataFrame.from_dict(race_dict, orient="index")
# remove fill-in-the-blank questions
race_unpacked = race_unpacked.loc[~race_unpacked.question.str.contains("_")]
race_unpacked["dataset"] = "RACE"
return race_unpacked.loc[race_unpacked.num_sentences <= 25][final_table_columns]
def process_reclor(download_directory):
"""process the ReClor json files and return Pandas df"""
    # unpack the json format into a pandas table
reclor_dict = {}
i = 0
for split in ["train", "val"]: # did not include test
with open(os.path.join(download_directory, f"reclor/{split}.json")) as f:
file_str = f.read()
file_dict = json.loads(file_str)
if split == "val":
split = "dev"
for item in file_dict:
idx = item["id_string"].split("_")[-1]
reclor_dict[i] = {
"split": split,
"passage_id": idx,
"question_id": idx,
"passage": item["context"],
"question": item["question"],
}
for j in range(len(item["answers"])):
reclor_dict[i]["answer" + str(j + 1)] = item["answers"][j]
reclor_dict[i]["correct_answer_num"] = item["label"] + 1
reclor_dict[i]["correct_answer"] = item["answers"][item["label"]]
i += 1
reclor_unpacked = pd.DataFrame.from_dict(reclor_dict, orient="index")
reclor_unpacked["dataset"] = "ReClor"
return reclor_unpacked[final_table_columns]
if __name__ == "__main__":
os.environ["HTTPS_PROXY"] = "http://fwdproxy:8080"
parser = argparse.ArgumentParser(
description="Assemble samples from numerous datasets and generate a JSON to serve as the training set for Belebele"
)
parser.add_argument(
"--data_path",
help="Path to the json dataset",
)
parser.add_argument(
"--downloads_path",
help="Path to folder where all the files required to assemble the training set will be downloaded",
default=".",
)
parser.add_argument(
"--output_file",
help="Path to file with the final training set (in tsv format)",
default="belebele_training_set.tsv",
)
args = parser.parse_args()
# the URLs to download
urls = {
"MultiRC": "https://cogcomp.seas.upenn.edu/multirc/data/mutlirc-v2.zip",
"MCScript2.0": "https://fedora.clarin-d.uni-saarland.de/sfb1102/MCScript-2.0.zip",
"MCTest": "https://mattr1.github.io/mctest/data/MCTest.zip",
"RACE": "http://www.cs.cmu.edu/~glai1/data/race/RACE.tar.gz",
"SciQ": "https://ai2-public-datasets.s3.amazonaws.com/sciq/SciQ.zip",
"ReClor": "https://github.com/yuweihao/reclor/releases/download/v1/reclor_data.zip",
}
# the name of the files once unzipped
unzipped_filenames = {
"MultiRC": "splitv2",
"ReClor": "reclor",
"RACE": "RACE",
"SciQ": "SciQ dataset-2 3",
"MCScript2.0": "mcscript",
"MCTest": "MCTest",
}
downloads_repo = download_files(args.downloads_path, urls, unzipped_filenames)
final_table_columns = [
"dataset",
"split",
"passage_id",
"question_id",
"passage",
"question",
"answer1",
"answer2",
"answer3",
"answer4",
"correct_answer",
"correct_answer_num",
]
init_nltk_data()
multirc_ready = process_multirc(downloads_repo)
print("Finished processing MultiRC.")
print("Starting to process MCScript2.0... this may take around 5 minutes")
mcscript_ready = process_mcscript(downloads_repo)
print("Finished processing MCScript2.0.")
mctest_ready = process_mctest(downloads_repo)
print("Finished processing MCTest.")
sciq_ready = process_sciq(downloads_repo)
print("Finished processing SciQ.")
reclor_ready = process_reclor(downloads_repo)
print("Finished processing ReClor.")
race_ready = process_race(downloads_repo)
print("Finished processing RACE... now joining them altogether.")
combined = pd.concat(
[
sciq_ready,
mcscript_ready,
mctest_ready,
multirc_ready,
race_ready,
reclor_ready,
]
)
combined.to_csv(args.output_file, sep="\t")
print(f"Finished creating training set and dumped into {args.output_file}")
print(
"Beware when loading the data from the tsv, there are many newline characters, double quotes, single quotes, etc., especially in the RACE passages."
)
|
belebele-main
|
assemble_training_set.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import find_packages, setup
REQUIRES = [
"matplotlib",
"torch",
"scipy",
"SQLAlchemy==1.4.46",
"dill",
"pandas",
"aepsych_client==0.3.0",
"statsmodels",
"ax-platform==0.3.1",
"botorch==0.8.3",
]
BENCHMARK_REQUIRES = ["tqdm", "pathos", "multiprocess"]
DEV_REQUIRES = BENCHMARK_REQUIRES + [
"coverage",
"flake8",
"black",
"numpy>=1.20",
"sqlalchemy-stubs", # for mypy stubs
"mypy",
"parameterized",
"scikit-learn", # used in unit tests
]
VISUALIZER_REQUIRES = [
"voila==0.3.6",
"ipywidgets==7.6.5",
]
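# These extras are exposed through setup()'s extras_require below, so e.g.
# `pip install aepsych[visualizer]` installs VISUALIZER_REQUIRES on top of REQUIRES.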
with open("README.md", "r") as fh:
long_description = fh.read()
with open(os.path.join("aepsych", "version.py"), "r") as fh:
for line in fh.readlines():
if line.startswith("__version__"):
version = line.split('"')[1]
setup(
name="aepsych",
version=version,
python_requires=">=3.8",
packages=find_packages(),
description="Adaptive experimetation for psychophysics",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=REQUIRES,
extras_require={
"dev": DEV_REQUIRES,
"benchmark": BENCHMARK_REQUIRES,
"visualizer": VISUALIZER_REQUIRES,
},
entry_points={
"console_scripts": [
"aepsych_server = aepsych.server.server:main",
"aepsych_database = aepsych.server.utils:main",
],
},
)
|
aepsych-main
|
setup.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "Readme.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="aepsych_client",
version="0.3.0",
packages=find_packages(),
long_description=long_description,
long_description_content_type="text/markdown",
)
|
aepsych-main
|
clients/python/setup.py
|
aepsych-main
|
clients/python/tests/__init__.py
|
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import unittest
import uuid
from unittest.mock import MagicMock, patch
import torch
from aepsych.acquisition import MCPosteriorVariance
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.models import GPClassificationModel
from aepsych.server import AEPsychServer
from aepsych_client import AEPsychClient
from torch import tensor
class MockStrategy:
def gen(self, num_points):
self._count = self._count + num_points
return torch.tensor([[0.0]])
class RemoteServerTestCase(unittest.TestCase):
def setUp(self):
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = AEPsychServer(database_path=database_path)
self.client = AEPsychClient(connect=False)
self.client._send_recv = MagicMock(
wraps=lambda x: json.dumps(self.s.handle_request(x))
)
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
@patch(
"aepsych.strategy.Strategy.gen",
new=MockStrategy.gen,
)
def test_client(self):
config_str = """
[common]
lb = [0]
ub = [1]
parnames = [x]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
acqf = MCPosteriorVariance
model = GPClassificationModel
[init_strat]
min_asks = 1
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 1
generator = OptimizeAcqfGenerator
min_total_outcome_occurrences = 0
"""
self.client.configure(config_str=config_str, config_name="first_config")
self.assertEqual(self.s.strat_id, 0)
self.assertEqual(self.s.strat.strat_list[0].min_asks, 1)
self.assertEqual(self.s.strat.strat_list[1].min_asks, 1)
self.assertIsInstance(self.s.strat.strat_list[0].generator, SobolGenerator)
self.assertIsInstance(
self.s.strat.strat_list[1].generator, OptimizeAcqfGenerator
)
self.assertIsInstance(self.s.strat.strat_list[1].model, GPClassificationModel)
self.assertEqual(self.s.strat.strat_list[1].generator.acqf, MCPosteriorVariance)
response = self.client.ask()
self.assertSetEqual(set(response["config"].keys()), {"x"})
self.assertEqual(len(response["config"]["x"]), 1)
self.assertTrue(0 <= response["config"]["x"][0] <= 1)
self.assertFalse(response["is_finished"])
self.assertEqual(self.s.strat._count, 1)
self.client.tell(config={"x": [0]}, outcome=1)
self.assertEqual(self.s._strats[0].x, tensor([[0.0]]))
self.assertEqual(self.s._strats[0].y, tensor([[1.0]]))
self.client.tell(config={"x": [0]}, outcome=1, model_data=False)
self.assertEqual(self.s._strats[0].x, tensor([[0.0]]))
self.assertEqual(self.s._strats[0].y, tensor([[1.0]]))
response = self.client.ask()
self.assertTrue(response["is_finished"])
self.client.configure(config_str=config_str, config_name="second_config")
self.assertEqual(self.s.strat._count, 0)
self.assertEqual(self.s.strat_id, 1)
self.client.resume(config_name="first_config")
self.assertEqual(self.s.strat_id, 0)
self.client.resume(config_name="second_config")
self.assertEqual(self.s.strat_id, 1)
self.client.finalize()
class LocalServerTestCase(RemoteServerTestCase):
def setUp(self):
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = AEPsychServer(database_path=database_path)
self.client = AEPsychClient(server=self.s)
def test_warns_ignored_args(self):
with self.assertWarns(UserWarning):
AEPsychClient(ip="0.0.0.0", port=5555, server=self.s)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
clients/python/tests/test_client.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import socket
import warnings
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from aepsych.server import AEPsychServer
class ServerError(RuntimeError):
pass
class AEPsychClient:
def __init__(
self,
ip: Optional[str] = None,
port: Optional[int] = None,
connect: bool = True,
server: "AEPsychServer" = None,
) -> None:
"""Python client for AEPsych using built-in python sockets. By default it connects
to a localhost server matching AEPsych defaults.
Args:
ip (str, optional): IP to connect to (default: localhost).
            port (int, optional): Port to connect on (default: 5555).
connect (bool): Connect as part of init? Defaults to True.
server (AEPsychServer, optional): An in-memory AEPsychServer object to connect to.
If this is not None, the other arguments will be ignored.
"""
self.configs = []
self.config_names = {}
self.server = server
if server is not None and (ip is not None or port is not None):
warnings.warn(
"AEPsychClient will ignore ip and port since it was given a server object!",
UserWarning,
)
if server is None:
ip = ip or "0.0.0.0"
port = port or 5555
self.socket = socket.socket()
if connect:
self.connect(ip, port)
def load_config_index(self) -> None:
"""Loads the config index when server is not None"""
self.configs = []
for i in range(self.server.n_strats):
self.configs.append(i)
def connect(self, ip: str, port: int) -> None:
"""Connect to the server.
Args:
ip (str): IP to connect to.
            port (int): Port to connect on.
"""
addr = (ip, port)
self.socket.connect(addr)
def finalize(self) -> None:
"""Let the server know experiment is complete."""
request = {"message": "", "type": "exit"}
self._send_recv(request)
def _send_recv(self, message) -> str:
if self.server is not None:
return self.server.handle_request(message)
message = bytes(json.dumps(message), encoding="utf-8")
self.socket.send(message)
response = self.socket.recv(4096).decode("utf-8")
        # TODO this is hacky, but we don't consistently return JSON
        # from the server, so we can't check for a status
if response[:12] == "server_error":
error_message = response[13:]
raise ServerError(error_message)
return response
def ask(
self, num_points: int = 1
) -> Union[Dict[str, List[float]], Dict[int, Dict[str, Any]]]:
"""Get next configuration from server.
Args:
            num_points (int): Number of points to return.
Returns:
Dict[int, Dict[str, Any]]: Next configuration(s) to evaluate.
If using the legacy backend, this is formatted as a dictionary where keys are parameter names and values
are lists of parameter values.
If using the Ax backend, this is formatted as a dictionary of dictionaries where the outer keys are trial indices,
the inner keys are parameter names, and the values are parameter values.
"""
request = {"message": {"num_points": num_points}, "type": "ask"}
response = self._send_recv(request)
if isinstance(response, str):
response = json.loads(response)
return response
def tell_trial_by_index(
self,
trial_index: int,
outcome: int,
model_data: bool = True,
**metadata: Dict[str, Any],
) -> None:
"""Update the server on a trial that already has a trial index, as provided by `ask`.
Args:
outcome (int): Outcome that was obtained.
model_data (bool): If True, the data will be recorded in the db and included in the server's model. If False,
the data will be recorded in the db, but will not be used by the model. Defaults to True.
trial_index (int): The associated trial index of the config.
metadata (optional kwargs) is passed to the extra_info field on the server.
Raises:
AssertionError if server failed to acknowledge the tell.
"""
request = {
"type": "tell",
"message": {
"outcome": outcome,
"model_data": model_data,
"trial_index": trial_index,
},
"extra_info": metadata,
}
self._send_recv(request)
def tell(
self,
config: Dict[str, List[Any]],
outcome: int,
model_data: bool = True,
**metadata: Dict[str, Any],
) -> None:
"""Update the server on a configuration that was executed. Use this method when using the legacy backend or for
        manually-generated trials without an associated trial_index when using the Ax backend.
Args:
config (Dict[str, str]): Config that was evaluated.
outcome (int): Outcome that was obtained.
metadata (optional kwargs) is passed to the extra_info field on the server.
model_data (bool): If True, the data will be recorded in the db and included in the server's model. If False,
the data will be recorded in the db, but will not be used by the model. Defaults to True.
Raises:
AssertionError if server failed to acknowledge the tell.
"""
request = {
"type": "tell",
"message": {
"config": config,
"outcome": outcome,
"model_data": model_data,
},
"extra_info": metadata,
}
self._send_recv(request)
def configure(
        self, config_path: Optional[str] = None, config_str: Optional[str] = None, config_name: Optional[str] = None
) -> None:
"""Configure the server and prepare for data collection.
Note that either config_path or config_str must be passed.
Args:
config_path (str, optional): Path to a config.ini. Defaults to None.
config_str (str, optional): Config.ini encoded as a string. Defaults to None.
config_name (str, optional): A name to assign to this config internally for convenience.
Raises:
AssertionError if neither config path nor config_str is passed.
"""
if config_path is not None:
assert config_str is None, "if config_path is passed, don't pass config_str"
with open(config_path, "r") as f:
config_str = f.read()
elif config_str is not None:
assert (
config_path is None
), "if config_str is passed, don't pass config_path"
request = {
"type": "setup",
"message": {"config_str": config_str},
}
idx = int(self._send_recv(request))
self.configs.append(idx)
if config_name is not None:
self.config_names[config_name] = idx
    def resume(self, config_id: Optional[int] = None, config_name: Optional[str] = None):
"""Resume a previous config from this session. To access available configs,
use client.configs or client.config_names
Args:
config_id (int, optional): ID of config to resume.
config_name (str, optional): Name config to resume.
Raises:
AssertionError if name or ID does not exist, or if both name and ID are passed.
"""
if config_id is not None:
assert config_name is None, "if config_id is passed, don't pass config_name"
assert (
config_id in self.configs
), f"No strat with index {config_id} was created!"
elif config_name is not None:
assert config_id is None, "if config_name is passed, don't pass config_id"
assert (
config_name in self.config_names.keys()
), f"{config_name} not known, know {self.config_names.keys()}!"
config_id = self.config_names[config_name]
request = {
"type": "resume",
"message": {"strat_id": config_id},
}
self._send_recv(request)
    def __del__(self):
self.finalize()
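# Illustrative sketch of a minimal session (not executed on import); the server
# address, config string, and outcome below are placeholder values.
def _example_session():
    client = AEPsychClient(ip="127.0.0.1", port=5555)
    client.configure(config_str="[common]\n...")  # INI-formatted config; see configure()
    trial = client.ask()  # {"config": {...}, "is_finished": bool} on the legacy backend
    # run the trial using trial["config"], then report a binary outcome
    client.tell(config=trial["config"], outcome=1)
    client.finalize()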
|
aepsych-main
|
clients/python/aepsych_client/client.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .client import AEPsychClient
__all__ = ["AEPsychClient"]
|
aepsych-main
|
clients/python/aepsych_client/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
import configparser
import json
import warnings
from types import ModuleType
from typing import Any, ClassVar, Dict, List, Mapping, Optional, Sequence, TypeVar
import botorch
import gpytorch
import numpy as np
import torch
from aepsych.version import __version__
_T = TypeVar("_T")
class Config(configparser.ConfigParser):
# names in these packages can be referred to by string name
registered_names: ClassVar[Dict[str, object]] = {}
def __init__(
self,
config_dict: Optional[Mapping[str, Any]] = None,
config_fnames: Optional[Sequence[str]] = None,
config_str: Optional[str] = None,
):
"""Initialize the AEPsych config object. This can be used to instantiate most
objects in AEPsych by calling object.from_config(config).
Args:
config_dict (Mapping[str, str], optional): Mapping to build configuration from.
Keys are section names, values are dictionaries with keys and values that
should be present in the section. Defaults to None.
config_fnames (Sequence[str], optional): List of INI filenames to load
configuration from. Defaults to None.
config_str (str, optional): String formatted as an INI file to load configuration
from. Defaults to None.
"""
super().__init__(
inline_comment_prefixes=("#"),
empty_lines_in_values=False,
default_section="common",
interpolation=configparser.ExtendedInterpolation(),
converters={
"list": self._str_to_list,
"tensor": self._str_to_tensor,
"obj": self._str_to_obj,
"array": self._str_to_array,
},
allow_no_value=True,
)
self.update(
config_dict=config_dict,
config_fnames=config_fnames,
config_str=config_str,
)
def _get(
self,
section,
conv,
option,
*,
raw=False,
vars=None,
fallback=configparser._UNSET,
**kwargs,
):
"""
Override configparser to:
1. Return from common if a section doesn't exist. This comes
up any time we have a module fully configured from the
common/default section.
2. Pass extra **kwargs to the converter.
"""
try:
return conv(
self.get(
section=section,
option=option,
raw=raw,
vars=vars,
fallback=fallback,
),
**kwargs,
)
except configparser.NoSectionError:
return conv(
self.get(
section="common",
option=option,
raw=raw,
vars=vars,
fallback=fallback,
),
**kwargs,
)
# Convert config into a dictionary (eliminate duplicates from defaulted 'common' section.)
def to_dict(self, deduplicate=True):
_dict = {}
for section in self:
_dict[section] = {}
for setting in self[section]:
if deduplicate and section != "common" and setting in self["common"]:
continue
_dict[section][setting] = self[section][setting]
return _dict
# Turn the metadata section into JSON.
def jsonifyMetadata(self) -> str:
configdict = self.to_dict()
return json.dumps(configdict["metadata"])
# Turn the entire config into JSON format.
def jsonifyAll(self) -> str:
configdict = self.to_dict()
return json.dumps(configdict)
def update(
self,
config_dict: Mapping[str, str] = None,
config_fnames: Sequence[str] = None,
config_str: str = None,
):
"""Update this object with a new configuration.
Args:
config_dict (Mapping[str, str], optional): Mapping to build configuration from.
Keys are section names, values are dictionaries with keys and values that
should be present in the section. Defaults to None.
config_fnames (Sequence[str], optional): List of INI filenames to load
configuration from. Defaults to None.
config_str (str, optional): String formatted as an INI file to load configuration
from. Defaults to None.
"""
if config_dict is not None:
self.read_dict(config_dict)
if config_fnames is not None:
read_ok = self.read(config_fnames)
if len(read_ok) < 1:
raise FileNotFoundError
if config_str is not None:
self.read_string(config_str)
        # Migrate the deprecated "experiment" section into "common"
if "experiment" in self:
for i in self["experiment"]:
self["common"][i] = self["experiment"][i]
del self["experiment"]
def _str_to_list(self, v: str, element_type: _T = float) -> List[_T]:
if v[0] == "[" and v[-1] == "]":
if v == "[]": # empty list
return []
else:
return [element_type(i.strip()) for i in v[1:-1].split(",")]
else:
return [v.strip()]
def _str_to_array(self, v: str) -> np.ndarray:
v = ast.literal_eval(v)
return np.array(v, dtype=float)
def _str_to_tensor(self, v: str) -> torch.Tensor:
return torch.Tensor(self._str_to_list(v))
def _str_to_obj(self, v: str, fallback_type: _T = str, warn: bool = True) -> object:
try:
return self.registered_names[v]
except KeyError:
if warn:
warnings.warn(f'No known object "{v}"!')
return fallback_type(v)
def __repr__(self):
return f"Config at {hex(id(self))}: \n {str(self)}"
@classmethod
def register_module(cls: _T, module: ModuleType):
"""Register a module with Config so that objects in it can
be referred to by their string name in config files.
Args:
module (ModuleType): Module to register.
"""
cls.registered_names.update(
{
name: getattr(module, name)
for name in module.__all__
if not isinstance(getattr(module, name), ModuleType)
}
)
@classmethod
def register_object(cls: _T, obj: object):
"""Register an object with Config so that it can be
referred to by its string name in config files.
Args:
obj (object): Object to register.
"""
if obj.__name__ in cls.registered_names.keys():
warnings.warn(
f"Registering {obj.__name__} but already"
+ f"have {cls.registered_names[obj.__name__]}"
+ "registered under that name!"
)
cls.registered_names.update({obj.__name__: obj})
def get_section(self, section):
sec = {}
for setting in self[section]:
if section != "common" and setting in self["common"]:
continue
sec[setting] = self[section][setting]
return sec
def __str__(self):
_str = ""
for section in self:
sec = self.get_section(section)
_str += f"[{section}]\n"
for setting in sec:
_str += f"{setting} = {self[section][setting]}\n"
return _str
def convert_to_latest(self):
self.convert(self.version, __version__)
def convert(self, from_version: str, to_version: str) -> None:
"""Converts a config from an older version to a newer version.
Args:
from_version (str): The version of the config to be converted.
to_version (str): The version the config should be converted to.
"""
if from_version == "0.0":
self["common"]["strategy_names"] = "[init_strat, opt_strat]"
if "experiment" in self:
for i in self["experiment"]:
self["common"][i] = self["experiment"][i]
bridge = self["common"]["modelbridge_cls"]
n_sobol = self["SobolStrategy"]["n_trials"]
n_opt = self["ModelWrapperStrategy"]["n_trials"]
if bridge == "PairwiseProbitModelbridge":
self["init_strat"] = {
"generator": "PairwiseSobolGenerator",
"min_asks": n_sobol,
}
self["opt_strat"] = {
"generator": "PairwiseOptimizeAcqfGenerator",
"model": "PairwiseProbitModel",
"min_asks": n_opt,
}
if "PairwiseProbitModelbridge" in self:
self["PairwiseOptimizeAcqfGenerator"] = self[
"PairwiseProbitModelbridge"
]
if "PairwiseGP" in self:
self["PairwiseProbitModel"] = self["PairwiseGP"]
elif bridge == "MonotonicSingleProbitModelbridge":
self["init_strat"] = {
"generator": "SobolGenerator",
"min_asks": n_sobol,
}
self["opt_strat"] = {
"generator": "MonotonicRejectionGenerator",
"model": "MonotonicRejectionGP",
"min_asks": n_opt,
}
if "MonotonicSingleProbitModelbridge" in self:
self["MonotonicRejectionGenerator"] = self[
"MonotonicSingleProbitModelbridge"
]
elif bridge == "SingleProbitModelbridge":
self["init_strat"] = {
"generator": "SobolGenerator",
"min_asks": n_sobol,
}
self["opt_strat"] = {
"generator": "OptimizeAcqfGenerator",
"model": "GPClassificationModel",
"min_asks": n_opt,
}
if "SingleProbitModelbridge" in self:
self["OptimizeAcqfGenerator"] = self["SingleProbitModelbridge"]
else:
raise NotImplementedError(
f"Refactor for {bridge} has not been implemented!"
)
if "ModelWrapperStrategy" in self:
if "refit_every" in self["ModelWrapperStrategy"]:
self["opt_strat"]["refit_every"] = self["ModelWrapperStrategy"][
"refit_every"
]
del self["common"]["model"]
if to_version == __version__:
if self["common"]["outcome_type"] == "single_probit":
self["common"]["stimuli_per_trial"] = "1"
self["common"]["outcome_types"] = "[binary]"
if self["common"]["outcome_type"] == "single_continuous":
self["common"]["stimuli_per_trial"] = "1"
self["common"]["outcome_types"] = "[continuous]"
if self["common"]["outcome_type"] == "pairwise_probit":
self["common"]["stimuli_per_trial"] = "2"
self["common"]["outcome_types"] = "[binary]"
del self["common"]["outcome_type"]
@property
def version(self) -> str:
"""Returns the version number of the config."""
# TODO: implement an explicit versioning system
# Try to infer the version
if "stimuli_per_trial" in self["common"] and "outcome_types" in self["common"]:
return __version__
if "common" in self and "strategy_names" in self["common"]:
return "0.1"
elif (
"SobolStrategy" in self
or "ModelWrapperStrategy" in self
or "EpsilonGreedyModelWrapperStrategy" in self
):
return "0.0"
else:
raise RuntimeError("Unrecognized config format!")
class ConfigurableMixin(abc.ABC):
    @classmethod
    @abc.abstractmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]: # noqa
raise NotImplementedError(
f"get_config_options hasn't been defined for {cls.__name__}!"
)
@classmethod
def from_config(cls, config: Config, name: Optional[str] = None):
return cls(**cls.get_config_options(config, name))
Config.register_module(gpytorch.likelihoods)
Config.register_module(gpytorch.kernels)
Config.register_module(botorch.acquisition)
Config.registered_names["None"] = None
|
aepsych-main
|
aepsych/config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.4.0"
|
aepsych-main
|
aepsych/version.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Callable, Iterable, List, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
from aepsych.strategy import Strategy
from aepsych.utils import get_lse_contour, get_lse_interval, make_scaled_sobol
from scipy.stats import norm
def plot_strat(
strat: Strategy,
ax: Optional[plt.Axes] = None,
true_testfun: Optional[Callable] = None,
cred_level: float = 0.95,
target_level: Optional[float] = 0.75,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
yes_label: str = "Yes trial",
no_label: str = "No trial",
flipx: bool = False,
logx: bool = False,
gridsize: int = 30,
title: str = "",
save_path: Optional[str] = None,
show: bool = True,
include_legend: bool = True,
include_colorbar: bool = True,
) -> None:
"""Creates a plot of a strategy, showing participants responses on each trial, the estimated response function and
threshold, and optionally a ground truth response threshold.
Args:
strat (Strategy): Strategy object to be plotted. Must have a dimensionality of 2 or less.
ax (plt.Axes, optional): Matplotlib axis to plot on (if None, creates a new axis). Default: None.
true_testfun (Callable, optional): Ground truth response function. Should take a n_samples x n_parameters tensor
as input and produce the response probability at each sample as output. Default: None.
cred_level (float): Percentage of posterior mass around the mean to be shaded. Default: 0.95.
target_level (float): Response probability to estimate the threshold of. Default: 0.75.
xlabel (str): Label of the x-axis. Default: "Context (abstract)".
ylabel (str): Label of the y-axis (if None, defaults to "Response Probability" for 1-d plots or
"Intensity (Abstract)" for 2-d plots). Default: None.
yes_label (str): Label of trials with response of 1. Default: "Yes trial".
no_label (str): Label of trials with response of 0. Default: "No trial".
flipx (bool): Whether the values of the x-axis should be flipped such that the min becomes the max and vice
versa.
(Only valid for 2-d plots.) Default: False.
logx (bool): Whether the x-axis should be log-transformed. (Only valid for 2-d plots.) Default: False.
gridsize (int): The number of points to sample each dimension at. Default: 30.
title (str): Title of the plot. Default: ''.
save_path (str, optional): File name to save the plot to. Default: None.
show (bool): Whether the plot should be shown in an interactive window. Default: True.
include_legend (bool): Whether to include the legend in the figure. Default: True.
include_colorbar (bool): Whether to include the colorbar indicating the probability of "Yes" trials.
Default: True.
"""
assert (
"binary" in strat.outcome_types
), f"Plotting not supported for outcome_type {strat.outcome_types[0]}"
if target_level is not None and not hasattr(strat.model, "monotonic_idxs"):
warnings.warn(
"Threshold estimation may not be accurate for non-monotonic models."
)
if ax is None:
_, ax = plt.subplots()
if xlabel is None:
xlabel = "Context (abstract)"
dim = strat.dim
if dim == 1:
if ylabel is None:
ylabel = "Response Probability"
_plot_strat_1d(
strat,
ax,
true_testfun,
cred_level,
target_level,
xlabel,
ylabel,
yes_label,
no_label,
gridsize,
)
elif dim == 2:
if ylabel is None:
ylabel = "Intensity (abstract)"
_plot_strat_2d(
strat,
ax,
true_testfun,
cred_level,
target_level,
xlabel,
ylabel,
yes_label,
no_label,
flipx,
logx,
gridsize,
include_colorbar,
)
elif dim == 3:
raise RuntimeError("Use plot_strat_3d for 3d plots!")
else:
raise NotImplementedError("No plots for >3d!")
ax.set_title(title)
if include_legend:
anchor = (1.4, 0.5) if include_colorbar and dim > 1 else (1, 0.5)
plt.legend(loc="center left", bbox_to_anchor=anchor)
if save_path is not None:
plt.savefig(save_path, bbox_inches="tight")
if show:
plt.tight_layout()
if include_legend or (include_colorbar and dim > 1):
plt.subplots_adjust(left=0.1, bottom=0.25, top=0.75)
plt.show()
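# Illustrative call (assumes `strat` is a fitted 1-d or 2-d Strategy on binary data):
#     plot_strat(strat, target_level=0.75, save_path="strat.png", show=False)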
def _plot_strat_1d(
strat: Strategy,
ax: plt.Axes,
true_testfun: Optional[Callable],
cred_level: float,
target_level: Optional[float],
xlabel: str,
ylabel: str,
yes_label: str,
no_label: str,
gridsize: int,
):
"""Helper function for creating 1-d plots. See plot_strat for an explanation of the arguments."""
x, y = strat.x, strat.y
assert x is not None and y is not None, "No data to plot!"
grid = strat.model.dim_grid(gridsize=gridsize)
samps = norm.cdf(strat.model.sample(grid, num_samples=10000).detach())
phimean = samps.mean(0)
ax.plot(np.squeeze(grid), phimean)
if cred_level is not None:
upper = np.quantile(samps, cred_level, axis=0)
lower = np.quantile(samps, 1 - cred_level, axis=0)
ax.fill_between(
np.squeeze(grid),
lower,
upper,
alpha=0.3,
hatch="///",
edgecolor="gray",
label=f"{cred_level*100:.0f}% posterior mass",
)
if target_level is not None:
from aepsych.utils import interpolate_monotonic
threshold_samps = [
interpolate_monotonic(
grid.squeeze().numpy(), s, target_level, strat.lb[0], strat.ub[0]
)
for s in samps
]
thresh_med = np.mean(threshold_samps)
thresh_lower = np.quantile(threshold_samps, q=1 - cred_level)
thresh_upper = np.quantile(threshold_samps, q=cred_level)
ax.errorbar(
thresh_med,
target_level,
xerr=np.r_[thresh_med - thresh_lower, thresh_upper - thresh_med][:, None],
capsize=5,
elinewidth=1,
label=f"Est. {target_level*100:.0f}% threshold \n(with {cred_level*100:.0f}% posterior \nmass marked)",
)
if true_testfun is not None:
true_f = true_testfun(grid)
ax.plot(grid, true_f.squeeze(), label="True function")
if target_level is not None:
true_thresh = interpolate_monotonic(
grid.squeeze().numpy(),
true_f.squeeze(),
target_level,
strat.lb[0],
strat.ub[0],
)
ax.plot(
true_thresh,
target_level,
"o",
label=f"True {target_level*100:.0f}% threshold",
)
ax.scatter(
x[y == 0, 0],
np.zeros_like(x[y == 0, 0]),
marker=3,
color="r",
label=no_label,
)
ax.scatter(
x[y == 1, 0],
np.zeros_like(x[y == 1, 0]),
marker=3,
color="b",
label=yes_label,
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def _plot_strat_2d(
strat: Strategy,
ax: plt.Axes,
true_testfun: Optional[Callable],
cred_level: float,
target_level: Optional[float],
xlabel: str,
ylabel: str,
yes_label: str,
no_label: str,
flipx: bool,
logx: bool,
gridsize: int,
include_colorbar: bool,
):
"""Helper function for creating 2-d plots. See plot_strat for an explanation of the arguments."""
x, y = strat.x, strat.y
assert x is not None and y is not None, "No data to plot!"
# make sure the model is fit well if we've been limiting fit time
strat.model.fit(train_x=x, train_y=y, max_fit_time=None)
grid = strat.model.dim_grid(gridsize=gridsize)
fmean, _ = strat.model.predict(grid)
phimean = norm.cdf(fmean.reshape(gridsize, gridsize).detach().numpy()).T
if flipx:
extent = np.r_[strat.lb[0], strat.ub[0], strat.ub[1], strat.lb[1]]
colormap = ax.imshow(
phimean, aspect="auto", origin="upper", extent=extent, alpha=0.5
)
else:
extent = np.r_[strat.lb[0], strat.ub[0], strat.lb[1], strat.ub[1]]
colormap = ax.imshow(
phimean, aspect="auto", origin="lower", extent=extent, alpha=0.5
)
# hacky relabel to be in logspace
if logx:
locs = np.arange(strat.lb[0], strat.ub[0])
ax.set_xticks(ticks=locs)
ax.set_xticklabels(2.0**locs)
ax.plot(x[y == 0, 0], x[y == 0, 1], "ro", alpha=0.7, label=no_label)
ax.plot(x[y == 1, 0], x[y == 1, 1], "bo", alpha=0.7, label=yes_label)
if target_level is not None: # plot threshold
mono_grid = np.linspace(strat.lb[1], strat.ub[1], num=gridsize)
context_grid = np.linspace(strat.lb[0], strat.ub[0], num=gridsize)
thresh_75, lower, upper = get_lse_interval(
model=strat.model,
mono_grid=mono_grid,
target_level=target_level,
cred_level=cred_level,
mono_dim=1,
lb=mono_grid.min(),
ub=mono_grid.max(),
gridsize=gridsize,
)
ax.plot(
context_grid,
thresh_75,
label=f"Est. {target_level*100:.0f}% threshold \n(with {cred_level*100:.0f}% posterior \nmass shaded)",
)
ax.fill_between(
context_grid, lower, upper, alpha=0.3, hatch="///", edgecolor="gray"
)
if true_testfun is not None:
true_f = true_testfun(grid).reshape(gridsize, gridsize)
true_thresh = get_lse_contour(
true_f, mono_grid, level=target_level, lb=strat.lb[-1], ub=strat.ub[-1]
)
ax.plot(context_grid, true_thresh, label="Ground truth threshold")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if include_colorbar:
colorbar = plt.colorbar(colormap, ax=ax)
colorbar.set_label(f"Probability of {yes_label}")
def plot_strat_3d(
strat: Strategy,
parnames: Optional[List[str]] = None,
outcome_label: str = "Yes Trial",
slice_dim: int = 0,
slice_vals: Union[List[float], int] = 5,
contour_levels: Optional[Union[Iterable[float], bool]] = None,
probability_space: bool = False,
gridsize: int = 30,
extent_multiplier: Optional[List[float]] = None,
save_path: Optional[str] = None,
show: bool = True,
):
"""Creates a plot of a 2d slice of a 3D strategy, showing the estimated model or probability response and contours
Args:
strat (Strategy): Strategy object to be plotted. Must have a dimensionality of 3.
parnames (str list): list of the parameter names
outcome_label (str): The label of the outcome variable
slice_dim (int): dimension to slice on
        slice_vals (list of floats or int): values at which to take slices, OR the number of evenly spaced slices to take
contour_levels (iterable of floats or bool, optional): List contour values to plot. Default: None. If true, all integer levels.
probability_space (bool): Whether to plot probability. Default: False
gridsize (int): The number of points to sample each dimension at. Default: 30.
        extent_multiplier (list, optional): multipliers for each of the dimensions when plotting. Default: None
save_path (str, optional): File name to save the plot to. Default: None.
show (bool): Whether the plot should be shown in an interactive window. Default: True.
"""
assert strat.model is not None, "Cannot plot without a model!"
contour_levels_list = contour_levels or []
if parnames is None:
parnames = ["x1", "x2", "x3"]
# Get global min/max for all slices
if probability_space:
vmax = 1
vmin = 0
if contour_levels is True:
contour_levels_list = [0.75]
else:
d = make_scaled_sobol(strat.lb, strat.ub, 2000)
post = strat.model.posterior(d)
fmean = post.mean.squeeze().detach().numpy()
vmax = np.max(fmean)
vmin = np.min(fmean)
if contour_levels is True:
contour_levels_list = np.arange(np.ceil(vmin), vmax + 1)
# slice_vals is either a list of values or an integer number of values to slice on
if type(slice_vals) is int:
slices = np.linspace(strat.lb[slice_dim], strat.ub[slice_dim], slice_vals)
slices = np.around(slices, 4)
elif type(slice_vals) is not list:
raise TypeError("slice_vals must be either an integer or a list of values")
else:
slices = np.array(slice_vals)
_, axs = plt.subplots(1, len(slices), constrained_layout=True, figsize=(20, 3))
for _i, dim_val in enumerate(slices):
img = plot_slice(
axs[_i],
strat,
parnames,
slice_dim,
dim_val,
vmin,
vmax,
gridsize,
contour_levels_list,
probability_space,
extent_multiplier,
)
plt_parnames = np.delete(parnames, slice_dim)
axs[0].set_ylabel(plt_parnames[1])
cbar = plt.colorbar(img, ax=axs[-1])
if probability_space:
cbar.ax.set_ylabel(f"Probability of {outcome_label}")
else:
cbar.ax.set_ylabel(outcome_label)
for clevel in contour_levels_list: # type: ignore
cbar.ax.axhline(y=clevel, c="w")
if save_path is not None:
plt.savefig(save_path)
if show:
plt.show()
def plot_slice(
ax,
strat,
parnames,
slice_dim,
slice_val,
vmin,
vmax,
gridsize=30,
contour_levels=None,
lse=False,
extent_multiplier=None,
):
"""Creates a plot of a 2d slice of a 3D strategy, showing the estimated model or probability response and contours
Args:
strat (Strategy): Strategy object to be plotted. Must have a dimensionality of 3.
ax (plt.Axes): Matplotlib axis to plot on
parnames (str list): list of the parameter names
slice_dim (int): dimension to slice on
slice_vals (float): value to take the slice along that dimension
vmin (float): global model minimum to use for plotting
vmax (float): global model maximum to use for plotting
gridsize (int): The number of points to sample each dimension at. Default: 30.
contour_levels (int list): Contours to plot. Default: None
lse (bool): Whether to plot probability. Default: False
        extent_multiplier (list, optional): multipliers for each of the dimensions when plotting. Default: None
"""
extent = np.c_[strat.lb, strat.ub].reshape(-1)
x = strat.model.dim_grid(gridsize=gridsize, slice_dims={slice_dim: slice_val})
if lse:
fmean, fvar = strat.predict(x)
fmean = fmean.detach().numpy().reshape(gridsize, gridsize)
fmean = norm.cdf(fmean)
else:
post = strat.model.posterior(x)
fmean = post.mean.squeeze().detach().numpy().reshape(gridsize, gridsize)
# optionally rescale extents to correct values
if extent_multiplier is not None:
extent_scaled = extent * np.repeat(extent_multiplier, 2)
dim_val_scaled = slice_val * extent_multiplier[slice_dim]
else:
extent_scaled = extent
dim_val_scaled = slice_val
plt_extents = np.delete(extent_scaled, [slice_dim * 2, slice_dim * 2 + 1])
plt_parnames = np.delete(parnames, slice_dim)
img = ax.imshow(
fmean.T, extent=plt_extents, origin="lower", aspect="auto", vmin=vmin, vmax=vmax
)
ax.set_title(parnames[slice_dim] + "=" + str(dim_val_scaled))
ax.set_xlabel(plt_parnames[0])
if len(contour_levels) > 0:
ax.contour(
fmean.T,
contour_levels,
colors="w",
extent=plt_extents,
origin="lower",
aspect="auto",
)
return img
|
aepsych-main
|
aepsych/plotting.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from gpytorch.likelihoods import BernoulliLikelihood, GaussianLikelihood
from . import acquisition, config, factory, generators, models, strategy, utils
from .config import Config
from .likelihoods import BernoulliObjectiveLikelihood
from .models import GPClassificationModel
from .strategy import SequentialStrategy, Strategy
__all__ = [
# modules
"acquisition",
"config",
"factory",
"models",
"strategy",
"utils",
"generators",
# classes
"GPClassificationModel",
"Strategy",
"SequentialStrategy",
"BernoulliObjectiveLikelihood",
"BernoulliLikelihood",
"GaussianLikelihood",
]
try:
from . import benchmark
__all__ += ["benchmark"]
except ImportError:
pass
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import time
import warnings
from copy import copy
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
import torch
from aepsych.config import Config, ConfigurableMixin
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.generators.sobol_generator import AxSobolGenerator, SobolGenerator
from aepsych.models.base import ModelProtocol
from aepsych.utils import (
_process_bounds,
get_objectives,
get_parameters,
make_scaled_sobol,
)
from aepsych.utils_logging import getLogger
from ax.core.base_trial import TrialStatus
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour
from ax.plot.slice import plot_slice
from ax.service.ax_client import AxClient
from ax.utils.notebook.plotting import render
from botorch.exceptions.errors import ModelFittingError
logger = getLogger()
def ensure_model_is_fresh(f):
def wrapper(self, *args, **kwargs):
if self.can_fit and not self._model_is_fresh:
starttime = time.time()
if self._count % self.refit_every == 0 or self.refit_every == 1:
logger.info("Starting fitting (no warm start)...")
# don't warm start
self.fit()
else:
logger.info("Starting fitting (warm start)...")
# warm start
self.update()
logger.info(f"Fitting done, took {time.time()-starttime}")
self._model_is_fresh = True
return f(self, *args, **kwargs)
return wrapper
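# Hedged sketch (illustrative, not part of the library): a minimal object with
# the attributes that ensure_model_is_fresh expects, showing how the decorator
# alternates between full refits and warm-start updates. The class below and
# its attribute values are assumptions for demonstration only.
class _FreshnessDemo:
    def __init__(self):
        self.can_fit = True
        self._model_is_fresh = False
        self._count = 0  # number of points generated so far
        self.refit_every = 2
    def fit(self):  # called when _count is a multiple of refit_every
        logger.info("full refit from scratch")
    def update(self):  # called otherwise (warm start)
        logger.info("warm-start update")
    @ensure_model_is_fresh
    def predict(self):
        # by the time this body runs, the "model" is guaranteed fresh
        return "prediction from a fresh model"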
class Strategy(object):
"""Object that combines models and generators to generate points to sample."""
_n_eval_points: int = 1000
def __init__(
self,
generator: AEPsychGenerator,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
stimuli_per_trial: int,
        outcome_types: Sequence[str],
dim: Optional[int] = None,
min_total_tells: int = 0,
min_asks: int = 0,
model: Optional[ModelProtocol] = None,
refit_every: int = 1,
min_total_outcome_occurrences: int = 1,
max_asks: Optional[int] = None,
keep_most_recent: Optional[int] = None,
min_post_range: Optional[float] = None,
name: str = "",
run_indefinitely: bool = False,
):
"""Initialize the strategy object.
Args:
generator (AEPsychGenerator): The generator object that determines how points are sampled.
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
            ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
            stimuli_per_trial (int): Number of stimuli shown on each trial (e.g., 1 for detection, 2 for discrimination).
            outcome_types (Sequence[str]): Types of the outcomes this strategy generates (e.g., "binary").
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
min_total_tells (int): The minimum number of total observations needed to complete this strategy.
min_asks (int): The minimum number of points that should be generated from this strategy.
model (ModelProtocol, optional): The AEPsych model of the data.
refit_every (int): How often to refit the model from scratch.
min_total_outcome_occurrences (int): The minimum number of total observations needed for each outcome before the strategy will finish.
Defaults to 1 (i.e., for binary outcomes, there must be at least one "yes" trial and one "no" trial).
max_asks (int, optional): The maximum number of trials to generate using this strategy.
If None, there is no upper bound (default).
keep_most_recent (int, optional): Experimental. The number of most recent data points that the model will be fitted on.
This may be useful for discarding noisy data from trials early in the experiment that are not as informative
as data collected from later trials. When None, the model is fitted on all data.
min_post_range (float, optional): Experimental. The required difference between the posterior's minimum and maximum value in
                probability space before the strategy will finish. Ignored if None (default).
name (str): The name of the strategy. Defaults to the empty string.
run_indefinitely (bool): If true, the strategy will run indefinitely until finish() is explicitly called. Other stopping criteria will
be ignored. Defaults to False.
"""
self.is_finished = False
if run_indefinitely:
warnings.warn(
f"Strategy {name} will run indefinitely until finish() is explicitly called. Other stopping criteria will be ignored."
)
elif min_total_tells > 0 and min_asks > 0:
warnings.warn(
"Specifying both min_total_tells and min_asks > 0 may lead to unintended behavior."
)
if model is not None:
assert (
len(outcome_types) == model._num_outputs
), f"Strategy has {len(outcome_types)} outcomes, but model {type(model).__name__} supports {model._num_outputs}!"
assert (
stimuli_per_trial == model.stimuli_per_trial
), f"Strategy has {stimuli_per_trial} stimuli_per_trial, but model {type(model).__name__} supports {model.stimuli_per_trial}!"
if isinstance(model.outcome_type, str):
assert (
len(outcome_types) == 1 and outcome_types[0] == model.outcome_type
), f"Strategy outcome types is {outcome_types} but model outcome type is {model.outcome_type}!"
else:
assert set(outcome_types) == set(
model.outcome_type
), f"Strategy outcome types is {outcome_types} but model outcome type is {model.outcome_type}!"
self.run_indefinitely = run_indefinitely
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.min_total_outcome_occurrences = min_total_outcome_occurrences
self.max_asks = max_asks
self.keep_most_recent = keep_most_recent
self.min_post_range = min_post_range
if self.min_post_range is not None:
assert model is not None, "min_post_range must be None if model is None!"
self.eval_grid = make_scaled_sobol(
lb=self.lb, ub=self.ub, size=self._n_eval_points
)
self.x = None
self.y = None
self.n = 0
self.min_asks = min_asks
self._count = 0
self.min_total_tells = min_total_tells
self.stimuli_per_trial = stimuli_per_trial
self.outcome_types = outcome_types
if self.stimuli_per_trial == 1:
self.event_shape: Tuple[int, ...] = (self.dim,)
if self.stimuli_per_trial == 2:
self.event_shape = (self.dim, self.stimuli_per_trial)
self.model = model
self.refit_every = refit_every
self._model_is_fresh = False
self.generator = generator
self.has_model = self.model is not None
if self.generator._requires_model:
assert self.model is not None, f"{self.generator} requires a model!"
if self.min_asks == self.min_total_tells == 0:
warnings.warn(
"strategy.min_asks == strategy.min_total_tells == 0. This strategy will not generate any points!",
UserWarning,
)
self.name = name
    def normalize_inputs(self, x, y):
        """Append new data to the data seen so far and convert it to the format expected by the model.
        Args:
            x (np.ndarray): New training inputs.
            y (np.ndarray): New training outputs.
        Returns:
            x (torch.Tensor): All training inputs seen so far.
            y (torch.Tensor): All training outputs seen so far.
            n (int): Total number of observations.
        """
assert (
x.shape == self.event_shape or x.shape[1:] == self.event_shape
), f"x shape should be {self.event_shape} or batch x {self.event_shape}, instead got {x.shape}"
if x.shape == self.event_shape:
x = x[None, :]
if self.x is None:
x = np.r_[x]
else:
x = np.r_[self.x, x]
if self.y is None:
y = np.r_[y]
else:
y = np.r_[self.y, y]
n = y.shape[0]
return torch.Tensor(x), torch.Tensor(y), n
# TODO: allow user to pass in generator options
@ensure_model_is_fresh
def gen(self, num_points: int = 1):
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int, optional): Number of points to query. Defaults to 1.
            Other arguments are forwarded to the underlying model.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
self._count = self._count + num_points
return self.generator.gen(num_points, self.model)
@ensure_model_is_fresh
def get_max(self, constraints=None):
constraints = constraints or {}
return self.model.get_max(constraints)
@ensure_model_is_fresh
def get_min(self, constraints=None):
constraints = constraints or {}
return self.model.get_min(constraints)
@ensure_model_is_fresh
def inv_query(self, y, constraints=None, probability_space=False):
constraints = constraints or {}
return self.model.inv_query(y, constraints, probability_space)
@ensure_model_is_fresh
def predict(self, x, probability_space=False):
return self.model.predict(x=x, probability_space=probability_space)
@ensure_model_is_fresh
def get_jnd(self, *args, **kwargs):
return self.model.get_jnd(*args, **kwargs)
@ensure_model_is_fresh
def sample(self, x, num_samples=None):
return self.model.sample(x, num_samples=num_samples)
def finish(self):
self.is_finished = True
@property
def finished(self):
if self.is_finished:
return True
if self.run_indefinitely:
return False
if hasattr(self.generator, "finished"): # defer to generator if possible
return self.generator.finished
if self.y is None: # always need some data before switching strats
return False
if self.max_asks is not None and self._count >= self.max_asks:
return True
if "binary" in self.outcome_types:
n_yes_trials = (self.y == 1).sum()
n_no_trials = (self.y == 0).sum()
sufficient_outcomes = (
n_yes_trials >= self.min_total_outcome_occurrences
and n_no_trials >= self.min_total_outcome_occurrences
)
else:
sufficient_outcomes = True
if self.min_post_range is not None:
fmean, _ = self.model.predict(self.eval_grid, probability_space=True)
meets_post_range = (fmean.max() - fmean.min()) >= self.min_post_range
else:
meets_post_range = True
finished = (
self._count >= self.min_asks
and self.n >= self.min_total_tells
and sufficient_outcomes
and meets_post_range
)
return finished
@property
def can_fit(self):
return self.has_model and self.x is not None and self.y is not None
@property
def n_trials(self):
warnings.warn(
"'n_trials' is deprecated and will be removed in a future release. Specify 'min_asks' instead.",
DeprecationWarning,
)
return self.min_asks
def add_data(self, x, y):
self.x, self.y, self.n = self.normalize_inputs(x, y)
self._model_is_fresh = False
def fit(self):
if self.can_fit:
if self.keep_most_recent is not None:
try:
self.model.fit(
self.x[-self.keep_most_recent :],
self.y[-self.keep_most_recent :],
)
            except ModelFittingError:
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
try:
self.model.fit(self.x, self.y)
            except ModelFittingError:
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
warnings.warn("Cannot fit: no model has been initialized!", RuntimeWarning)
def update(self):
if self.can_fit:
if self.keep_most_recent is not None:
try:
self.model.update(
self.x[-self.keep_most_recent :],
self.y[-self.keep_most_recent :],
)
            except ModelFittingError:
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
try:
self.model.update(self.x, self.y)
            except ModelFittingError:
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
warnings.warn("Cannot fit: no model has been initialized!", RuntimeWarning)
@classmethod
def from_config(cls, config: Config, name: str):
lb = config.gettensor(name, "lb")
ub = config.gettensor(name, "ub")
dim = config.getint(name, "dim", fallback=None)
stimuli_per_trial = config.getint(name, "stimuli_per_trial", fallback=1)
outcome_types = config.getlist(name, "outcome_types", element_type=str)
gen_cls = config.getobj(name, "generator", fallback=SobolGenerator)
generator = gen_cls.from_config(config)
model_cls = config.getobj(name, "model", fallback=None)
if model_cls is not None:
model = model_cls.from_config(config)
else:
model = None
acqf_cls = config.getobj(name, "acqf", fallback=None)
if acqf_cls is not None and hasattr(generator, "acqf"):
if generator.acqf is None:
generator.acqf = acqf_cls
generator.acqf_kwargs = generator._get_acqf_options(acqf_cls, config)
min_asks = config.getint(name, "min_asks", fallback=0)
min_total_tells = config.getint(name, "min_total_tells", fallback=0)
refit_every = config.getint(name, "refit_every", fallback=1)
if model is not None and not generator._requires_model:
if refit_every < min_asks:
warnings.warn(
f"Strategy '{name}' has refit_every < min_asks even though its generator does not require a model. Consider making refit_every = min_asks to speed up point generation.",
UserWarning,
)
keep_most_recent = config.getint(name, "keep_most_recent", fallback=None)
min_total_outcome_occurrences = config.getint(
name,
"min_total_outcome_occurrences",
fallback=1 if "binary" in outcome_types else 0,
)
min_post_range = config.getfloat(name, "min_post_range", fallback=None)
n_trials = config.getint(name, "n_trials", fallback=None)
if n_trials is not None:
warnings.warn(
"'n_trials' is deprecated and will be removed in a future release. Specify 'min_asks' instead.",
DeprecationWarning,
)
min_asks = n_trials
return cls(
lb=lb,
ub=ub,
stimuli_per_trial=stimuli_per_trial,
outcome_types=outcome_types,
dim=dim,
model=model,
generator=generator,
min_asks=min_asks,
refit_every=refit_every,
min_total_outcome_occurrences=min_total_outcome_occurrences,
min_post_range=min_post_range,
keep_most_recent=keep_most_recent,
min_total_tells=min_total_tells,
name=name,
)
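# Hedged usage sketch (illustrative, not part of the library): building a
# Strategy via from_config, using the same Config.update(config_dict=...) path
# the benchmark code uses. Section and option names mirror the getters above;
# the concrete values here are assumptions for demonstration.
def _example_strategy_from_config() -> "Strategy":
    config = Config()
    config.update(
        config_dict={
            "common": {
                "lb": "[0, 0]",
                "ub": "[1, 1]",
                "stimuli_per_trial": "1",
                "outcome_types": "[binary]",
            },
            "init_strat": {"generator": "SobolGenerator", "min_asks": "10"},
        }
    )
    return Strategy.from_config(config, "init_strat")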
class SequentialStrategy(object):
"""Runs a sequence of strategies defined by its config
All getter methods defer to the current strat
Args:
        strat_list (list[Strategy]): The ordered list of strategies to run.
"""
def __init__(self, strat_list: List[Strategy]):
self.strat_list = strat_list
self._strat_idx = 0
self._suggest_count = 0
@property
def _strat(self):
return self.strat_list[self._strat_idx]
def __getattr__(self, name: str):
# return current strategy's attr if it's not a container attr
if "strat_list" not in vars(self):
raise AttributeError("Have no strategies in container, what happened?")
return getattr(self._strat, name)
def _make_next_strat(self):
if (self._strat_idx + 1) >= len(self.strat_list):
warnings.warn(
"Ran out of generators, staying on final generator!", RuntimeWarning
)
return
# populate new model with final data from last model
assert (
self.x is not None and self.y is not None
), "Cannot initialize next strategy; no data has been given!"
self.strat_list[self._strat_idx + 1].add_data(self.x, self.y)
self._suggest_count = 0
self._strat_idx = self._strat_idx + 1
def gen(self, num_points: int = 1, **kwargs):
if self._strat.finished:
self._make_next_strat()
self._suggest_count = self._suggest_count + num_points
return self._strat.gen(num_points=num_points, **kwargs)
def finish(self):
self._strat.finish()
@property
def finished(self):
return self._strat_idx == (len(self.strat_list) - 1) and self._strat.finished
def add_data(self, x, y):
self._strat.add_data(x, y)
@classmethod
def from_config(cls, config: Config):
strat_names = config.getlist("common", "strategy_names", element_type=str)
# ensure strat_names are unique
assert len(strat_names) == len(
set(strat_names)
), f"Strategy names {strat_names} are not all unique!"
strats = []
for name in strat_names:
strat = Strategy.from_config(config, str(name))
strats.append(strat)
return cls(strat_list=strats)
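# Hedged sketch (illustrative): the canonical ask/tell loop over a
# SequentialStrategy, mirroring the simulated-experiment loop in the benchmark
# code. `get_response` is a hypothetical callable standing in for a real
# experiment that maps stimuli to observed outcomes.
def _example_ask_tell_loop(strat: SequentialStrategy, get_response) -> None:
    while not strat.finished:
        next_x = strat.gen()  # ask: next point(s) to test
        next_y = get_response(next_x)  # run the (hypothetical) trial
        strat.add_data(next_x, next_y)  # tell: record the outcome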
class AEPsychStrategy(ConfigurableMixin):
is_finished = False
def __init__(self, ax_client: AxClient):
self.ax_client = ax_client
self.ax_client.experiment.num_asks = 0
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None) -> Dict:
# TODO: Fix the mypy errors
strat_names: List[str] = config.getlist("common", "strategy_names", element_type=str) # type: ignore
steps = []
for name in strat_names:
generator = config.getobj(name, "generator", fallback=AxSobolGenerator) # type: ignore
opts = generator.get_config_options(config, name)
step = AEPsychGenerationStep(**opts)
steps.append(step)
# Add an extra step at the end that we can `ask` endlessly.
final_step = copy(step)
final_step.completion_criteria = []
steps.append(final_step)
parameters = get_parameters(config)
parameter_constraints = config.getlist(
"common", "par_constraints", element_type=str, fallback=None
)
objectives = get_objectives(config)
seed = config.getint("common", "random_seed", fallback=None)
strat = GenerationStrategy(steps=steps)
ax_client = AxClient(strat, random_seed=seed)
ax_client.create_experiment(
name="experiment",
parameters=parameters,
parameter_constraints=parameter_constraints,
objectives=objectives,
)
return {"ax_client": ax_client}
@property
def finished(self) -> bool:
if self.is_finished:
return True
self.strat._maybe_move_to_next_step()
return len(self.strat._steps) == (self.strat.current_step.index + 1)
def finish(self):
self.is_finished = True
def gen(self, num_points: int = 1):
x, _ = self.ax_client.get_next_trials(max_trials=num_points)
self.strat.experiment.num_asks += num_points
return x
def complete_new_trial(self, config, outcome):
_, trial_index = self.ax_client.attach_trial(config)
self.complete_existing_trial(trial_index, outcome)
def complete_existing_trial(self, trial_index, outcome):
self.ax_client.complete_trial(trial_index, outcome)
@property
def experiment(self):
return self.ax_client.experiment
@property
def strat(self):
return self.ax_client.generation_strategy
@property
def can_fit(self):
return (
self.strat.model is not None
and len(self.experiment.trial_indices_by_status[TrialStatus.COMPLETED]) > 0
)
def _warn_on_outcome_mismatch(self):
ax_model = self.ax_client.generation_strategy.model
aepsych_model = ax_model.model.surrogate.model
if (
hasattr(aepsych_model, "outcome_type")
and aepsych_model.outcome_type != "continuous"
):
warnings.warn(
"Cannot directly plot non-continuous outcomes. Plotting the latent function instead."
)
def plot_contours(
self, density: int = 50, slice_values: Optional[Dict[str, Any]] = None
):
"""Plot predictions for a 2-d slice of the parameter space.
Args:
density: Number of points along each parameter to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the mean of numeric
parameters or the mode of choice parameters will be used.
"""
assert (
len(self.experiment.parameters) > 1
), "plot_contours requires at least 2 parameters! Use 'plot_slice' instead."
ax_model = self.ax_client.generation_strategy.model
self._warn_on_outcome_mismatch()
render(
interact_contour(
model=ax_model,
metric_name="objective",
density=density,
slice_values=slice_values,
)
)
def plot_slice(
self,
param_name: str,
density: int = 50,
slice_values: Optional[Dict[str, Any]] = None,
):
"""Plot predictions for a 1-d slice of the parameter space.
Args:
param_name: Name of parameter that will be sliced
density: Number of points along slice to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the mean of numeric
parameters or the mode of choice parameters will be used.
"""
self._warn_on_outcome_mismatch()
ax_model = self.ax_client.generation_strategy.model
render(
plot_slice(
model=ax_model,
param_name=param_name,
metric_name="objective",
density=density,
slice_values=slice_values,
)
)
def get_pareto_optimal_parameters(self):
return self.ax_client.get_pareto_optimal_parameters()
|
aepsych-main
|
aepsych/strategy.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import logging.config
import os
logger = logging.getLogger()
def getLogger(level=logging.INFO, log_path="logs") -> logging.Logger:
my_format = "%(asctime)-15s [%(levelname)-7s] %(message)s"
os.makedirs(log_path, exist_ok=True)
logging_config = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {"standard": {"format": my_format}},
"handlers": {
"default": {
"level": level,
"class": "logging.StreamHandler",
"formatter": "standard",
},
"file": {
"class": "logging.FileHandler",
"level": logging.DEBUG,
"filename": f"{log_path}/bayes_opt_server.log",
"formatter": "standard",
},
},
"loggers": {
"": {"handlers": ["default", "file"], "level": level, "propagate": False},
},
}
logging.config.dictConfig(logging_config)
return logger
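# Hedged usage note (illustrative): callers typically grab the shared logger
# once at startup; the level and directory below are assumptions. Left
# commented out because getLogger creates the log directory as a side effect.
# my_logger = getLogger(level=logging.DEBUG, log_path="logs/my_experiment")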
|
aepsych-main
|
aepsych/utils_logging.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Iterable
from configparser import NoOptionError
from typing import Dict, List, Mapping, Optional, Tuple
import numpy as np
import torch
from ax.service.utils.instantiation import ObjectiveProperties
from scipy.stats import norm
from torch.quasirandom import SobolEngine
def make_scaled_sobol(lb, ub, size, seed=None):
    """Draw a scrambled Sobol grid of `size` quasi-random points, rescaled to the box [lb, ub]."""
lb, ub, ndim = _process_bounds(lb, ub, None)
grid = SobolEngine(dimension=ndim, scramble=True, seed=seed).draw(size)
# rescale from [0,1] to [lb, ub]
grid = lb + (ub - lb) * grid
return grid
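# Hedged demo (illustrative): a small helper exercising make_scaled_sobol.
# The bounds, size, and seed below are assumptions for demonstration.
def _demo_make_scaled_sobol():
    # 8 quasi-random points in a 2-d box; fixed seed for reproducibility
    grid = make_scaled_sobol(lb=[0.0, -1.0], ub=[1.0, 1.0], size=8, seed=0)
    assert grid.shape == (8, 2)  # one row per point, one column per dimension
    return grid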
def promote_0d(x):
if not isinstance(x, Iterable):
return [x]
return x
def dim_grid(
lower: torch.Tensor,
upper: torch.Tensor,
dim: int,
gridsize: int = 30,
slice_dims: Optional[Mapping[int, float]] = None,
) -> torch.Tensor:
"""Create a grid
Create a grid based on lower, upper, and dim.
Parameters
----------
- lower ('int') - lower bound
- upper ('int') - upper bound
- dim ('int) - dimension
- gridsize ('int') - size for grid
- slice_dims (Optional, dict) - values to use for slicing axes, as an {index:value} dict
Returns
----------
grid : torch.FloatTensor
Tensor
"""
slice_dims = slice_dims or {}
lower, upper, _ = _process_bounds(lower, upper, None)
mesh_vals = []
for i in range(dim):
if i in slice_dims.keys():
mesh_vals.append(slice(slice_dims[i] - 1e-10, slice_dims[i] + 1e-10, 1))
else:
mesh_vals.append(slice(lower[i].item(), upper[i].item(), gridsize * 1j))
return torch.Tensor(np.mgrid[mesh_vals].reshape(dim, -1).T)
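# Hedged demo (illustrative): slicing one dimension of a 3-d grid with
# dim_grid. The bounds, gridsize, and slice value are assumptions.
def _demo_dim_grid():
    # dimension 1 is pinned at 0.5, so only dims 0 and 2 are gridded:
    # the result has gridsize**2 = 25 rows and 3 columns
    grid = dim_grid(
        lower=[0.0, 0.0, 0.0], upper=[1.0, 1.0, 1.0], dim=3, gridsize=5,
        slice_dims={1: 0.5},
    )
    return grid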
def _process_bounds(lb, ub, dim) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""Helper function for ensuring bounds are correct shape and type."""
lb = promote_0d(lb)
ub = promote_0d(ub)
if not isinstance(lb, torch.Tensor):
lb = torch.tensor(lb)
if not isinstance(ub, torch.Tensor):
ub = torch.tensor(ub)
lb = lb.float()
ub = ub.float()
assert lb.shape[0] == ub.shape[0], "bounds should be of equal shape!"
if dim is not None:
if lb.shape[0] == 1:
lb = lb.repeat(dim)
ub = ub.repeat(dim)
else:
assert lb.shape[0] == dim, "dim does not match shape of bounds!"
else:
dim = lb.shape[0]
for i, (l, u) in enumerate(zip(lb, ub)):
assert (
l <= u
), f"Lower bound {l} is not less than or equal to upper bound {u} on dimension {i}!"
return lb, ub, dim
def interpolate_monotonic(x, y, z, min_x=-np.inf, max_x=np.inf):
# Ben Letham's 1d interpolation code, assuming monotonicity.
# basic idea is find the nearest two points to the LSE and
# linearly interpolate between them (I think this is bisection
# root-finding)
idx = np.searchsorted(y, z)
if idx == len(y):
return float(max_x)
elif idx == 0:
return float(min_x)
x0 = x[idx - 1]
x1 = x[idx]
y0 = y[idx - 1]
y1 = y[idx]
x_star = x0 + (x1 - x0) * (z - y0) / (y1 - y0)
return x_star
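# Hedged worked example (illustrative): for y = 2x tabulated on a coarse grid,
# the level z = 1.0 is crossed between x = 0.4 (y = 0.8) and x = 0.6 (y = 1.2),
# so the linearly interpolated crossing point is exactly 0.5.
def _demo_interpolate_monotonic():
    x = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    y = 2 * x
    return interpolate_monotonic(x, y, 1.0)  # == 0.5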
def get_lse_interval(
model,
mono_grid,
target_level,
cred_level=None,
mono_dim=-1,
n_samps=500,
lb=-np.inf,
ub=np.inf,
gridsize=30,
**kwargs,
):
xgrid = torch.Tensor(
np.mgrid[
[
slice(model.lb[i].item(), model.ub[i].item(), gridsize * 1j)
for i in range(model.dim)
]
]
.reshape(model.dim, -1)
.T
)
samps = model.sample(xgrid, num_samples=n_samps, **kwargs)
samps = [s.reshape((gridsize,) * model.dim) for s in samps.detach().numpy()]
contours = np.stack(
[
get_lse_contour(norm.cdf(s), mono_grid, target_level, mono_dim, lb, ub)
for s in samps
]
)
if cred_level is None:
        return np.quantile(contours, 0.5, axis=0)
else:
alpha = 1 - cred_level
qlower = alpha / 2
qupper = 1 - alpha / 2
upper = np.quantile(contours, qupper, axis=0)
lower = np.quantile(contours, qlower, axis=0)
median = np.quantile(contours, 0.5, axis=0)
return median, lower, upper
def get_lse_contour(post_mean, mono_grid, level, mono_dim=-1, lb=-np.inf, ub=np.inf):
return np.apply_along_axis(
lambda p: interpolate_monotonic(mono_grid, p, level, lb, ub),
mono_dim,
post_mean,
)
def get_jnd_1d(post_mean, mono_grid, df=1, mono_dim=-1, lb=-np.inf, ub=np.inf):
interpolate_to = post_mean + df
return (
np.array(
            [interpolate_monotonic(mono_grid, post_mean, ito, lb, ub) for ito in interpolate_to]
)
- mono_grid
)
def get_jnd_multid(post_mean, mono_grid, df=1, mono_dim=-1, lb=-np.inf, ub=np.inf):
return np.apply_along_axis(
lambda p: get_jnd_1d(p, mono_grid, df=df, mono_dim=mono_dim, lb=lb, ub=ub),
mono_dim,
post_mean,
)
def _get_ax_parameters(config):
range_parnames = config.getlist("common", "parnames", element_type=str, fallback=[])
lb = config.getlist("common", "lb", element_type=float, fallback=[])
ub = config.getlist("common", "ub", element_type=float, fallback=[])
assert (
len(range_parnames) == len(lb) == len(ub)
), f"Length of parnames ({range_parnames}), lb ({lb}), and ub ({ub}) don't match!"
range_params = [
{
"name": parname,
"type": "range",
"value_type": config.get(parname, "value_type", fallback="float"),
"log_scale": config.getboolean(parname, "log_scale", fallback=False),
"bounds": [l, u],
}
for parname, l, u in zip(range_parnames, lb, ub)
]
choice_parnames = config.getlist(
"common", "choice_parnames", element_type=str, fallback=[]
)
choices = [
config.getlist(parname, "choices", element_type=str, fallback=["True", "False"])
for parname in choice_parnames
]
choice_params = [
{
"name": parname,
"type": "choice",
"value_type": config.get(parname, "value_type", fallback="str"),
"is_ordered": config.getboolean(parname, "is_ordered", fallback=False),
"values": choice,
}
for parname, choice in zip(choice_parnames, choices)
]
fixed_parnames = config.getlist(
"common", "fixed_parnames", element_type=str, fallback=[]
)
values = []
for parname in fixed_parnames:
try:
try:
value = config.getfloat(parname, "value")
except ValueError:
value = config.get(parname, "value")
values.append(value)
except NoOptionError:
raise RuntimeError(f"Missing value for fixed parameter {parname}!")
fixed_params = [
{
"name": parname,
"type": "fixed",
"value": value,
}
for parname, value in zip(fixed_parnames, values)
]
return range_params, choice_params, fixed_params
def get_parameters(config) -> List[Dict]:
range_params, choice_params, fixed_params = _get_ax_parameters(config)
return range_params + choice_params + fixed_params
def get_dim(config) -> int:
range_params, choice_params, _ = _get_ax_parameters(config)
# Need to sum dimensions added by both range and choice parameters
dim = len(range_params) # 1 dim per range parameter
for par in choice_params:
if par["is_ordered"]:
dim += 1 # Ordered choice params are encoded like continuous parameters
elif len(par["values"]) > 2:
dim += len(
par["values"]
) # Choice parameter is one-hot encoded such that they add 1 dim for every choice
else:
dim += (
len(par["values"]) - 1
) # Choice parameters with n_choices < 3 add n_choices - 1 dims
return dim
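# Hedged sketch (illustrative): dimension counting for mixed parameter types.
# One range parameter contributes 1 dim; an unordered 3-way choice parameter
# is one-hot encoded and contributes 3 dims, for a total of 4. The config
# contents below are assumptions for demonstration.
def _example_get_dim():
    from aepsych.config import Config  # local import to avoid an import cycle
    config = Config()
    config.update(
        config_dict={
            "common": {
                "parnames": "[intensity]",
                "lb": "[0]",
                "ub": "[1]",
                "choice_parnames": "[color]",
            },
            "intensity": {"value_type": "float"},
            "color": {"choices": "[red, green, blue]"},
        }
    )
    return get_dim(config)  # expected: 4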
def get_objectives(config) -> Dict:
outcome_types: List[str] = config.getlist(
"common", "outcome_types", element_type=str
)
if len(outcome_types) > 1:
for out_type in outcome_types:
assert (
out_type == "continuous"
), "Multiple outcomes is only currently supported for continuous outcomes!"
outcome_names: List[str] = config.getlist(
"common", "outcome_names", element_type=str, fallback=None
)
if outcome_names is None:
outcome_names = [f"outcome_{i+1}" for i in range(len(outcome_types))]
objectives = {}
for out_name in outcome_names:
minimize = config.getboolean(out_name, "minimize", fallback=False)
threshold = config.getfloat(out_name, "threshold", fallback=None)
objectives[out_name] = ObjectiveProperties(
minimize=minimize, threshold=threshold
)
return objectives
|
aepsych-main
|
aepsych/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import itertools
import time
from random import shuffle
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import numpy as np
import pandas as pd
import torch
from aepsych.config import Config
from aepsych.strategy import ensure_model_is_fresh, SequentialStrategy
from tqdm.contrib.itertools import product as tproduct
from .problem import Problem
class Benchmark:
"""
Benchmark base class.
This class wraps standard functionality for benchmarking models including
generating cartesian products of run configurations, running the simulated
experiment loop, and logging results.
TODO make a benchmarking tutorial and link/refer to it here.
"""
def __init__(
self,
problems: List[Problem],
configs: Mapping[str, Union[str, list]],
seed: Optional[int] = None,
n_reps: int = 1,
log_every: Optional[int] = 10,
) -> None:
"""Initialize benchmark.
Args:
problems (List[Problem]): Problem objects containing the test function to evaluate.
configs (Mapping[str, Union[str, list]]): Dictionary of configs to run.
Lists at leaves are used to construct a cartesian product of configurations.
seed (int, optional): Random seed to use for reproducible benchmarks.
Defaults to randomized seeds.
n_reps (int, optional): Number of repetitions to run of each configuration. Defaults to 1.
log_every (int, optional): Logging interval during an experiment. Defaults to logging every 10 trials.
"""
self.problems = problems
self.n_reps = n_reps
self.combinations = self.make_benchmark_list(**configs)
self._log: List[Dict[str, object]] = []
self.log_every = log_every
# shuffle combinations so that intermediate results have a bit of everything
shuffle(self.combinations)
if seed is None:
# explicit cast because int and np.int_ are different types
self.seed = int(np.random.randint(0, 200))
else:
self.seed = seed
def make_benchmark_list(self, **bench_config) -> List[Dict[str, float]]:
"""Generate a list of benchmarks to run from configuration.
This constructs a cartesian product of config dicts using lists at
the leaves of the base config
Returns:
List[dict[str, float]]: List of dictionaries, each of which can be passed
to aepsych.config.Config.
"""
# This could be a generator but then we couldn't
# know how many params we have, tqdm wouldn't work, etc,
# so we materialize the full list.
def gen_combinations(d):
keys, values = d.keys(), d.values()
# only go cartesian on list leaves
            values = [v if isinstance(v, list) else [v] for v in values]
combinations = itertools.product(*values)
return [dict(zip(keys, c)) for c in combinations]
keys, values = bench_config.keys(), bench_config.values()
return [
dict(zip(keys, c))
for c in itertools.product(*(gen_combinations(v) for v in values))
]
def materialize_config(self, config_dict):
materialized_config = {}
for key, value in config_dict.items():
materialized_config[key] = {
k: v._evaluate(config_dict) if isinstance(v, DerivedValue) else v
for k, v in value.items()
}
return materialized_config
@property
def num_benchmarks(self) -> int:
"""Return the total number of runs in this benchmark.
Returns:
int: Total number of runs in this benchmark.
"""
return len(self.problems) * len(self.combinations) * self.n_reps
def make_strat_and_flatconfig(
self, config_dict: Mapping[str, str]
) -> Tuple[SequentialStrategy, Dict[str, str]]:
"""From a config dict, generate a strategy (for running) and
flattened config (for logging)
Args:
config_dict (Mapping[str, str]): A run configuration dictionary.
Returns:
Tuple[SequentialStrategy, Dict[str,str]]: A tuple containing a strategy
object and a flat config.
"""
config = Config()
config.update(config_dict=config_dict)
strat = SequentialStrategy.from_config(config)
flatconfig = self.flatten_config(config)
return strat, flatconfig
def run_experiment(
self,
problem: Problem,
config_dict: Dict[str, Any],
seed: int,
rep: int,
) -> Tuple[List[Dict[str, Any]], Union[SequentialStrategy, None]]:
"""Run one simulated experiment.
        Args:
            problem (Problem): Problem object containing the test function to evaluate.
            config_dict (Dict[str, Any]): AEPsych configuration to use.
seed (int): Random seed for this run.
rep (int): Index of this repetition.
Returns:
Tuple[List[Dict[str, object]], SequentialStrategy]: A tuple containing a log of the results and the strategy as
of the end of the simulated experiment. This is ignored in large-scale benchmarks but useful for
one-off visualization.
"""
torch.manual_seed(seed)
np.random.seed(seed)
config_dict["common"]["lb"] = str(problem.lb.tolist())
config_dict["common"]["ub"] = str(problem.ub.tolist())
config_dict["problem"] = problem.metadata
materialized_config = self.materialize_config(config_dict)
# no-op config
is_invalid = materialized_config["common"].get("invalid_config", False)
if is_invalid:
return [{}], None
strat, flatconfig = self.make_strat_and_flatconfig(materialized_config)
problem_metadata = {
f"problem_{key}": value for key, value in problem.metadata.items()
}
total_gentime = 0.0
total_fittime = 0.0
i = 0
results = []
while not strat.finished:
starttime = time.time()
next_x = strat.gen()
gentime = time.time() - starttime
total_gentime += gentime
next_y = [problem.sample_y(next_x)]
strat.add_data(next_x, next_y)
# strat usually defers model fitting until it is needed
# (e.g. for gen or predict) so that we don't refit
# unnecessarily. But for benchmarking we want to time
# fit and gen separately, so we force a strat update
# so we can time fit vs gen. TODO make this less awkward
starttime = time.time()
ensure_model_is_fresh(lambda x: None)(strat._strat)
fittime = time.time() - starttime
total_fittime += fittime
if (self.log_at(i) or strat.finished) and strat.has_model:
metrics = problem.evaluate(strat)
result = {
"fit_time": fittime,
"cum_fit_time": total_fittime,
"gen_time": gentime,
"cum_gen_time": total_gentime,
"trial_id": i,
"rep": rep,
"seed": seed,
"final": strat.finished,
"strat_idx": strat._strat_idx,
}
result.update(problem_metadata)
result.update(flatconfig)
result.update(metrics)
results.append(result)
i = i + 1
return results, strat
def run_benchmarks(self):
"""Run all the benchmarks, sequentially."""
for i, (rep, config, problem) in enumerate(
tproduct(range(self.n_reps), self.combinations, self.problems)
):
local_seed = i + self.seed
results, _ = self.run_experiment(problem, config, seed=local_seed, rep=rep)
if results != [{}]:
self._log.extend(results)
def flatten_config(self, config: Config) -> Dict[str, str]:
"""Flatten a config object for logging.
Args:
config (Config): AEPsych config object.
Returns:
Dict[str,str]: A flat dictionary (that can be used to build a flat pandas data frame).
"""
flatconfig = {}
for s in config.sections():
flatconfig.update({f"{s}_{k}": v for k, v in config[s].items()})
return flatconfig
def log_at(self, i: int) -> bool:
"""Check if we should log on this trial index.
Args:
i (int): Trial index to (maybe) log at.
Returns:
bool: True if this trial should be logged.
"""
if self.log_every is not None:
return i % self.log_every == 0
else:
return False
def pandas(self) -> pd.DataFrame:
return pd.DataFrame(self._log)
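# Hedged usage sketch (illustrative, not part of the library): running a small
# benchmark over one problem. `problem` is any Problem subclass (see
# problem.py); list-valued leaves like min_asks below expand into a cartesian
# product of two configurations. All concrete values are assumptions.
def _example_run_benchmark(problem: Problem) -> pd.DataFrame:
    configs = {
        "common": {
            "stimuli_per_trial": "1",
            "outcome_types": "[binary]",
            "strategy_names": "[init_strat]",
        },
        "init_strat": {"generator": "SobolGenerator", "min_asks": [10, 20]},
    }
    bench = Benchmark(problems=[problem], configs=configs, seed=1, n_reps=2)
    bench.run_benchmarks()
    return bench.pandas()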
class DerivedValue(object):
"""
A class for dynamically generating config values from other config values during benchmarking.
"""
def __init__(self, args: List[Tuple[str, str]], func: Callable) -> None:
"""Initialize DerivedValue.
Args:
args (List[Tuple[str]]): Each tuple in this list is a pair of strings that refer to keys in a nested dictionary.
func (Callable): A function that accepts args as input.
For example, consider the following:
benchmark_config = {
"common": {
"model": ["GPClassificationModel", "FancyNewModelToBenchmark"],
"acqf": "MCLevelSetEstimation"
},
"init_strat": {
"min_asks": [10, 20],
"generator": "SobolGenerator"
},
"opt_strat": {
"generator": "OptimizeAcqfGenerator",
"min_asks":
DerivedValue(
[("init_strat", "min_asks"), ("common", "model")],
lambda x,y : 100 - x if y == "GPClassificationModel" else 50 - x)
}
}
Four separate benchmarks would be generated from benchmark_config:
1. model = GPClassificationModel; init trials = 10; opt trials = 90
2. model = GPClassificationModel; init trials = 20; opt trials = 80
3. model = FancyNewModelToBenchmark; init trials = 10; opt trials = 40
4. model = FancyNewModelToBenchmark; init trials = 20; opt trials = 30
        Note that you can also make problem names available to func by including ("problem", "name") in args.
"""
self.args = args
self.func = func
def _evaluate(self, benchmark_config: Dict) -> Any:
"""Fetches values of self.args from benchmark_config and evaluates self.func on them."""
_args = [benchmark_config[outer][inner] for outer, inner in self.args]
return self.func(*_args)
|
aepsych-main
|
aepsych/benchmark/benchmark.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import time
import traceback
from copy import deepcopy
from pathlib import Path
from random import shuffle
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import aepsych.utils_logging as utils_logging
import multiprocess.context as ctx
import numpy as np
import pathos
import torch
from aepsych.benchmark import Benchmark
from aepsych.benchmark.problem import Problem
from aepsych.strategy import SequentialStrategy
ctx._force_start_method("spawn") # fixes problems with CUDA and fork
logger = utils_logging.getLogger(logging.INFO)
class PathosBenchmark(Benchmark):
"""Benchmarking class for parallelized benchmarks using pathos"""
def __init__(self, nproc: int = 1, *args, **kwargs):
"""Initialize pathos benchmark.
Args:
nproc (int, optional): Number of cores to use. Defaults to 1.
"""
super().__init__(*args, **kwargs)
# parallelize over jobs, so each job should be 1 thread only
num_threads = torch.get_num_threads()
        num_interop_threads = torch.get_num_interop_threads()
        if num_threads > 1 or num_interop_threads > 1:
raise RuntimeError(
"PathosBenchmark parallelizes over threads,"
+ "and as such is incompatible with torch being threaded. "
+ "Please call `torch.set_num_threads(1)` and "
+ "`torch.set_num_interop_threads(1)` before using PathosBenchmark!"
)
cores_available = pathos.multiprocessing.cpu_count()
if nproc >= cores_available:
raise RuntimeError(
f"Requesting a benchmark with {nproc} cores but "
+ f"machine has {cores_available} cores! It is highly "
"recommended to leave at least 1-2 cores open for OS tasks."
)
self.pool = pathos.pools.ProcessPool(nodes=nproc)
def __del__(self):
# destroy the pool (for when we're testing or running
# multiple benchmarks in one script) but if the GC already
# cleared the underlying multiprocessing object (usually on
# the final call), don't do anything.
if hasattr(self, "pool") and self.pool is not None:
try:
self.pool.close()
self.pool.join()
self.pool.clear()
except TypeError:
pass
def run_experiment(
self,
problem: Problem,
config_dict: Dict[str, Any],
seed: int,
rep: int,
) -> Tuple[List[Dict[str, Any]], Union[SequentialStrategy, None]]:
"""Run one simulated experiment.
        Args:
            problem (Problem): Problem object containing the test function to evaluate.
            config_dict (Dict[str, Any]): AEPsych configuration to use.
seed (int): Random seed for this run.
rep (int): Index of this repetition.
Returns:
Tuple[List[Dict[str, Any]], SequentialStrategy]: A tuple containing a log of the results and the strategy as
of the end of the simulated experiment. This is ignored in large-scale benchmarks but useful for
one-off visualization.
"""
# copy things that we mutate
local_config = deepcopy(config_dict)
try:
return super().run_experiment(problem, local_config, seed, rep)
except Exception as e:
logging.error(
f"Error on config {config_dict}: {e}!"
+ f"Traceback follows:\n{traceback.format_exc()}"
)
return [], SequentialStrategy([])
def __getstate__(self):
self_dict = self.__dict__.copy()
if "pool" in self_dict.keys():
del self_dict["pool"]
if "futures" in self_dict.keys():
del self_dict["futures"]
return self_dict
def run_benchmarks(self):
"""Run all the benchmarks,
Note that this blocks while waiting for benchmarks to complete. If you
would like to start benchmarks and periodically collect partial results,
use start_benchmarks and then call collate_benchmarks(wait=False) on some
interval.
"""
self.start_benchmarks()
self.collate_benchmarks(wait=True)
def start_benchmarks(self):
"""Start benchmark run.
This does not block: after running it, self.futures holds the
status of benchmarks running in parallel.
"""
        def run_discard_strat(*conf):
            # avoid shadowing the module-level logger: the first return value
            # is the list of result records, not a logging object
            results, _ = self.run_experiment(*conf)
            return results
self.all_sim_configs = [
(problem, config_dict, self.seed + seed, rep)
for seed, (problem, config_dict, rep) in enumerate(
itertools.product(self.problems, self.combinations, range(self.n_reps))
)
]
shuffle(self.all_sim_configs)
self.futures = [
self.pool.apipe(run_discard_strat, *conf) for conf in self.all_sim_configs
]
@property
def is_done(self) -> bool:
"""Check if the benchmark is done.
Returns:
bool: True if all futures are cleared and benchmark is done.
"""
return len(self.futures) == 0
def collate_benchmarks(self, wait: bool = False) -> None:
"""Collect benchmark results from completed futures.
Args:
wait (bool, optional): If true, this method blocks and waits
on all futures to complete. Defaults to False.
"""
newfutures = []
while self.futures:
item = self.futures.pop()
if wait or item.ready():
results = item.get()
# filter out empty results from invalid configs
results = [r for r in results if r != {}]
if isinstance(results, list):
self._log.extend(results)
else:
newfutures.append(item)
self.futures = newfutures
def run_benchmarks_with_checkpoints(
out_path: str,
benchmark_name: str,
problems: List[Problem],
configs: Mapping[str, Union[str, list]],
global_seed: Optional[int] = None,
n_chunks: int = 1,
n_reps_per_chunk: int = 1,
log_every: Optional[int] = None,
checkpoint_every: int = 60,
n_proc: int = 1,
serial_debug: bool = False,
) -> None:
"""Runs a series of benchmarks, saving both final and intermediate results to .csv files. Benchmarks are run in
sequential chunks, each of which runs all combinations of problems/configs/reps in parallel. This function should
always be used using the "if __name__ == '__main__': ..." idiom.
Args:
out_path (str): The path to save the results to.
        benchmark_name (str): A name given to this set of benchmarks. Results will be saved in files named like
"out_path/benchmark_name_chunk{chunk_number}_out.csv"
problems (List[Problem]): Problem objects containing the test function to evaluate.
configs (Mapping[str, Union[str, list]]): Dictionary of configs to run.
Lists at leaves are used to construct a cartesian product of configurations.
global_seed (int, optional): Global seed to use for reproducible benchmarks.
Defaults to randomized seeds.
n_chunks (int): The number of chunks to break the results into. Each chunk will contain at least 1 run of every
combination of problem and config.
n_reps_per_chunk (int, optional): Number of repetitions to run each problem/config in each chunk.
log_every (int, optional): Logging interval during an experiment. Defaults to only logging at the end.
checkpoint_every (int): Save intermediate results every checkpoint_every seconds.
n_proc (int): Number of processors to use.
        serial_debug (bool): If True, run the benchmarks serially via the plain Benchmark class for easier debugging. Defaults to False.
"""
Path(out_path).mkdir(
parents=True, exist_ok=True
) # make an output folder if not exist
if serial_debug:
out_fname = Path(f"{out_path}/{benchmark_name}_out.csv")
print(f"Starting {benchmark_name} benchmark (serial debug mode)...")
bench = Benchmark(
problems=problems,
configs=configs,
seed=global_seed,
n_reps=n_reps_per_chunk * n_chunks,
log_every=log_every,
)
bench.run_benchmarks()
final_results = bench.pandas()
final_results.to_csv(out_fname)
else:
for chunk in range(n_chunks):
out_fname = Path(f"{out_path}/{benchmark_name}_chunk{chunk}_out.csv")
intermediate_fname = Path(
f"{out_path}/{benchmark_name}_chunk{chunk}_checkpoint.csv"
)
print(f"Starting {benchmark_name} benchmark... chunk {chunk} ")
bench = PathosBenchmark(
nproc=n_proc,
problems=problems,
configs=configs,
seed=None,
n_reps=n_reps_per_chunk,
log_every=log_every,
)
if global_seed is None:
global_seed = int(np.random.randint(0, 200))
bench.seed = (
global_seed + chunk * bench.num_benchmarks
) # HACK. TODO: make num_benchmarks a property of bench configs
bench.start_benchmarks()
while not bench.is_done:
time.sleep(checkpoint_every)
collate_start = time.time()
print(
f"Checkpointing {benchmark_name} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
)
bench.collate_benchmarks(wait=False)
temp_results = bench.pandas()
if len(temp_results) > 0:
temp_results["rep"] = temp_results["rep"] + n_reps_per_chunk * chunk
temp_results.to_csv(intermediate_fname)
print(
f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
)
print(f"{benchmark_name} chunk {chunk} fully done!")
final_results = bench.pandas()
final_results["rep"] = final_results["rep"] + n_reps_per_chunk * chunk
final_results.to_csv(out_fname)
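# Hedged usage sketch (illustrative): the "__main__" idiom required by the
# docstring above, since PathosBenchmark spawns worker processes. Kept as
# comments because my_problems and my_configs are hypothetical stand-ins.
# if __name__ == "__main__":
#     torch.set_num_threads(1)
#     torch.set_num_interop_threads(1)
#     run_benchmarks_with_checkpoints(
#         out_path="bench_out",
#         benchmark_name="demo",
#         problems=my_problems,
#         configs=my_configs,
#         n_chunks=2,
#         n_reps_per_chunk=5,
#         n_proc=2,
#     )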
|
aepsych-main
|
aepsych/benchmark/pathos_benchmark.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .benchmark import Benchmark, DerivedValue
from .pathos_benchmark import PathosBenchmark, run_benchmarks_with_checkpoints
from .problem import LSEProblem, Problem
from .test_functions import (
discrim_highdim,
make_songetal_testfun,
modified_hartmann6,
novel_detection_testfun,
novel_discrimination_testfun,
)
__all__ = [
"Benchmark",
"DerivedValue",
"PathosBenchmark",
"PathosBenchmark",
"Problem",
"LSEProblem",
"make_songetal_testfun",
"novel_detection_testfun",
"novel_discrimination_testfun",
"modified_hartmann6",
"discrim_highdim",
"run_benchmarks_with_checkpoints",
]
|
aepsych-main
|
aepsych/benchmark/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import cached_property
from typing import Any, Dict, Union
import aepsych
import numpy as np
import torch
from aepsych.strategy import SequentialStrategy, Strategy
from aepsych.utils import make_scaled_sobol
from scipy.stats import bernoulli, norm, pearsonr
class Problem:
"""Wrapper for a problem or test function. Subclass from this
and override f() to define your test function.
"""
n_eval_points = 1000
@cached_property
def eval_grid(self):
return make_scaled_sobol(lb=self.lb, ub=self.ub, size=self.n_eval_points)
@property
def name(self) -> str:
raise NotImplementedError
def f(self, x):
raise NotImplementedError
@cached_property
def lb(self):
return self.bounds[0]
@cached_property
def ub(self):
return self.bounds[1]
@property
def bounds(self):
raise NotImplementedError
@property
def metadata(self) -> Dict[str, Any]:
"""A dictionary of metadata passed to the Benchmark to be logged. Each key will become a column in the
Benchmark's output dataframe, with its associated value stored in each row."""
return {"name": self.name}
def p(self, x: np.ndarray) -> np.ndarray:
"""Evaluate response probability from test function.
Args:
x (np.ndarray): Points at which to evaluate.
Returns:
            np.ndarray: Response probability at the query points.
"""
return norm.cdf(self.f(x))
def sample_y(self, x: np.ndarray) -> np.ndarray:
"""Sample a response from test function.
Args:
x (np.ndarray): Points at which to sample.
Returns:
            np.ndarray: Bernoulli samples drawn at the query points.
"""
return bernoulli.rvs(self.p(x))
def f_hat(self, model: aepsych.models.base.ModelProtocol) -> torch.Tensor:
"""Generate mean predictions from the model over the evaluation grid.
Args:
model (aepsych.models.base.ModelProtocol): Model to evaluate.
Returns:
torch.Tensor: Posterior mean from underlying model over the evaluation grid.
"""
f_hat, _ = model.predict(self.eval_grid)
return f_hat
@cached_property
def f_true(self) -> np.ndarray:
"""Evaluate true test function over evaluation grid.
Returns:
            np.ndarray: Values of true test function over evaluation grid.
"""
return self.f(self.eval_grid).detach().numpy()
@cached_property
    def p_true(self) -> np.ndarray:
        """Evaluate true response probability over evaluation grid.
        Returns:
            np.ndarray: Values of true response probability over evaluation grid.
"""
return norm.cdf(self.f_true)
def p_hat(self, model: aepsych.models.base.ModelProtocol) -> torch.Tensor:
"""Generate mean predictions from the model over the evaluation grid.
Args:
model (aepsych.models.base.ModelProtocol): Model to evaluate.
Returns:
torch.Tensor: Posterior mean from underlying model over the evaluation grid.
"""
p_hat, _ = model.predict(self.eval_grid, probability_space=True)
return p_hat
def evaluate(
self,
strat: Union[Strategy, SequentialStrategy],
) -> Dict[str, float]:
"""Evaluate the strategy with respect to this problem.
Extend this in subclasses to add additional metrics.
Metrics include:
        - mae (mean absolute error), mse (mean squared error), max_abs_err (max absolute error), and
            pearson correlation. All of these are computed over the latent variable f and the
            outcome probability p, w.r.t. the posterior mean. Absolute and squared errors (miae, mise) are
            also computed in expectation over the posterior, by sampling.
- Brier score, which measures how well-calibrated the outcome probability is, both at the posterior
mean (plain brier) and in expectation over the posterior (expected_brier).
Args:
strat (aepsych.strategy.Strategy): Strategy to evaluate.
Returns:
Dict[str, float]: A dictionary containing metrics and their values.
"""
# we just use model here but eval gets called on strat in case we need it in downstream evals
# for example to separate out sobol vs opt trials
model = strat.model
assert model is not None, "Cannot evaluate strategy without a model!"
# always eval f
f_hat = self.f_hat(model).detach().numpy()
p_hat = self.p_hat(model).detach().numpy()
assert (
self.f_true.shape == f_hat.shape
), f"self.f_true.shape=={self.f_true.shape} != f_hat.shape=={f_hat.shape}"
mae_f = np.mean(np.abs(self.f_true - f_hat))
mse_f = np.mean((self.f_true - f_hat) ** 2)
max_abs_err_f = np.max(np.abs(self.f_true - f_hat))
corr_f = pearsonr(self.f_true.flatten(), f_hat.flatten())[0]
mae_p = np.mean(np.abs(self.p_true - p_hat))
mse_p = np.mean((self.p_true - p_hat) ** 2)
max_abs_err_p = np.max(np.abs(self.p_true - p_hat))
corr_p = pearsonr(self.p_true.flatten(), p_hat.flatten())[0]
brier = np.mean(2 * np.square(self.p_true - p_hat))
# eval in samp-based expectation over posterior instead of just mean
fsamps = model.sample(self.eval_grid, num_samples=1000).detach().numpy()
try:
psamps = (
model.sample(self.eval_grid, num_samples=1000, probability_space=True) # type: ignore
.detach()
.numpy()
)
except TypeError: # vanilla models don't have proba_space samps, TODO maybe we should add them
psamps = norm.cdf(fsamps)
ferrs = fsamps - self.f_true[None, :]
miae_f = np.mean(np.abs(ferrs))
mise_f = np.mean(ferrs**2)
perrs = psamps - self.p_true[None, :]
miae_p = np.mean(np.abs(perrs))
mise_p = np.mean(perrs**2)
expected_brier = (2 * np.square(self.p_true[None, :] - psamps)).mean()
metrics = {
"mean_abs_err_f": mae_f,
"mean_integrated_abs_err_f": miae_f,
"mean_square_err_f": mse_f,
"mean_integrated_square_err_f": mise_f,
"max_abs_err_f": max_abs_err_f,
"pearson_corr_f": corr_f,
"mean_abs_err_p": mae_p,
"mean_integrated_abs_err_p": miae_p,
"mean_square_err_p": mse_p,
"mean_integrated_square_err_p": mise_p,
"max_abs_err_p": max_abs_err_p,
"pearson_corr_p": corr_p,
"brier": brier,
"expected_brier": expected_brier,
}
return metrics
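# Hedged sketch (illustrative, not part of the library): the minimal surface a
# Problem subclass must provide, per the class docstring above. The bounds and
# test function here are arbitrary assumptions for demonstration.
class _ToyProblem(Problem):
    @property
    def name(self) -> str:
        return "toy_linear"
    @property
    def bounds(self):
        # row 0 holds lower bounds, row 1 upper bounds (one column per dim)
        return torch.tensor([[-3.0], [3.0]])
    def f(self, x):
        # latent function; response probability is norm.cdf(f(x))
        return x.sum(-1)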
class LSEProblem(Problem):
"""Level set estimation problem.
This extends the base problem class to evaluate the LSE/threshold estimate
in addition to the function estimate.
"""
threshold = 0.75
@property
def metadata(self) -> Dict[str, Any]:
"""A dictionary of metadata passed to the Benchmark to be logged. Each key will become a column in the
Benchmark's output dataframe, with its associated value stored in each row."""
md = super().metadata
md["threshold"] = self.threshold
return md
def f_threshold(self, model=None):
try:
inverse_torch = model.likelihood.objective.inverse
def inverse_link(x):
return inverse_torch(torch.tensor(x)).numpy()
except AttributeError:
inverse_link = norm.ppf
return float(inverse_link(self.threshold))
@cached_property
def true_below_threshold(self) -> np.ndarray:
"""
Evaluate whether the true function is below threshold over the eval grid
        (used for proper scoring and the threshold misclassification metric).
"""
return (self.p(self.eval_grid) <= self.threshold).astype(float)
def evaluate(self, strat: Union[Strategy, SequentialStrategy]) -> Dict[str, float]:
"""Evaluate the model with respect to this problem.
For level set estimation, we add metrics w.r.t. the true threshold:
        - brier_p_below_{thresh}, the brier score w.r.t. p(f(x)<thresh), in contrast to
regular brier, which is the brier score for p(phi(f(x))=1), and the same
for misclassification error.
Args:
strat (aepsych.strategy.Strategy): Strategy to evaluate.
Returns:
Dict[str, float]: A dictionary containing metrics and their values,
including parent class metrics.
"""
metrics = super().evaluate(strat)
# we just use model here but eval gets called on strat in case we need it in downstream evals
# for example to separate out sobol vs opt trials
model = strat.model
assert model is not None, "Cannot make predictions without a model!"
# TODO bring back more threshold error metrics when we more clearly
# define what "threshold" means in high-dim.
# Predict p(below threshold) at test points
p_l = model.p_below_threshold(self.eval_grid, self.f_threshold(model))
# Brier score on level-set probabilities
thresh = self.threshold
brier_name = f"brier_p_below_{thresh}"
metrics[brier_name] = np.mean(2 * np.square(self.true_below_threshold - p_l))
# Classification error
classerr_name = f"missclass_on_thresh_{thresh}"
metrics[classerr_name] = np.mean(
p_l * (1 - self.true_below_threshold)
+ (1 - p_l) * self.true_below_threshold
)
return metrics
|
aepsych-main
|
aepsych/benchmark/problem.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import io
import math
from typing import Callable
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline, interp1d
from scipy.stats import norm
# manually scraped data from doi:10.1007/s10162-013-0396-x fig 2
raw = """\
freq,thresh,phenotype
0.25,6.816404934,Older-normal
0.5,5.488517768,Older-normal
1,3.512856308,Older-normal
2,5.909671334,Older-normal
3,6.700337017,Older-normal
4,10.08761498,Older-normal
6,13.46962853,Older-normal
8,12.97026073,Older-normal
0.25,5.520856346,Sensory
0.5,4.19296918,Sensory
1,5.618122764,Sensory
2,19.83681866,Sensory
3,42.00403606,Sensory
4,53.32679981,Sensory
6,62.0527006,Sensory
8,66.08775286,Sensory
0.25,21.2291323,Metabolic
0.5,22.00676227,Metabolic
1,24.24163372,Metabolic
2,33.92590956,Metabolic
3,41.35626176,Metabolic
4,47.17294402,Metabolic
6,54.1174655,Metabolic
8,58.31446133,Metabolic
0.25,20.25772154,Metabolic+Sensory
0.5,20.71121368,Metabolic+Sensory
1,21.97442369,Metabolic+Sensory
2,37.48866818,Metabolic+Sensory
3,53.17814263,Metabolic+Sensory
4,64.01507567,Metabolic+Sensory
6,75.00818649,Metabolic+Sensory
8,76.61433583,Metabolic+Sensory"""
dubno_data = pd.read_csv(io.StringIO(raw))
def make_songetal_threshfun(x: np.ndarray, y: np.ndarray) -> Callable[[float], float]:
"""Generate a synthetic threshold function by interpolation of real data.
Real data is from Dubno et al. 2013, and procedure follows Song et al. 2017, 2018.
See make_songetal_testfun for more detail.
Args:
x (np.ndarray): Frequency
y (np.ndarray): Threshold
Returns:
Callable[[float], float]: Function that interpolates the given
frequencies and thresholds and returns threshold as a function
of frequency.
"""
f_interp = CubicSpline(x, y, extrapolate=False)
f_extrap = interp1d(x, y, fill_value="extrapolate")
def f_combo(x):
# interpolate first
interpolated = f_interp(x)
# whatever is nan needs extrapolating
interpolated[np.isnan(interpolated)] = f_extrap(x[np.isnan(interpolated)])
return interpolated
return f_combo
def make_songetal_testfun(
phenotype: str = "Metabolic", beta: float = 1
) -> Callable[[np.ndarray, bool], np.ndarray]:
"""Make an audiometric test function following Song et al. 2017.
    To do so, we first compute a threshold by interpolation/extrapolation
from real data, then assume a linear psychometric function in intensity
with slope beta.
Args:
phenotype (str, optional): Audiometric phenotype from Dubno et al. 2013.
Specifically, one of "Metabolic", "Sensory", "Metabolic+Sensory",
or "Older-normal". Defaults to "Metabolic".
beta (float, optional): Psychometric function slope. Defaults to 1.
Returns:
Callable[[np.ndarray, bool], np.ndarray]: A test function taking a [b x 2] array of points and returning the psychometric function value at those points.
Raises:
AssertionError: if an invalid phenotype is passed.
References:
Song, X. D., Garnett, R., & Barbour, D. L. (2017).
Psychometric function estimation by probabilistic classification.
The Journal of the Acoustical Society of America, 141(4), 2513–2525.
https://doi.org/10.1121/1.4979594
"""
valid_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
assert phenotype in valid_phenotypes, f"Phenotype must be one of {valid_phenotypes}"
x = dubno_data[dubno_data.phenotype == phenotype].freq.values
y = dubno_data[dubno_data.phenotype == phenotype].thresh.values
# first, make the threshold fun
threshfun = make_songetal_threshfun(x, y)
# now make it into a test function
def song_testfun(x, cdf=False):
logfreq = x[..., 0]
intensity = x[..., 1]
thresh = threshfun(2**logfreq)
return (
norm.cdf((intensity - thresh) / beta)
if cdf
else (intensity - thresh) / beta
)
return song_testfun
def novel_discrimination_testfun(x: np.ndarray) -> np.ndarray:
"""Evaluate novel discrimination test function from Owen et al.
The threshold is roughly parabolic with context, and the slope
varies with the threshold. Adding to the difficulty is the fact
that the function is minimized at f=0 (or p=0.5), corresponding
to discrimination being at chance at zero stimulus intensity.
Args:
x (np.ndarray): Points at which to evaluate.
Returns:
np.ndarray: Value of function at these points.
"""
freq = x[..., 0]
amp = x[..., 1]
context = 2 * (0.05 + 0.4 * (-1 + 0.2 * freq) ** 2 * freq**2)
return 2 * (amp + 1) / context
def novel_detection_testfun(x: np.ndarray) -> np.ndarray:
"""Evaluate novel detection test function from Owen et al.
The threshold is roughly parabolic with context, and the slope
varies with the threshold.
Args:
x (np.ndarray): Points at which to evaluate.
Returns:
np.ndarray: Value of function at these points.
"""
freq = x[..., 0]
amp = x[..., 1]
context = 2 * (0.05 + 0.4 * (-1 + 0.2 * freq) ** 2 * freq**2)
return 4 * (amp + 1) / context - 4
def discrim_highdim(x: np.ndarray) -> np.ndarray:
amp = x[..., 0]
freq = x[..., 1]
vscale = x[..., 2]
vshift = x[..., 3]
variance = x[..., 4]
asym = x[..., 5]
phase = x[..., 6]
period = x[..., 7]
context = (
-0.5 * vscale * np.cos(period * 0.6 * math.pi * freq + phase)
+ vscale / 2
+ vshift
) * (
-1 * asym * np.sin(period * 0.6 * math.pi * 0.5 * freq + phase) + (2 - asym)
) - 1
z = (amp - context) / (variance + variance * (1 + context))
p = norm.cdf(z)
p = (1 - 0.5) * p + 0.5 # Floor at p=0.5
p = np.clip(p, 0.5, 1 - 1e-5) # clip so that norm.ppf doesn't go to inf
return norm.ppf(p)
def modified_hartmann6(X):
"""
The modified Hartmann6 function used in Lyu et al.
"""
C = np.r_[0.2, 0.22, 0.28, 0.3]
a_t = np.c_[
[8, 3, 10, 3.5, 1.7, 6],
[0.5, 8, 10, 1.0, 6, 9],
[3, 3.5, 1.7, 8, 10, 6],
[10, 6, 0.5, 8, 1.0, 9],
].T
p_t = (
10 ** (-4)
* np.c_[
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
].T
)
y = 0.0
for i, C_i in enumerate(C):
t = 0
for j in range(6):
t += a_t[i, j] * ((X[j] - p_t[i, j]) ** 2)
y += C_i * np.exp(-t)
return -10 * (float(y) - 0.1)
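# --- Illustrative sketch (hedged): modified_hartmann6 expects a 6-vector,
# conventionally with entries in [0, 1]; the query point below is arbitrary.
def _demo_modified_hartmann6():
    X = np.full(6, 0.5)
    return modified_hartmann6(X)  # a single float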
|
aepsych-main
|
aepsych/benchmark/test_functions.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import logging
import os
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import Dict
import aepsych.database.tables as tables
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import close_all_sessions
logger = logging.getLogger()
class Database:
def __init__(self, db_path=None):
if db_path is None:
db_path = "./databases/default.db"
db_dir, db_name = os.path.split(db_path)
self._db_name = db_name
self._db_dir = db_dir
if os.path.exists(db_path):
logger.info(f"Found DB at {db_path}, appending!")
else:
logger.info(f"No DB found at {db_path}, creating a new DB!")
self._engine = self.get_engine()
def get_engine(self):
if not hasattr(self, "_engine") or self._engine is None:
self._full_db_path = Path(self._db_dir)
self._full_db_path.mkdir(parents=True, exist_ok=True)
self._full_db_path = self._full_db_path.joinpath(self._db_name)
self._engine = create_engine(f"sqlite:///{self._full_db_path.as_posix()}")
# create the table metadata and tables
tables.Base.metadata.create_all(self._engine)
# create a long-lived session; it provides a conduit
# to the db so that the instantiated objects work properly.
Session = sessionmaker(bind=self.get_engine())
self._session = Session()
return self._engine
def delete_db(self):
if self._engine is not None and self._full_db_path.exists():
close_all_sessions()
self._full_db_path.unlink()
self._engine = None
def is_update_required(self):
return (
tables.DBMasterTable.requires_update(self._engine)
or tables.DbReplayTable.requires_update(self._engine)
or tables.DbStratTable.requires_update(self._engine)
or tables.DbConfigTable.requires_update(self._engine)
or tables.DbRawTable.requires_update(self._engine)
or tables.DbParamTable.requires_update(self._engine)
or tables.DbOutcomeTable.requires_update(self._engine)
)
def perform_updates(self):
"""Perform updates on known tables. SQLAlchemy doesn't do alters so they're done the old fashioned way."""
tables.DBMasterTable.update(self._engine)
tables.DbReplayTable.update(self._engine)
tables.DbStratTable.update(self._engine)
tables.DbConfigTable.update(self._engine)
tables.DbRawTable.update(self, self._engine)
tables.DbParamTable.update(self._engine)
tables.DbOutcomeTable.update(self._engine)
@contextmanager
def session_scope(self):
"""Provide a transactional scope around a series of operations."""
Session = sessionmaker(bind=self.get_engine())
session = Session()
try:
yield session
session.commit()
except Exception as err:
logger.error(f"db session use failed: {err}")
session.rollback()
raise
finally:
session.close()
# @retry(stop_max_attempt_number=8, wait_exponential_multiplier=1.8)
def execute_sql_query(self, query: str, vals: Dict[str, str]):
"""Execute an arbitrary query written in sql."""
with self.session_scope() as session:
return session.execute(query, vals).fetchall()
def get_master_records(self):
"""Grab the list of master records."""
records = self._session.query(tables.DBMasterTable).all()
return records
def get_master_record(self, experiment_id):
"""Grab the list of master record for a specific experiment (master) id."""
records = (
self._session.query(tables.DBMasterTable)
.filter(tables.DBMasterTable.experiment_id == experiment_id)
.all()
)
if 0 < len(records):
return records[0]
return None
def get_replay_for(self, master_id):
"""Get the replay records for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None:
return master_record.children_replay
return None
def get_strats_for(self, master_id=0):
"""Get the strat records for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None and len(master_record.children_strat) > 0:
return [c.strat for c in master_record.children_strat]
return None
def get_strat_for(self, master_id, strat_id=-1):
"""Get a specific strat record for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None and len(master_record.children_strat) > 0:
return master_record.children_strat[strat_id].strat
return None
def get_config_for(self, master_id):
"""Get the strat records for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None:
return master_record.children_config[0].config
return None
def get_raw_for(self, master_id):
"""Get the raw data for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None:
return master_record.children_raw
return None
def get_all_params_for(self, master_id):
"""Get the parameters for all the iterations of a specific experiment."""
raw_record = self.get_raw_for(master_id)
params = []
if raw_record is not None:
for raw in raw_record:
for param in raw.children_param:
params.append(param)
return params
return None
def get_param_for(self, master_id, iteration_id):
"""Get the parameters for a specific iteration of a specific experiment."""
raw_record = self.get_raw_for(master_id)
if raw_record is not None:
for raw in raw_record:
if raw.unique_id == iteration_id:
return raw.children_param
return None
def get_all_outcomes_for(self, master_id):
"""Get the outcomes for all the iterations of a specific experiment."""
raw_record = self.get_raw_for(master_id)
outcomes = []
if raw_record is not None:
for raw in raw_record:
for outcome in raw.children_outcome:
outcomes.append(outcome)
return outcomes
return None
def get_outcome_for(self, master_id, iteration_id):
"""Get the outcomes for a specific iteration of a specific experiment."""
raw_record = self.get_raw_for(master_id)
if raw_record is not None:
for raw in raw_record:
if raw.unique_id == iteration_id:
return raw.children_outcome
return None
def record_setup(
self,
description,
name,
extra_metadata=None,
id=None,
request=None,
participant_id=None,
) -> str:
self.get_engine()
if id is None:
master_table = tables.DBMasterTable()
master_table.experiment_description = description
master_table.experiment_name = name
master_table.experiment_id = str(uuid.uuid4())
if participant_id is not None:
master_table.participant_id = participant_id
else:
master_table.participant_id = str(
uuid.uuid4()
) # no p_id specified will result in a generated UUID
master_table.extra_metadata = extra_metadata
self._session.add(master_table)
logger.debug(f"record_setup = [{master_table}]")
else:
master_table = self.get_master_record(id)
if master_table is None:
raise RuntimeError(f"experiment id {id} doesn't exist in the db.")
record = tables.DbReplayTable()
record.message_type = "setup"
record.message_contents = request
if "extra_info" in request:
record.extra_info = request["extra_info"]
record.timestamp = datetime.datetime.now()
record.parent = master_table
logger.debug(f"record_setup = [{record}]")
self._session.add(record)
self._session.commit()
# return the master table; it carries the links to the child rows and
# needs to be passed into all future record_* calls to link them properly
return master_table
def record_message(self, master_table, type, request) -> None:
# create a linked setup table
record = tables.DbReplayTable()
record.message_type = type
record.message_contents = request
if "extra_info" in request:
record.extra_info = request["extra_info"]
record.timestamp = datetime.datetime.now()
record.parent = master_table
self._session.add(record)
self._session.commit()
def record_raw(self, master_table, model_data, timestamp=None):
raw_entry = tables.DbRawTable()
raw_entry.model_data = model_data
if timestamp is None:
raw_entry.timestamp = datetime.datetime.now()
else:
raw_entry.timestamp = timestamp
raw_entry.parent = master_table
self._session.add(raw_entry)
self._session.commit()
return raw_entry
def record_param(self, raw_table, param_name, param_value) -> None:
param_entry = tables.DbParamTable()
param_entry.param_name = param_name
param_entry.param_value = param_value
param_entry.parent = raw_table
self._session.add(param_entry)
self._session.commit()
def record_outcome(self, raw_table, outcome_name, outcome_value) -> None:
outcome_entry = tables.DbOutcomeTable()
outcome_entry.outcome_name = outcome_name
outcome_entry.outcome_value = outcome_value
outcome_entry.parent = raw_table
self._session.add(outcome_entry)
self._session.commit()
def record_strat(self, master_table, strat):
strat_entry = tables.DbStratTable()
strat_entry.strat = strat
strat_entry.timestamp = datetime.datetime.now()
strat_entry.parent = master_table
self._session.add(strat_entry)
self._session.commit()
def record_config(self, master_table, config):
config_entry = tables.DbConfigTable()
config_entry.config = config
config_entry.timestamp = datetime.datetime.now()
config_entry.parent = master_table
self._session.add(config_entry)
self._session.commit()
def list_master_records(self):
master_records = self.get_master_records()
print("Listing master records:")
for record in master_records:
print(
f'\t{record.unique_id} - name: "{record.experiment_name}" experiment id: {record.experiment_id}'
)
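# --- Illustrative usage sketch (not part of the original module). The path
# and names below are hypothetical; this only exercises the record/query API
# defined above.
def _demo_database_roundtrip():
    db = Database(db_path="./databases/demo.db")  # hypothetical path
    master = db.record_setup(
        description="demo experiment",
        name="demo",
        request={"type": "setup", "message": {}},
    )
    db.record_message(master, "tell", request={"type": "tell", "message": {}})
    with db.session_scope() as session:
        return session.query(tables.DBMasterTable).count()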
|
aepsych-main
|
aepsych/database/db.py
|
aepsych-main
|
aepsych/database/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pickle
from collections.abc import Iterable
from aepsych.config import Config
from aepsych.version import __version__
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
PickleType,
String,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
logger = logging.getLogger()
Base = declarative_base()
"""
Original Schema
CREATE TABLE master (
unique_id INTEGER NOT NULL,
experiment_name VARCHAR(256),
experiment_description VARCHAR(2048),
experiment_id VARCHAR(10),
PRIMARY KEY (unique_id),
UNIQUE (experiment_id)
);
CREATE TABLE replay_data (
unique_id INTEGER NOT NULL,
timestamp DATETIME,
message_type VARCHAR(64),
message_contents BLOB,
master_table_id INTEGER,
PRIMARY KEY (unique_id),
FOREIGN KEY(master_table_id) REFERENCES master (unique_id)
);
"""
class DBMasterTable(Base):
"""
Master table to keep track of all experiments and unique keys associated with the experiment
"""
__tablename__ = "master"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
experiment_name = Column(String(256))
experiment_description = Column(String(2048))
experiment_id = Column(String(10), unique=True)
participant_id = Column(String(50), unique=True)
extra_metadata = Column(String(4096)) # JSON-formatted metadata
children_replay = relationship("DbReplayTable", back_populates="parent")
children_strat = relationship("DbStratTable", back_populates="parent")
children_config = relationship("DbConfigTable", back_populates="parent")
children_raw = relationship("DbRawTable", back_populates="parent")
@classmethod
def from_sqlite(cls, row):
this = DBMasterTable()
this.unique_id = row["unique_id"]
this.experiment_name = row["experiment_name"]
this.experiment_description = row["experiment_description"]
this.experiment_id = row["experiment_id"]
return this
def __repr__(self):
return (
f"<DBMasterTable(unique_id={self.unique_id})"
f", experiment_name={self.experiment_name}, "
f"experiment_description={self.experiment_description}, "
f"experiment_id={self.experiment_id})>"
)
@staticmethod
def update(engine):
logger.info("DBMasterTable : update called")
if not DBMasterTable._has_column(engine, "extra_metadata"):
DBMasterTable._add_column(engine, "extra_metadata")
if not DBMasterTable._has_column(engine, "participant_id"):
DBMasterTable._add_column(engine, "participant_id")
@staticmethod
def requires_update(engine):
return not DBMasterTable._has_column(
engine, "extra_metadata"
) or not DBMasterTable._has_column(engine, "participant_id")
@staticmethod
def _has_column(engine, column: str):
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('master') WHERE name='{0}'".format(
column
)
)
rows = result.fetchall()
count = rows[0][0]
return count != 0
@staticmethod
def _add_column(engine, column: str):
try:
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('master') WHERE name='{0}'".format(
column
)
)
rows = result.fetchall()
count = rows[0][0]
if 0 == count:
logger.debug(
"Altering the master table to add the {0} column".format(column)
)
engine.execute(
"ALTER TABLE master ADD COLUMN {0} VARCHAR".format(column)
)
engine.commit()
except Exception as e:
logger.debug(f"Column already exists, no need to alter. [{e}]")
class DbReplayTable(Base):
__tablename__ = "replay_data"
use_extra_info = False
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
message_type = Column(String(64))
# specify the pickler to allow backwards compatibility between 3.7 and 3.8
message_contents = Column(PickleType(pickler=pickle))
extra_info = Column(PickleType(pickler=pickle))
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_replay")
__mapper_args__ = {}
@classmethod
def from_sqlite(cls, row):
this = DbReplayTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.message_type = row["message_type"]
this.message_contents = row["message_contents"]
this.master_table_id = row["master_table_id"]
if "extra_info" in row:
this.extra_info = row["extra_info"]
else:
this.extra_info = None
this.strat = row["strat"]
return this
def __repr__(self):
return (
f"<DbReplayTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp}, "
f"message_type={self.message_type}"
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def _has_extra_info(engine):
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('replay_data') WHERE name='extra_info'"
)
rows = result.fetchall()
count = rows[0][0]
return count != 0
@staticmethod
def _configs_require_conversion(engine):
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
results = session.query(DbReplayTable).all()
for result in results:
if result.message_contents["type"] == "setup":
config_str = result.message_contents["message"]["config_str"]
config = Config(config_str=config_str)
if config.version < __version__:
return True # assume that if any config needs to be refactored, all of them do
return False
@staticmethod
def update(engine):
logger.info("DbReplayTable : update called")
if not DbReplayTable._has_extra_info(engine):
DbReplayTable._add_extra_info(engine)
if DbReplayTable._configs_require_conversion(engine):
DbReplayTable._convert_configs(engine)
@staticmethod
def requires_update(engine):
return not DbReplayTable._has_extra_info(
engine
) or DbReplayTable._configs_require_conversion(engine)
@staticmethod
def _add_extra_info(engine):
try:
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('replay_data') WHERE name='extra_info'"
)
rows = result.fetchall()
count = rows[0][0]
if 0 == count:
logger.debug(
"Altering the replay_data table to add the extra_info column"
)
engine.execute("ALTER TABLE replay_data ADD COLUMN extra_info BLOB")
engine.commit()
except Exception as e:
logger.debug(f"Column already exists, no need to alter. [{e}]")
@staticmethod
def _convert_configs(engine):
Session = sessionmaker(bind=engine)
session = Session()
results = session.query(DbReplayTable).all()
for result in results:
if result.message_contents["type"] == "setup":
config_str = result.message_contents["message"]["config_str"]
config = Config(config_str=config_str)
if config.version < __version__:
config.convert_to_latest()
new_str = str(config)
new_message = {"type": "setup", "message": {"config_str": new_str}}
if "version" in result.message_contents:
new_message["version"] = result.message_contents["version"]
result.message_contents = new_message
session.commit()
logger.info("DbReplayTable : updated old configs.")
class DbStratTable(Base):
__tablename__ = "strat_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
strat = Column(PickleType(pickler=pickle))
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_strat")
@classmethod
def from_sqlite(cls, row):
this = DbStratTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.strat = row["strat"]
this.master_table_id = row["master_table_id"]
return this
def __repr__(self):
return (
f"<DbStratTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp} "
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def update(engine):
logger.info("DbStratTable : update called")
@staticmethod
def requires_update(engine):
return False
class DbConfigTable(Base):
__tablename__ = "config_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
config = Column(PickleType(pickler=pickle))
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_config")
@classmethod
def from_sqlite(cls, row):
this = DbConfigTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.config = row["config"]
this.master_table_id = row["master_table_id"]
return this
def __repr__(self):
return (
f"<DbStratTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp} "
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def update(engine):
logger.info("DbConfigTable : update called")
@staticmethod
def requires_update(engine):
return False
class DbRawTable(Base):
"""
Fact table to store the raw data of each iteration of an experiment.
"""
__tablename__ = "raw_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
model_data = Column(Boolean)
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_raw")
children_param = relationship("DbParamTable", back_populates="parent")
children_outcome = relationship("DbOutcomeTable", back_populates="parent")
@classmethod
def from_sqlite(cls, row):
this = DbRawTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.model_data = row["model_data"]
this.master_table_id = row["master_table_id"]
return this
def __repr__(self):
return (
f"<DbRawTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp} "
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def update(db, engine):
logger.info("DbRawTable : update called")
# Get every master table
for master_table in db.get_master_records():
# Rebuild a raw-data row from each "tell" message in the replay log
for message in master_table.children_replay:
if message.message_type != "tell":
continue
timestamp = message.timestamp
# Deserialize pickle message
message_contents = message.message_contents
# Get outcome
outcomes = message_contents["message"]["outcome"]
# Get parameters
params = message_contents["message"]["config"]
# Get model_data
model_data = message_contents["message"].get("model_data", True)
db_raw_record = db.record_raw(
master_table=master_table,
model_data=bool(model_data),
timestamp=timestamp,
)
for param_name, param_value in params.items():
if isinstance(param_value, Iterable) and type(param_value) != str:
if len(param_value) == 1:
db.record_param(
raw_table=db_raw_record,
param_name=str(param_name),
param_value=float(param_value[0]),
)
else:
for j, v in enumerate(param_value):
db.record_param(
raw_table=db_raw_record,
param_name=str(param_name) + "_stimuli" + str(j),
param_value=float(v),
)
else:
db.record_param(
raw_table=db_raw_record,
param_name=str(param_name),
param_value=float(param_value),
)
if isinstance(outcomes, Iterable) and type(outcomes) != str:
for j, outcome_value in enumerate(outcomes):
if (
isinstance(outcome_value, Iterable)
and type(outcome_value) != str
):
if len(outcome_value) == 1:
outcome_value = outcome_value[0]
else:
raise ValueError(
"Multi-outcome values must be a list of lists of length 1!"
)
db.record_outcome(
raw_table=db_raw_record,
outcome_name="outcome_" + str(j),
outcome_value=float(outcome_value),
)
else:
db.record_outcome(
raw_table=db_raw_record,
outcome_name="outcome",
outcome_value=float(outcomes),
)
@staticmethod
def requires_update(engine):
"""Check if the raw table is empty, and data already exists."""
n_raws = engine.execute("SELECT COUNT (*) FROM raw_data").fetchone()[0]
n_tells = engine.execute(
"SELECT COUNT (*) FROM replay_data \
WHERE message_type = 'tell'"
).fetchone()[0]
if n_raws == 0 and n_tells != 0:
return True
return False
class DbParamTable(Base):
"""
Dimension table to store the parameters of each iteration of an experiment.
Supports multiple parameters per iteration, and multiple stimuli per parameter.
"""
__tablename__ = "param_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
param_name = Column(String(50))
param_value = Column(String(50))
iteration_id = Column(Integer, ForeignKey("raw_data.unique_id"))
parent = relationship("DbRawTable", back_populates="children_param")
@classmethod
def from_sqlite(cls, row):
this = DbParamTable()
this.unique_id = row["unique_id"]
this.param_name = row["param_name"]
this.param_value = row["param_value"]
this.iteration_id = row["iteration_id"]
return this
def __repr__(self):
return (
f"<DbParamTable(unique_id={self.unique_id})"
f", iteration_id={self.iteration_id}>"
)
@staticmethod
def update(engine):
logger.info("DbParamTable : update called")
@staticmethod
def requires_update(engine):
return False
class DbOutcomeTable(Base):
"""
Dimension table to store the outcomes of each iteration of an experiment.
Supports multiple outcomes per iteration.
"""
__tablename__ = "outcome_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
outcome_name = Column(String(50))
outcome_value = Column(Float)
iteration_id = Column(Integer, ForeignKey("raw_data.unique_id"))
parent = relationship("DbRawTable", back_populates="children_outcome")
@classmethod
def from_sqlite(cls, row):
this = DbOutcomeTable()
this.unique_id = row["unique_id"]
this.outcome_name = row["outcome_name"]
this.outcome_value = row["outcome_value"]
this.iteration_id = row["iteration_id"]
return this
def __repr__(self):
return (
f"<DbOutcomeTable(unique_id={self.unique_id})"
f", iteration_id={self.iteration_id}>"
)
@staticmethod
def update(engine):
logger.info("DbOutcomeTable : update called")
@staticmethod
def requires_update(engine):
return False
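# --- Illustrative sketch (hedged): exercising the migration hooks against a
# throwaway in-memory engine. Assumes the SQLAlchemy 1.x engine.execute API
# that this module already relies on.
def _demo_requires_update():
    from sqlalchemy import create_engine
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)  # a fresh schema already has all columns
    return DBMasterTable.requires_update(engine)  # expected: False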
|
aepsych-main
|
aepsych/database/tables.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Any
import torch
from gpytorch.kernels.rbf_kernel_grad import RBFKernelGrad
class RBFKernelPartialObsGrad(RBFKernelGrad):
"""An RBF kernel over observations of f, and partial/non-overlapping
observations of the gradient of f.
gpytorch.kernels.rbf_kernel_grad assumes a block structure where every
partial derivative is observed at the same set of points at which x is
observed. This generalizes that by allowing f and any subset of the
derivatives of f to be observed at different sets of points.
The final column of x1 and x2 needs to be an index that identifies what is
observed at that point. It should be 0 if this observation is of f, and i
if it is of df/dxi.
"""
def forward(
self, x1: torch.Tensor, x2: torch.Tensor, diag: bool = False, **params: Any
) -> torch.Tensor:
# Extract grad index from each
grad_idx1 = x1[..., -1].to(dtype=torch.long)
grad_idx2 = x2[..., -1].to(dtype=torch.long)
K = super().forward(x1[..., :-1], x2[..., :-1], diag=diag, **params)
# Compute which elements to return
n1 = x1.shape[-2]
n2 = x2.shape[-2]
d = x1.shape[-1] - 1
p1 = [(i * (d + 1)) + int(grad_idx1[i]) for i in range(n1)]
p2 = [(i * (d + 1)) + int(grad_idx2[i]) for i in range(n2)]
if not diag:
return K[..., p1, :][..., p2]
else:
return K[..., p1]
def num_outputs_per_input(self, x1: torch.Tensor, x2: torch.Tensor) -> int:
return 1
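# --- Illustrative sketch (hedged): the final input column is the derivative
# index (0 for f itself, i for df/dx_i). Shapes are arbitrary; .to_dense()
# follows recent gpytorch/linear_operator (older versions use .evaluate()).
def _demo_partial_obs_grad_kernel():
    kern = RBFKernelPartialObsGrad()
    x = torch.rand(5, 2)
    # observe f at the first three points and df/dx_1 at the last two
    idx = torch.tensor([0.0, 0.0, 0.0, 1.0, 1.0]).unsqueeze(-1)
    x_aug = torch.cat([x, idx], dim=-1)
    return kern(x_aug, x_aug).to_dense()  # 5 x 5 covariance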
|
aepsych-main
|
aepsych/kernels/rbf_partial_grad.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
aepsych/kernels/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
"""
from __future__ import annotations
from typing import Optional
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCAcquisition
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import MCAcquisitionObjective
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
from torch.distributions.bernoulli import Bernoulli
def bald_acq(obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate Mutual Information acquisition function.
With latent function F and X a hypothetical observation at a new point,
I(F; X) = I(X; F) = H(X) - H(X|F),
H(X|F) = E_f[ H(X|F=f) ],
i.e., we take the posterior entropy of the (Bernoulli) observation X given the
current model posterior and subtract the conditional entropy on F, that being
the mean entropy over the posterior for F. This is equivalent to the BALD
acquisition function in Houlsby et al. NeurIPS 2012.
Args:
obj_samples (torch.Tensor): Objective samples from the GP, of
shape num_samples x batch_shape x d_out
Returns:
torch.Tensor: Value of acquisition at samples.
"""
mean_p = obj_samples.mean(dim=0)
posterior_entropies = Bernoulli(mean_p).entropy().squeeze(-1)
sample_entropies = Bernoulli(obj_samples).entropy()
conditional_entropies = sample_entropies.mean(dim=0).squeeze(-1)
return posterior_entropies - conditional_entropies
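# --- Illustrative sketch (hedged): BALD on synthetic Bernoulli probabilities.
# Draws that agree (all near 0 or 1) score near zero; disagreement across
# samples scores higher.
def _demo_bald_acq():
    obj_samples = torch.rand(1000, 4, 1)  # num_samples x batch_shape x d_out
    return bald_acq(obj_samples)  # shape (4,)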
class BernoulliMCMutualInformation(MCAcquisitionFunction):
"""Mutual Information acquisition function for a bernoulli outcome.
Given a model and an objective link function, calculate the mutual
information of a trial at a new point and the distribution on the
latent function.
Objective here should give values in (0, 1) (e.g. logit or probit).
"""
def __init__(
self,
model: Model,
objective: MCAcquisitionObjective,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Single Bernoulli mutual information for active learning
Args:
model (Model): A fitted model.
objective (MCAcquisitionObjective): An MCAcquisitionObjective representing the link function
(e.g., logistic or probit)
sampler (MCSampler, optional): The sampler used for drawing MC samples.
"""
if sampler is None:
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1024]))
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=None
)
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate mutual information on the candidate set `X`.
Args:
X: A `batch_size x q x d`-dim Tensor.
Returns:
Tensor of shape `batch_size x q` representing the mutual
information of a hypothetical trial at X that active
learning hopes to maximize.
"""
post = self.model.posterior(X)
samples = self.sampler(post)
return self.acquisition(self.objective(samples, X))
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition function value based on samples.
Args:
obj_samples (torch.Tensor): Samples from the model, transformed through the objective.
Returns:
torch.Tensor: value of the acquisition function (BALD) at the input samples.
"""
# RejectionSampler drops the final dim so we reaugment it
# here for compatibility with non-Monotonic MCAcquisition
if len(obj_samples.shape) == 2:
obj_samples = obj_samples[..., None]
return bald_acq(obj_samples)
@acqf_input_constructor(BernoulliMCMutualInformation)
def construct_inputs_mi(
model,
training_data,
objective=None,
sampler=None,
**kwargs,
):
return {
"model": model,
"objective": objective,
"sampler": sampler,
}
class MonotonicBernoulliMCMutualInformation(MonotonicMCAcquisition):
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition function value based on samples.
Args:
obj_samples (torch.Tensor): Samples from the model, transformed through the objective.
Returns:
torch.Tensor: value of the acquisition function (BALD) at the input samples.
"""
# TODO this is identical to the non-monotonic BALD acquisition with a different
# base class mixin, consider redesigning?
# RejectionSampler drops the final dim so we reaugment it
# here for compatibility with non-Monotonic MCAcquisition
if len(obj_samples.shape) == 2:
obj_samples = obj_samples[..., None]
return bald_acq(obj_samples)
|
aepsych-main
|
aepsych/acquisition/mutual_information.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Union
import torch
from aepsych.acquisition.objective import ProbitObjective
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.monte_carlo import (
MCAcquisitionFunction,
MCAcquisitionObjective,
MCSampler,
)
from botorch.models.model import Model
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
class MCLevelSetEstimation(MCAcquisitionFunction):
def __init__(
self,
model: Model,
target: Union[float, Tensor] = 0.75,
beta: Union[float, Tensor] = 3.84,
objective: Optional[MCAcquisitionObjective] = None,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Monte-carlo level set estimation.
Args:
model: A fitted model.
target: the level set (after objective transform) to be estimated
beta: a parameter that governs explore-exploit tradeoff
objective: An MCAcquisitionObjective representing the link function
(e.g., logistic or probit.) applied on the samples.
Can be implemented via GenericMCObjective.
sampler: The sampler used for drawing MC samples.
"""
if sampler is None:
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([512]))
if objective is None:
objective = ProbitObjective()
super().__init__(model=model, sampler=sampler, objective=None, X_pending=None)
self.objective = objective
self.beta = beta
self.target = target
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition based on objective samples.
Usually you should not call this directly unless you are
subclassing this class and modifying how objective samples
are generated.
Args:
obj_samples (torch.Tensor): Samples from the model, transformed
by the objective. Should be samples x batch_shape.
Returns:
torch.Tensor: Acquisition function at the sampled values.
"""
mean = obj_samples.mean(dim=0)
variance = obj_samples.var(dim=0)
# prevent numerical issues if probit makes all the values 1 or 0
variance = torch.clamp(variance, min=1e-5)
delta = torch.sqrt(self.beta * variance)
return delta - torch.abs(mean - self.target)
@t_batch_mode_transform()
def forward(self, X: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition function
Args:
X (torch.Tensor): Points at which to evaluate.
Returns:
torch.Tensor: Value of the acquisition function at these points.
"""
post = self.model.posterior(X)
samples = self.sampler(post) # num_samples x batch_shape x q x d_out
return self.acquisition(self.objective(samples, X)).squeeze(-1)
@acqf_input_constructor(MCLevelSetEstimation)
def construct_inputs_lse(
model,
training_data,
objective=None,
target=0.75,
beta=3.84,
sampler=None,
**kwargs,
):
return {
"model": model,
"objective": objective,
"target": target,
"beta": beta,
"sampler": sampler,
}
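# --- Illustrative sketch (hedged): the LSE score above trades off posterior
# uncertainty (delta) against distance from the target level. Synthetic
# samples stand in for objective-transformed posterior draws.
def _demo_lse_score(target: float = 0.75, beta: float = 3.84):
    obj_samples = torch.rand(512, 8)  # samples x batch_shape
    mean, variance = obj_samples.mean(dim=0), obj_samples.var(dim=0)
    delta = torch.sqrt(beta * torch.clamp(variance, min=1e-5))
    return delta - torch.abs(mean - target)  # shape (8,)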
|
aepsych-main
|
aepsych/acquisition/lse.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Tuple
import torch
from botorch.acquisition.objective import PosteriorTransform
from gpytorch.models import GP
from gpytorch.utils.quadrature import GaussHermiteQuadrature1D
from torch import Tensor
from torch.distributions import Normal
from .bvn import bvn_cdf
def posterior_at_xstar_xq(
model: GP,
Xstar: Tensor,
Xq: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Evaluate the posteriors of f at single point Xstar and set of points Xq.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) tensor.
Xq: (b x m x d) tensor.
Returns:
Mu_s: (b x 1) mean at Xstar.
Sigma2_s: (b x 1) variance at Xstar.
Mu_q: (b x m) mean at Xq.
Sigma2_q: (b x m) variance at Xq.
Sigma_sq: (b x m) covariance between Xstar and each point in Xq.
"""
# Evaluate posterior and extract needed components
Xext = torch.cat((Xstar, Xq), dim=-2)
posterior = model.posterior(Xext, posterior_transform=posterior_transform)
mu = posterior.mean[..., :, 0]
Mu_s = mu[..., 0].unsqueeze(-1)
Mu_q = mu[..., 1:]
Cov = posterior.distribution.covariance_matrix
Sigma2_s = Cov[..., 0, 0].unsqueeze(-1)
Sigma2_q = torch.diagonal(Cov[..., 1:, 1:], dim1=-1, dim2=-2)
Sigma_sq = Cov[..., 0, 1:]
return Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq
def lookahead_levelset_at_xstar(
model: GP,
Xstar: Tensor,
Xq: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs: Dict[str, Any],
):
"""
Evaluate the look-ahead level-set posterior at Xq given observation at xstar.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) observation point.
Xq: (b x m x d) reference points.
gamma: Threshold in f-space.
Returns:
Px: (b x m) Level-set posterior at Xq, before observation at xstar.
P1: (b x m) Level-set posterior at Xq, given observation of 1 at xstar.
P0: (b x m) Level-set posterior at Xq, given observation of 0 at xstar.
py1: (b x 1) Probability of observing 1 at xstar.
"""
Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(
model=model, Xstar=Xstar, Xq=Xq, posterior_transform=posterior_transform
)
gamma = kwargs.get("gamma")
if gamma is None:
raise RuntimeError("lookahead_levelset_at_xstar requires passing gamma!")
# Compute look-ahead components
Norm = torch.distributions.Normal(0, 1)
Sigma_q = torch.sqrt(Sigma2_q)
b_q = (gamma - Mu_q) / Sigma_q
Phi_bq = Norm.cdf(b_q)
denom = torch.sqrt(1 + Sigma2_s)
a_s = Mu_s / denom
Phi_as = Norm.cdf(a_s)
Z_rho = -Sigma_sq / (Sigma_q * denom)
Z_qs = bvn_cdf(a_s, b_q, Z_rho)
Px = Phi_bq
py1 = Phi_as
P1 = Z_qs / py1
P0 = (Phi_bq - Z_qs) / (1 - py1)
return Px, P1, P0, py1
def lookahead_p_at_xstar(
model: GP,
Xstar: Tensor,
Xq: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs: Dict[str, Any],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
Evaluate the look-ahead response probability posterior at Xq given observation at xstar.
Uses the approximation given in expr. 9 in:
Zhao, Guang, et al. "Efficient active learning for Gaussian process classification by
error reduction." Advances in Neural Information Processing Systems 34 (2021): 9734-9746.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) observation point.
Xq: (b x m x d) reference points.
kwargs: ignored (here for compatibility with other kinds of lookahead)
Returns:
Px: (b x m) Response posterior at Xq, before observation at xstar.
P1: (b x m) Response posterior at Xq, given observation of 1 at xstar.
P0: (b x m) Response posterior at Xq, given observation of 0 at xstar.
py1: (b x 1) Probability of observing 1 at xstar.
"""
Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(
model=model, Xstar=Xstar, Xq=Xq, posterior_transform=posterior_transform
)
probit = Normal(0, 1).cdf
def lookahead_inner(f_q):
mu_tilde_star = Mu_s + (f_q - Mu_q) * Sigma_sq / Sigma2_q
sigma_tilde_star = Sigma2_s - (Sigma_sq**2) / Sigma2_q
return probit(mu_tilde_star / torch.sqrt(sigma_tilde_star + 1)) * probit(f_q)
pstar_marginal_1 = probit(Mu_s / torch.sqrt(1 + Sigma2_s))
pstar_marginal_0 = 1 - pstar_marginal_1
pq_marginal_1 = probit(Mu_q / torch.sqrt(1 + Sigma2_q))
quad = GaussHermiteQuadrature1D()
fq_mvn = Normal(Mu_q, torch.sqrt(Sigma2_q))
joint_ystar1_yq1 = quad(lookahead_inner, fq_mvn)
joint_ystar0_yq1 = pq_marginal_1 - joint_ystar1_yq1
# now go from the joint to the conditional (lookahead) posterior on xq
lookahead_pq1 = joint_ystar1_yq1 / pstar_marginal_1
lookahead_pq0 = joint_ystar0_yq1 / pstar_marginal_0
return pq_marginal_1, lookahead_pq1, lookahead_pq0, pstar_marginal_1
def approximate_lookahead_levelset_at_xstar(
model: GP,
Xstar: Tensor,
Xq: Tensor,
gamma: float,
posterior_transform: Optional[PosteriorTransform] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
The look-ahead posterior approximation of Lyu et al.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) observation point.
Xq: (b x m x d) reference points.
gamma: Threshold in f-space.
Returns:
Px: (b x m) Level-set posterior at Xq, before observation at xstar.
P1: (b x m) Level-set posterior at Xq, given observation of 1 at xstar.
P0: (b x m) Level-set posterior at Xq, given observation of 0 at xstar.
py1: (b x 1) Probability of observing 1 at xstar.
"""
Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(
model=model, Xstar=Xstar, Xq=Xq, posterior_transform=posterior_transform
)
Norm = torch.distributions.Normal(0, 1)
Mu_s_pdf = torch.exp(Norm.log_prob(Mu_s))
Mu_s_cdf = Norm.cdf(Mu_s)
# Formulae from the supplement of the paper (Result 2)
vnp1_p = Mu_s_pdf**2 / Mu_s_cdf**2 + Mu_s * Mu_s_pdf / Mu_s_cdf # (C.4)
p_p = Norm.cdf(Mu_s / torch.sqrt(1 + Sigma2_s)) # (C.5)
vnp1_n = Mu_s_pdf**2 / (1 - Mu_s_cdf) ** 2 - Mu_s * Mu_s_pdf / (
1 - Mu_s_cdf
) # (C.6)
p_n = 1 - p_p # (C.7)
vtild = vnp1_p * p_p + vnp1_n * p_n
Sigma2_q_np1 = Sigma2_q - Sigma_sq**2 / ((1 / vtild) + Sigma2_s) # (C.8)
Px = Norm.cdf((gamma - Mu_q) / torch.sqrt(Sigma2_q))
P1 = Norm.cdf((gamma - Mu_q) / torch.sqrt(Sigma2_q_np1))
P0 = P1 # Same because we ignore value of y in this approximation
py1 = 0.5 * torch.ones(*Px.shape[:-1], 1) # Value doesn't matter because P1 = P0
return Px, P1, P0, py1
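# --- Illustrative numerical check (hedged): the marginal response probability
# used above, Phi(mu / sqrt(1 + sigma^2)), can be verified by Monte Carlo over
# the latent f. The values below are arbitrary.
def _demo_marginal_response_prob():
    mu, sigma2 = torch.tensor(0.3), torch.tensor(0.5)
    std_norm = Normal(0.0, 1.0)
    analytic = std_norm.cdf(mu / torch.sqrt(1 + sigma2))
    f = Normal(mu, sigma2.sqrt()).sample(torch.Size([100000]))
    mc = std_norm.cdf(f).mean()
    return analytic, mc  # should agree to roughly two decimal places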
|
aepsych-main
|
aepsych/acquisition/lookahead_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.posteriors import Posterior
from botorch.sampling.base import MCSampler
from torch import Tensor
class RejectionSampler(MCSampler):
"""
Samples from a posterior subject to the constraint that samples in constrained_idx
should be >= 0.
If not enough feasible samples are generated, the least-violating
samples are returned.
"""
def __init__(
self, num_samples: int, num_rejection_samples: int, constrained_idx: Tensor
):
"""Initialize RejectionSampler
Args:
num_samples (int): Number of samples to return. Note that if fewer samples
than this number are positive in the required dimension, the remaining
samples returned will be the "least violating", i.e. closest to 0.
num_rejection_samples (int): Number of samples to draw before rejecting.
constrained_idx (Tensor): Indices of input dimensions that should be
constrained positive.
"""
self.num_samples = num_samples
self.num_rejection_samples = num_rejection_samples
self.constrained_idx = constrained_idx
super().__init__(sample_shape=torch.Size([num_samples]))
def forward(self, posterior: Posterior) -> Tensor:
"""Run the rejection sampler.
Args:
posterior (Posterior): The unconstrained GP posterior object
to perform rejection samples on.
Returns:
Tensor: Kept samples.
"""
samples = posterior.rsample(
sample_shape=torch.Size([self.num_rejection_samples])
)
assert (
samples.shape[-1] == 1
), "Batches not supported" # TODO T68656582 handle batches later
constrained_samps = samples[:, self.constrained_idx, 0]
valid = (constrained_samps >= 0).all(dim=1)
if valid.sum() < self.num_samples:
worst_violation = constrained_samps.min(dim=1)[0]
keep = torch.argsort(worst_violation, descending=True)[: self.num_samples]
else:
keep = torch.where(valid)[0][: self.num_samples]
return samples[keep, :, :]
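# --- Illustrative sketch (hedged): the core keep/reject logic from forward(),
# run on synthetic draws with dimensions 1 and 2 constrained positive. A real
# run falls back to the least-violating draws when too few are feasible.
def _demo_rejection_keep_logic():
    samples = torch.randn(100, 4, 1)  # num_rejection_samples x n x 1
    constrained = samples[:, [1, 2], 0]
    valid = (constrained >= 0).all(dim=1)
    keep = torch.where(valid)[0][:10]  # up to 10 fully feasible draws
    return samples[keep]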
|
aepsych-main
|
aepsych/acquisition/rejection_sampler.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .lookahead import ApproxGlobalSUR, EAVC, GlobalMI, GlobalSUR, LocalMI, LocalSUR
from .lse import MCLevelSetEstimation
from .mc_posterior_variance import MCPosteriorVariance, MonotonicMCPosteriorVariance
from .monotonic_rejection import MonotonicMCLSE
from .mutual_information import (
BernoulliMCMutualInformation,
MonotonicBernoulliMCMutualInformation,
)
from .objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
ProbitObjective,
semi_p,
)
lse_acqfs = [
MonotonicMCLSE,
GlobalMI,
GlobalSUR,
ApproxGlobalSUR,
EAVC,
LocalMI,
LocalSUR,
]
__all__ = [
"BernoulliMCMutualInformation",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCLSE",
"MCPosteriorVariance",
"MonotonicMCPosteriorVariance",
"MCPosteriorVariance",
"MCLevelSetEstimation",
"ProbitObjective",
"FloorProbitObjective",
"FloorLogitObjective",
"FloorGumbelObjective",
"GlobalMI",
"GlobalSUR",
"ApproxGlobalSUR",
"EAVC",
"LocalMI",
"LocalSUR",
"semi_p",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/acquisition/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCAcquisition
from aepsych.acquisition.objective import ProbitObjective
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import MCAcquisitionObjective
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
def balv_acq(obj_samps: torch.Tensor) -> torch.Tensor:
"""Evaluate BALV (posterior variance) on a set of objective samples.
Args:
obj_samps (torch.Tensor): Samples from the GP, transformed by the objective.
Should be samples x batch_shape.
Returns:
torch.Tensor: Acquisition function value.
"""
# the output of objective is of shape num_samples x batch_shape x d_out
# objective should project the last dimension to 1d,
# so the incoming tensor is samples x batch_shape; we take the variance over the sample dim
return obj_samps.var(dim=0).squeeze(-1)
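# --- Illustrative sketch (hedged): BALV is simply the sample variance of the
# objective-transformed draws; the shapes below are arbitrary.
def _demo_balv_acq():
    obj_samps = torch.rand(512, 6, 1)  # samples x batch_shape x 1
    return balv_acq(obj_samps)  # shape (6,)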
class MCPosteriorVariance(MCAcquisitionFunction):
r"""Posterior variance, computed using samples so we can use objective/transform"""
def __init__(
self,
model: Model,
objective: Optional[MCAcquisitionObjective] = None,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Posterior Variance of Link Function
Args:
model: A fitted model.
objective: An MCAcquisitionObjective representing the link function
(e.g., logistic or probit.) applied on the difference of (usually 1-d)
two samples. Can be implemented via GenericMCObjective.
sampler: The sampler used for drawing MC samples.
"""
if sampler is None:
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([512]))
if objective is None:
objective = ProbitObjective()
super().__init__(model=model, sampler=sampler, objective=None, X_pending=None)
self.objective = objective
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate MCPosteriorVariance on the candidate set `X`.
Args:
X: A `batch_size x q x d`-dim Tensor
Returns:
Posterior variance of link function at X that active learning
hopes to maximize
"""
# the output is of shape batch_shape x q x d_out
post = self.model.posterior(X)
samples = self.sampler(post) # num_samples x batch_shape x q x d_out
return self.acquisition(self.objective(samples, X))
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
# RejectionSampler drops the final dim so we reaugment it
# here for compatibility with non-Monotonic MCAcquisition
if len(obj_samples.shape) == 2:
obj_samples = obj_samples[..., None]
return balv_acq(obj_samples)
@acqf_input_constructor(MCPosteriorVariance)
def construct_inputs(
model,
training_data,
objective=None,
sampler=None,
**kwargs,
):
return {
"model": model,
"objective": objective,
"sampler": sampler,
}
class MonotonicMCPosteriorVariance(MonotonicMCAcquisition):
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
return balv_acq(obj_samples)
|
aepsych-main
|
aepsych/acquisition/mc_posterior_variance.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from math import pi as _pi
import torch
inv_2pi = 1 / (2 * _pi)
_neg_inv_sqrt2 = -1 / (2**0.5)
def _gauss_legendre20(dtype):
_abscissae = torch.tensor(
[
0.9931285991850949,
0.9639719272779138,
0.9122344282513259,
0.8391169718222188,
0.7463319064601508,
0.6360536807265150,
0.5108670019508271,
0.3737060887154196,
0.2277858511416451,
0.07652652113349733,
],
dtype=dtype,
)
_weights = torch.tensor(
[
0.01761400713915212,
0.04060142980038694,
0.06267204833410906,
0.08327674157670475,
0.1019301198172404,
0.1181945319615184,
0.1316886384491766,
0.1420961093183821,
0.1491729864726037,
0.1527533871307259,
],
dtype=dtype,
)
abscissae = torch.cat([1.0 - _abscissae, 1.0 + _abscissae], dim=0)
weights = torch.cat([_weights, _weights], dim=0)
return abscissae, weights
def _ndtr(x: torch.Tensor) -> torch.Tensor:
"""
Standard normal CDF. Called <phid> in Genz's original code.
"""
return 0.5 * torch.erfc(_neg_inv_sqrt2 * x)
def _bvnu(
dh: torch.Tensor,
dk: torch.Tensor,
r: torch.Tensor,
) -> torch.Tensor:
"""
Primary subroutine for bvnu()
"""
# Precompute some terms
h = dh
k = dk
hk = h * k
x, w = _gauss_legendre20(dtype=dh.dtype)
asr = 0.5 * torch.asin(r)
sn = torch.sin(asr[..., None] * x)
res = (sn * hk[..., None] - 0.5 * (h**2 + k**2)[..., None]) / (1 - sn**2)
res = torch.sum(w * torch.exp(res), dim=-1)
res = res * inv_2pi * asr + _ndtr(-h) * _ndtr(-k)
return torch.clip(res, 0, 1)
def bvn_cdf(
xu: torch.Tensor,
yu: torch.Tensor,
r: torch.Tensor,
) -> torch.Tensor:
"""
Evaluate the bivariate normal CDF.
WARNING: Implements only the routine for moderate levels of correlation. Will be
inaccurate and should not be used for correlations larger than 0.925.
Standard (mean 0, var 1) bivariate normal distribution with correlation r.
Evaluated from -inf to xu, and -inf to yu.
Based on function developed by Alan Genz:
http://www.math.wsu.edu/faculty/genz/software/matlab/bvn.m
based in turn on
Drezner, Z and G.O. Wesolowsky, (1989),
On the computation of the bivariate normal integral,
Journal of Statist. Comput. Simul. 35, pp. 101-107.
Args:
xu: Upper limits for cdf evaluation in x
yu: Upper limits for cdf evaluation in y
r: BVN correlation
Returns: Tensor of cdf evaluations of same size as xu, yu, and r.
"""
p = 1 - _ndtr(-xu) - _ndtr(-yu) + _bvnu(xu, yu, r)
return torch.clip(p, 0, 1)
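# --- Illustrative check (hedged): with zero correlation the bivariate normal
# CDF factorizes, so bvn_cdf(x, y, 0) should equal Phi(x) * Phi(y).
def _demo_bvn_independence():
    x = torch.tensor([0.0, 1.0, -0.5])
    y = torch.tensor([0.3, -1.2, 0.0])
    r = torch.zeros_like(x)
    return bvn_cdf(x, y, r), _ndtr(x) * _ndtr(y)  # should match closely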
|
aepsych-main
|
aepsych/acquisition/bvn.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional, Tuple
from ax.models.torch.botorch_modular.acquisition import Acquisition
from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
class AEPsychAcquisition(Acquisition):
def get_botorch_objective_and_transform(
self, **kwargs
) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:
objective, transform = super().get_botorch_objective_and_transform(**kwargs)
if "objective" in self.options:
objective = self.options.pop("objective")
return objective, transform
|
aepsych-main
|
aepsych/acquisition/acquisition.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, cast
import numpy as np
import torch
from aepsych.utils import make_scaled_sobol
from botorch.acquisition import AcquisitionFunction
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.gpytorch import GPyTorchModel
from botorch.utils.transforms import t_batch_mode_transform
from scipy.stats import norm
from torch import Tensor
from .lookahead_utils import (
approximate_lookahead_levelset_at_xstar,
lookahead_levelset_at_xstar,
lookahead_p_at_xstar,
)
def Hb(p: Tensor):
"""
Binary entropy.
Args:
p: Tensor of probabilities.
Returns: Binary entropy for each probability.
"""
epsilon = torch.tensor(np.finfo(float).eps)
p = torch.clamp(p, min=epsilon, max=1 - epsilon)
return -torch.nan_to_num(p * torch.log2(p) + (1 - p) * torch.log2(1 - p))
def MI_fn(Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
"""
Average mutual information.
H(p) - E_y*[H(p | y*)]
Args:
Px: (b x m) Level-set posterior before observation
P1: (b x m) Level-set posterior given observation of 1
P0: (b x m) Level-set posterior given observation of 0
py1: (b x 1) Probability of observing 1
Returns: (b) tensor of mutual information averaged over Xq.
"""
mi = Hb(Px) - py1 * Hb(P1) - (1 - py1) * Hb(P0)
return mi.sum(dim=-1)
def ClassErr(p: Tensor) -> Tensor:
"""
Expected classification error, min(p, 1-p).
"""
return torch.min(p, 1 - p)
def SUR_fn(Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
"""
Stepwise uncertainty reduction.
Expected reduction in expected classification error given observation at Xstar,
averaged over Xq.
Args:
Px: (b x m) Level-set posterior before observation
P1: (b x m) Level-set posterior given observation of 1
P0: (b x m) Level-set posterior given observation of 0
py1: (b x 1) Probability of observing 1
Returns: (b) tensor of SUR values.
"""
sur = ClassErr(Px) - py1 * ClassErr(P1) - (1 - py1) * ClassErr(P0)
return sur.sum(dim=-1)
def EAVC_fn(Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
"""
Expected absolute value change.
Expected absolute change in expected level-set volume given observation at Xstar.
Args:
Px: (b x m) Level-set posterior before observation
P1: (b x m) Level-set posterior given observation of 1
P0: (b x m) Level-set posterior given observation of 0
py1: (b x 1) Probability of observing 1
Returns: (b) tensor of EAVC values.
"""
avc1 = torch.abs((Px - P1).sum(dim=-1))
avc0 = torch.abs((Px - P0).sum(dim=-1))
return py1.squeeze(-1) * avc1 + (1 - py1).squeeze(-1) * avc0
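# --- Illustrative sketch (hedged): the three scores above consume the same
# four posterior summaries; synthetic tensors stand in for real lookahead
# posteriors here.
def _demo_lookahead_scores():
    b, m = 3, 50
    Px = torch.rand(b, m)
    P1 = torch.clamp(Px + 0.1 * torch.randn(b, m), 0, 1)
    P0 = torch.clamp(Px - 0.1 * torch.randn(b, m), 0, 1)
    py1 = torch.rand(b, 1)
    return MI_fn(Px, P1, P0, py1), SUR_fn(Px, P1, P0, py1), EAVC_fn(Px, P1, P0, py1)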
class LookaheadAcquisitionFunction(AcquisitionFunction):
def __init__(
self,
model: GPyTorchModel,
target: Optional[float],
lookahead_type: str = "levelset",
) -> None:
"""
A look-ahead acquisition function (base class for the local and global variants below).
Args:
model: The gpytorch model.
target: Threshold value to target in p-space.
"""
super().__init__(model=model)
if lookahead_type == "levelset":
self.lookahead_fn = lookahead_levelset_at_xstar
assert target is not None, "Need a target for levelset lookahead!"
self.gamma = norm.ppf(target)
elif lookahead_type == "posterior":
self.lookahead_fn = lookahead_p_at_xstar
self.gamma = None
else:
raise RuntimeError(f"Got unknown lookahead type {lookahead_type}!")
## Local look-ahead acquisitions
class LocalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction):
def __init__(
self,
model: GPyTorchModel,
lookahead_type: str = "levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
) -> None:
"""
A localized look-ahead acquisition function.
Args:
model: The gpytorch model.
target: Threshold value to target in p-space.
"""
super().__init__(model=model, target=target, lookahead_type=lookahead_type)
self.posterior_transform = posterior_transform
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
"""
Evaluate acquisition function at X.
Args:
X: (b x 1 x d) point at which to evaluate acquisition function.
Returns: (b) tensor of acquisition values.
"""
Px, P1, P0, py1 = self.lookahead_fn(
model=self.model,
Xstar=X,
Xq=X,
gamma=self.gamma,
posterior_transform=self.posterior_transform,
) # Return shape here has m=1.
return self._compute_acqf(Px, P1, P0, py1)
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
raise NotImplementedError
class LocalMI(LocalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return MI_fn(Px, P1, P0, py1)
class LocalSUR(LocalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return SUR_fn(Px, P1, P0, py1)
@acqf_input_constructor(LocalMI, LocalSUR)
def construct_inputs_local_lookahead(
model: GPyTorchModel,
training_data,
lookahead_type="levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs,
):
return {
"model": model,
"lookahead_type": lookahead_type,
"target": target,
"posterior_transform": posterior_transform,
}
## Global look-ahead acquisitions
class GlobalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction):
def __init__(
self,
model: GPyTorchModel,
lookahead_type: str = "levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
) -> None:
"""
A global look-ahead acquisition function.
Args:
model: The gpytorch model.
target: Threshold value to target in p-space.
Xq: (m x d) global reference set.
"""
super().__init__(model=model, target=target, lookahead_type=lookahead_type)
self.posterior_transform = posterior_transform
assert (
Xq is not None or query_set_size is not None
), "Must pass either query set size or a query set!"
if Xq is not None and query_set_size is not None:
            assert Xq.shape[0] == query_set_size, (
                "If passing both Xq and query_set_size, "
                f"first dim of Xq should be query_set_size, got {Xq.shape[0]} != {query_set_size}"
            )
if Xq is None:
# cast to an int in case we got a float from Config, which
# would raise on make_scaled_sobol
query_set_size = cast(int, query_set_size) # make mypy happy
assert int(query_set_size) == query_set_size # make sure casting is safe
# if the asserts above pass and Xq is None, query_set_size is not None so this is safe
query_set_size = int(query_set_size) # cast
Xq = make_scaled_sobol(model.lb, model.ub, query_set_size)
self.register_buffer("Xq", Xq)
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
"""
Evaluate acquisition function at X.
Args:
            X: (b x 1 x d) point at which to evaluate acquisition function.
Returns: (b) tensor of acquisition values.
"""
Px, P1, P0, py1 = self._get_lookahead_posterior(X)
return self._compute_acqf(Px, P1, P0, py1)
def _get_lookahead_posterior(
self, X: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
Xq_batch = self.Xq.expand(X.shape[0], *self.Xq.shape)
return self.lookahead_fn(
model=self.model,
Xstar=X,
Xq=Xq_batch,
gamma=self.gamma,
posterior_transform=self.posterior_transform,
)
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
raise NotImplementedError
class GlobalMI(GlobalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return MI_fn(Px, P1, P0, py1)
class GlobalSUR(GlobalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return SUR_fn(Px, P1, P0, py1)
class ApproxGlobalSUR(GlobalSUR):
def __init__(
self,
model: GPyTorchModel,
lookahead_type="levelset",
target: Optional[float] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
) -> None:
assert (
lookahead_type == "levelset"
), f"ApproxGlobalSUR only supports lookahead on level set, got {lookahead_type}!"
super().__init__(
model=model,
target=target,
lookahead_type=lookahead_type,
query_set_size=query_set_size,
Xq=Xq,
)
def _get_lookahead_posterior(
self, X: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
Xq_batch = self.Xq.expand(X.shape[0], *self.Xq.shape)
return approximate_lookahead_levelset_at_xstar(
model=self.model,
Xstar=X,
Xq=Xq_batch,
gamma=self.gamma,
posterior_transform=self.posterior_transform,
)
class EAVC(GlobalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return EAVC_fn(Px, P1, P0, py1)
class MOCU(GlobalLookaheadAcquisitionFunction):
"""
MOCU acquisition function given in expr. 4 of:
Zhao, Guang, et al. "Uncertainty-aware active learning for optimal Bayesian classifier."
International Conference on Learning Representations (ICLR) 2021.
"""
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
current_max_query = torch.maximum(Px, 1 - Px)
# expectation w.r.t. y* of the max of pq
lookahead_pq1_max = torch.maximum(P1, 1 - P1)
lookahead_pq0_max = torch.maximum(P0, 1 - P0)
lookahead_max_query = lookahead_pq1_max * py1 + lookahead_pq0_max * (1 - py1)
return (lookahead_max_query - current_max_query).mean(-1)
class SMOCU(GlobalLookaheadAcquisitionFunction):
"""
SMOCU acquisition function given in expr. 11 of:
Zhao, Guang, et al. "Bayesian active learning by soft mean objective cost of uncertainty."
International Conference on Artificial Intelligence and Statistics (AISTATS) 2021.
"""
def __init__(self, k, *args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
stacked = torch.stack((Px, 1 - Px), dim=-1)
current_softmax_query = torch.logsumexp(self.k * stacked, dim=-1) / self.k
# expectation w.r.t. y* of the max of pq
lookahead_pq1_max = torch.maximum(P1, 1 - P1)
lookahead_pq0_max = torch.maximum(P0, 1 - P0)
lookahead_max_query = lookahead_pq1_max * py1 + lookahead_pq0_max * (1 - py1)
return (lookahead_max_query - current_softmax_query).mean(-1)
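# --- Editor's sketch (hedged addition, not part of the original file). SMOCU
# replaces MOCU's hard max(p, 1 - p) with a logsumexp soft-max; as k grows the
# two coincide, so k trades smoothness against fidelity to the MOCU objective.
def _demo_softmax_vs_max() -> None:
    p = torch.tensor([0.2, 0.5, 0.9])
    stacked = torch.stack((p, 1 - p), dim=-1)
    hard = torch.maximum(p, 1 - p)
    for k in (1.0, 10.0, 100.0):
        soft = torch.logsumexp(k * stacked, dim=-1) / k
        print(k, (soft - hard).abs().max().item())  # gap shrinks as k grows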
class BEMPS(GlobalLookaheadAcquisitionFunction):
"""
BEMPS acquisition function given in:
Tan, Wei, et al. "Diversity Enhanced Active Learning with Strictly Proper Scoring Rules."
Advances in Neural Information Processing Systems 34 (2021).
"""
def __init__(self, scorefun, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scorefun = scorefun
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
current_score = self.scorefun(Px)
lookahead_pq1_score = self.scorefun(P1)
lookahead_pq0_score = self.scorefun(P0)
lookahead_expected_score = lookahead_pq1_score * py1 + lookahead_pq0_score * (
1 - py1
)
return (lookahead_expected_score - current_score).mean(-1)
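# --- Editor's sketch (hedged addition, not part of the original file). BEMPS
# is parameterized by a strictly proper scoring rule; the name below is
# illustrative. A negative expected Brier score is one valid choice: for a
# Bernoulli(p) self-assessment the expected Brier score is p * (1 - p).
def neg_expected_brier(p: Tensor) -> Tensor:
    return -p * (1 - p)  # higher (closer to 0) when the posterior is confident
# e.g. BEMPS(scorefun=neg_expected_brier, model=model, target=0.75)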
@acqf_input_constructor(GlobalMI, GlobalSUR, ApproxGlobalSUR, EAVC, MOCU, SMOCU, BEMPS)
def construct_inputs_global_lookahead(
model: GPyTorchModel,
training_data,
lookahead_type="levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
**kwargs,
):
lb = [bounds[0] for bounds in kwargs["bounds"]]
ub = [bounds[1] for bounds in kwargs["bounds"]]
Xq = Xq if Xq is not None else make_scaled_sobol(lb, ub, query_set_size)
return {
"model": model,
"lookahead_type": lookahead_type,
"target": target,
"posterior_transform": posterior_transform,
"query_set_size": query_set_size,
"Xq": Xq,
}
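# --- Editor's sketch (hedged addition, not part of the original file).
# Illustrative usage of the global acquisitions; assumes `model` is a fitted
# AEPsych GP exposing `lb`/`ub` bounds, as required when Xq is generated
# internally from a scaled Sobol sequence.
def _demo_global_lookahead(model: GPyTorchModel) -> None:
    acqf = GlobalSUR(model=model, target=0.75, query_set_size=128)
    X = torch.rand(8, 1, 3)  # b=8 candidates, q=1, d=3
    print(acqf(X))           # (8,) SUR values integrated over the Sobol Xq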
|
aepsych-main
|
aepsych/acquisition/lookahead.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import IdentityMCObjective, MCAcquisitionObjective
from botorch.models.model import Model
from torch import Tensor
from .rejection_sampler import RejectionSampler
class MonotonicMCAcquisition(AcquisitionFunction):
"""
Acquisition function base class for use with the rejection sampling
monotonic GP. This handles the bookkeeping of the derivative
constraint points -- implement specific monotonic MC acquisition
in subclasses.
"""
def __init__(
self,
model: Model,
deriv_constraint_points: torch.Tensor,
num_samples: int = 32,
num_rejection_samples: int = 1024,
objective: Optional[MCAcquisitionObjective] = None,
) -> None:
"""Initialize MonotonicMCAcquisition
        Args:
            model (Model): Model to use, usually a MonotonicRejectionGP.
            deriv_constraint_points (torch.Tensor): Points at which the derivative
                (monotonicity) constraint is enforced.
            num_samples (int, optional): Number of samples to keep from the rejection sampler. Defaults to 32.
            num_rejection_samples (int, optional): Number of rejection samples to draw. Defaults to 1024.
            objective (Optional[MCAcquisitionObjective], optional): Objective transform of the GP output
                before evaluating the acquisition. Defaults to identity transform.
"""
super().__init__(model=model)
self.deriv_constraint_points = deriv_constraint_points
self.num_samples = num_samples
self.num_rejection_samples = num_rejection_samples
self.sampler_shape = torch.Size([])
if objective is None:
assert model.num_outputs == 1
objective = IdentityMCObjective()
else:
assert isinstance(objective, MCAcquisitionObjective)
self.add_module("objective", objective)
def forward(self, X: Tensor) -> Tensor:
"""Evaluate the acquisition function at a set of points.
Args:
X (Tensor): Points at which to evaluate the acquisition function.
Should be (b) x q x d, and q should be 1.
Returns:
Tensor: Acquisition function value at these points.
"""
# This is currently doing joint samples over (b), and requiring q=1
# TODO T68656582 support batches properly.
if len(X.shape) == 3:
assert X.shape[1] == 1, "q must be 1"
Xfull = torch.cat((X[:, 0, :], self.deriv_constraint_points), dim=0)
else:
Xfull = torch.cat((X, self.deriv_constraint_points), dim=0)
if not hasattr(self, "sampler") or Xfull.shape != self.sampler_shape:
self._set_sampler(X.shape)
self.sampler_shape = Xfull.shape
posterior = self.model.posterior(Xfull)
samples = self.sampler(posterior)
assert len(samples.shape) == 3
# Drop derivative samples
samples = samples[:, : X.shape[0], :]
# NOTE: Squeeze below makes sure that we pass in the same `X` that was used
# to generate the `samples`. This is necessitated by `MCAcquisitionObjective`,
# which verifies that `samples` and `X` have the same q-batch size.
obj_samples = self.objective(samples, X=X.squeeze(-2) if X.ndim == 3 else X)
return self.acquisition(obj_samples)
def _set_sampler(self, Xshape: torch.Size) -> None:
sampler = RejectionSampler(
num_samples=self.num_samples,
num_rejection_samples=self.num_rejection_samples,
constrained_idx=torch.arange(
Xshape[0], Xshape[0] + self.deriv_constraint_points.shape[0]
),
)
self.add_module("sampler", sampler)
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
class MonotonicMCLSE(MonotonicMCAcquisition):
def __init__(
self,
model: Model,
deriv_constraint_points: torch.Tensor,
target: float,
num_samples: int = 32,
num_rejection_samples: int = 1024,
beta: float = 3.84,
objective: Optional[MCAcquisitionObjective] = None,
) -> None:
"""Level set estimation acquisition function for use with monotonic models.
        Args:
            model (Model): Underlying model object, usually should be MonotonicRejectionGP.
            deriv_constraint_points (torch.Tensor): Points at which the derivative
                (monotonicity) constraint is enforced.
            target (float): Level set value to target (after the objective).
num_samples (int, optional): Number of MC samples to draw in MC acquisition. Defaults to 32.
num_rejection_samples (int, optional): Number of rejection samples from which to subsample monotonic ones. Defaults to 1024.
beta (float, optional): Parameter of the LSE acquisition function that governs exploration vs
exploitation (similarly to the same parameter in UCB). Defaults to 3.84 (1.96 ** 2), which maps to the straddle
heuristic of Bryan et al. 2005.
objective (Optional[MCAcquisitionObjective], optional): Objective transform. Defaults to identity transform.
"""
self.beta = beta
self.target = target
super().__init__(
model=model,
deriv_constraint_points=deriv_constraint_points,
num_samples=num_samples,
num_rejection_samples=num_rejection_samples,
objective=objective,
)
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
mean = obj_samples.mean(dim=0)
variance = obj_samples.var(dim=0)
# prevent numerical issues if probit makes all the values 1 or 0
variance = torch.clamp(variance, min=1e-5)
delta = torch.sqrt(self.beta * variance)
return delta - torch.abs(mean - self.target)
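# --- Editor's sketch (hedged addition, not part of the original file). With
# the default beta = 3.84 = 1.96 ** 2, delta = sqrt(beta * var) is a 1.96-sd
# half-width, so the acquisition is largest where a ~95% credible interval
# straddles the target level.
def _demo_straddle() -> None:
    mean = torch.tensor([0.30, 0.75, 0.90])
    sd = torch.tensor([0.05, 0.05, 0.05])
    target, beta = 0.75, 3.84
    acq = torch.sqrt(beta * sd**2) - torch.abs(mean - target)
    print(acq)  # peaks at the point whose mean sits on the target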
|
aepsych-main
|
aepsych/acquisition/monotonic_rejection.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import torch
from botorch.acquisition.objective import MCAcquisitionObjective
from torch import Tensor
from torch.distributions.normal import Normal
class AEPsychObjective(MCAcquisitionObjective):
def inverse(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
raise NotImplementedError
class ProbitObjective(AEPsychObjective):
"""Probit objective
Transforms the input through the normal CDF (probit).
"""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the objective (normal CDF).
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
            Tensor: the normal CDF of the samples (outcome probabilities).
"""
return Normal(loc=0, scale=1).cdf(samples.squeeze(-1))
def inverse(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the inverse of the objective (normal PPF).
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
            Tensor: the normal PPF (inverse CDF) of the samples.
"""
return Normal(loc=0, scale=1).icdf(samples.squeeze(-1))
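# --- Editor's sketch (hedged addition, not part of the original file).
# forward and inverse are the normal CDF and PPF, so mapping latent samples to
# probabilities and back is lossless up to floating point.
def _demo_probit_roundtrip() -> None:
    obj = ProbitObjective()
    f = torch.randn(5, 1)  # latent GP samples; trailing dim is squeezed
    p = obj.forward(f)     # probabilities in (0, 1)
    assert torch.allclose(obj.inverse(p), f.squeeze(-1), atol=1e-4)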
class FloorLinkObjective(AEPsychObjective):
"""
Wrapper for objectives to add a floor, when
the probability is known not to go below it.
"""
def __init__(self, floor=0.5):
self.floor = floor
super().__init__()
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the objective for input x and floor f
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
Tensor: outcome probability.
"""
return self.link(samples.squeeze(-1)) * (1 - self.floor) + self.floor
def inverse(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the inverse of the objective.
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
            Tensor: samples mapped back to GP space through the inverse link.
"""
return self.inverse_link((samples - self.floor) / (1 - self.floor))
def link(self, samples):
raise NotImplementedError
def inverse_link(self, samples):
raise NotImplementedError
@classmethod
def from_config(cls, config):
floor = config.getfloat(cls.__name__, "floor")
return cls(floor=floor)
class FloorLogitObjective(FloorLinkObjective):
"""
Logistic sigmoid (aka expit, aka logistic CDF),
but with a floor so that its output is
between floor and 1.0.
"""
def link(self, samples):
return torch.special.expit(samples)
def inverse_link(self, samples):
return torch.special.logit(samples)
class FloorGumbelObjective(FloorLinkObjective):
"""
Gumbel CDF but with a floor so that its output
is between floor and 1.0. Note that this is not
the standard Gumbel distribution, but rather the
left-skewed Gumbel that arises as the log of the Weibull
distribution, e.g. Treutwein 1995, doi:10.1016/0042-6989(95)00016-X.
"""
def link(self, samples):
return torch.nan_to_num(
-torch.special.expm1(-torch.exp(samples)), posinf=1.0, neginf=0.0
)
def inverse_link(self, samples):
return torch.log(-torch.special.log1p(-samples))
class FloorProbitObjective(FloorLinkObjective):
"""
Probit (aka Gaussian CDF), but with a floor
so that its output is between floor and 1.0.
"""
def link(self, samples):
return Normal(0, 1).cdf(samples)
def inverse_link(self, samples):
return Normal(0, 1).icdf(samples)
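# --- Editor's sketch (hedged addition, not part of the original file). The
# floor objectives rescale a base link into [floor, 1]; a floor of 0.5 models
# the guess rate of a 2AFC task, where chance performance is 50%, not 0%.
def _demo_floor_logit() -> None:
    obj = FloorLogitObjective(floor=0.5)
    f = torch.linspace(-4.0, 4.0, 5).unsqueeze(-1)
    p = obj.forward(f)
    print(p.min().item(), p.max().item())  # both within (0.5, 1.0)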
|
aepsych-main
|
aepsych/acquisition/objective/objective.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import torch
from aepsych.config import Config
from aepsych.likelihoods import LinearBernoulliLikelihood
from botorch.acquisition.objective import MCAcquisitionObjective
from gpytorch.likelihoods import Likelihood
from torch import Tensor
class SemiPObjectiveBase(MCAcquisitionObjective):
"""Wraps the semi-parametric transform into an objective
that correctly extracts various things
"""
# because we have an extra dim for the SemiP batch dimension,
# all the q-batch output shape checks fail, disable them here
_verify_output_shape: bool = False
def __init__(self, stim_dim: int = 0):
super().__init__()
self.stim_dim = stim_dim
class SemiPProbabilityObjective(SemiPObjectiveBase):
"""Wraps the semi-parametric transform into an objective
that gives outcome probabilities
"""
    def __init__(self, likelihood: Optional[Likelihood] = None, *args, **kwargs):
        """Initialize the probability objective.
        Args:
            likelihood (Likelihood): Underlying SemiP likelihood (which we use
                for its objective/link). Other arguments are passed to the base
                class (notably, stim_dim).
        """
super().__init__(*args, **kwargs)
self.likelihood = likelihood or LinearBernoulliLikelihood()
def forward(self, samples: Tensor, X: Tensor) -> Tensor:
"""Evaluates the probability objective.
Args:
samples (Tensor): GP samples.
X (Tensor): Inputs at which to evaluate objective. Unlike most AEPsych objectives,
we need X here to split out the intensity dimension.
Returns:
Tensor: Response probabilities at the specific X values and function samples.
"""
Xi = X[..., self.stim_dim]
# the output of LinearBernoulliLikelihood is (nsamp x b x n x 1)
# but the output of MCAcquisitionObjective should be `nsamp x *batch_shape x q`
# so we remove the final dim
return self.likelihood.p(function_samples=samples, Xi=Xi).squeeze(-1)
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
return cls(likelihood=likelihood)
class SemiPThresholdObjective(SemiPObjectiveBase):
"""Wraps the semi-parametric transform into an objective
that gives the threshold distribution.
"""
    def __init__(self, target: float, likelihood: Optional[Likelihood] = None, *args, **kwargs):
        """Initialize the threshold objective.
        Args:
            target (float): The p-space threshold value to evaluate.
            likelihood (Likelihood): Underlying SemiP likelihood (which we use
                for its inverse link). Other arguments are passed to the base
                class (notably, stim_dim).
        """
super().__init__(*args, **kwargs)
self.likelihood = likelihood or LinearBernoulliLikelihood()
self.fspace_target = self.likelihood.objective.inverse(torch.tensor(target))
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the probability objective.
Args:
samples (Tensor): GP samples.
X (Tensor, optional): Ignored, here for compatibility with the objective API.
Returns:
Tensor: Threshold probabilities at the specific GP sample values.
"""
offset = samples[..., 0, :]
slope = samples[..., 1, :]
return (self.fspace_target + slope * offset) / slope
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
target = config.getfloat(classname, "target", fallback=0.75)
return cls(likelihood=likelihood, target=target)
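# --- Editor's sketch (hedged addition, not part of the original file). The
# threshold expression in forward() is algebraically target_f / slope + offset:
# the intensity at which the linear-in-intensity latent crosses the f-space
# target for a given (offset, slope) sample pair.
def _demo_threshold_algebra() -> None:
    offset, slope = torch.tensor(0.3), torch.tensor(2.0)
    target_f = torch.tensor(1.0)
    lhs = (target_f + slope * offset) / slope
    assert torch.allclose(lhs, target_f / slope + offset)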
|
aepsych-main
|
aepsych/acquisition/objective/semi_p.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ...config import Config
from .objective import (
AEPsychObjective,
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
ProbitObjective,
)
from .semi_p import SemiPProbabilityObjective, SemiPThresholdObjective
__all__ = [
"AEPsychObjective",
"FloorGumbelObjective",
"FloorLogitObjective",
"FloorProbitObjective",
"ProbitObjective",
"SemiPProbabilityObjective",
"SemiPThresholdObjective",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/acquisition/objective/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
aepsych/means/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from gpytorch.means.constant_mean import ConstantMean
class ConstantMeanPartialObsGrad(ConstantMean):
"""A mean function for use with partial gradient observations.
This follows gpytorch.means.constant_mean_grad and sets the prior mean for
derivative observations to 0, though unlike that function it allows for
partial observation of derivatives.
The final column of input should be an index that is 0 if the observation
is of f, or i if it is of df/dxi.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
idx = input[..., -1].to(dtype=torch.long) > 0
mean_fit = super(ConstantMeanPartialObsGrad, self).forward(input[..., ~idx, :])
sz = mean_fit.shape[:-1] + torch.Size([input.shape[-2]])
mean = torch.zeros(sz)
mean[~idx] = mean_fit
return mean
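# --- Editor's sketch (hedged addition, not part of the original file). The
# trailing input column selects the observation type: 0 for f itself (prior
# mean is the learned constant) and i > 0 for df/dx_i (prior mean is 0).
def _demo_partial_grad_mean() -> None:
    mean_fn = ConstantMeanPartialObsGrad()
    x = torch.rand(4, 3)
    idx = torch.tensor([[0.0], [1.0], [0.0], [2.0]])  # f, df/dx1, f, df/dx2
    print(mean_fn(torch.cat((x, idx), dim=-1)))  # zeros at derivative rows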
|
aepsych-main
|
aepsych/means/constant_partial_grad.py
|