python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import logging
import os.path as osp
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets.folder import default_loader
logger = logging.getLogger(__name__)
class DfDatasetWithCF(Dataset):
def __init__(
self,
df: pd.DataFrame,
transform=None,
target_transform=None,
is_use_bias: bool = False,
is_skip_img: bool = False,
) -> None:
super().__init__()
self.df = df
self.loader = default_loader
self.transform = transform
self.target_transform = target_transform
self.is_use_bias = is_use_bias
self.is_skip_img = is_skip_img
def __len__(self):
return len(self.df)
def __getitem__(self, index):
row = self.df.iloc[index]
img_path = row["img_path"]
pos_img_path = row["pos_img_path"] if "pos_img_path" in row else row["img_path"]
cf_vector = torch.tensor(row["embs"])
target = torch.tensor(row["label_vec"])
is_labeled = row["is_labeled"]
cf_bias = torch.tensor(row["bias"])
cf_confidence = torch.tensor(row["cf_confidence"])
if self.is_use_bias is True:
cf_vector = torch.hstack((cf_vector, cf_bias)).float()
if self.is_skip_img is True:
return cf_vector, target
# Load image
image = self.loader(img_path)
image_pos = self.loader(pos_img_path)
if self.transform is not None:
image = self.transform(image)
image_pos = self.transform(image_pos)
if self.target_transform is not None:
target = self.target_transform(target)
return (
image,
image_pos,
cf_vector,
target,
is_labeled,
cf_confidence,
)
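# Expected dataframe columns (inferred from __getitem__ above): "img_path", "embs"
# (the CF vector), "label_vec", "is_labeled", "bias", "cf_confidence", and optionally
# "pos_img_path" for the positive pair. __getitem__ returns
# (image, image_pos, cf_vector, target, is_labeled, cf_confidence), or just
# (cf_vector, target) when is_skip_img is set.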
# Transformation as ImageNet training
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
transforms.RandomGrayscale(),
transforms.ToTensor(),
normalize,
]
)
test_transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
def get_loss_based_confidence(cf_based_loss_path: str):
assert cf_based_loss_path is not None
cf_based_loss = torch.load(cf_based_loss_path)
loss_mean = cf_based_loss.mean(axis=1)
cf_confidence = 1 / loss_mean
return cf_confidence
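# Confidence is the inverse of the mean CF reconstruction loss per item: items whose
# CF vectors were fit with a lower loss get a proportionally higher confidence.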
def pos_label_loss_based(
cf_based_loss_path: str, label_vecs: pd.Series
) -> torch.Tensor:
assert cf_based_loss_path is not None
cf_based_loss = torch.load(cf_based_loss_path)
label_vecs = torch.tensor(label_vecs.tolist()).bool()
# Get only loss of GT labels
loss_mean = torch.tensor(
[values[mask].mean() for values, mask in zip(cf_based_loss, label_vecs)]
)
cf_confidence = 1 / loss_mean
# For samples without positive labels: set the confidence to 0
cf_confidence[torch.isnan(cf_confidence)] = 0.0
return cf_confidence
def clip_confidence(
cf_conf_train: torch.Tensor, cf_conf_test: torch.Tensor, max_min_ratio: float
) -> "tuple[torch.Tensor, torch.Tensor]":
lower_limit, upper_limit = torch.quantile(cf_conf_train, torch.tensor([0.25, 0.75]))
if max_min_ratio is None or lower_limit == upper_limit:
return cf_conf_train, cf_conf_test
cf_conf_train_clipped = torch.clip(cf_conf_train, lower_limit, upper_limit)
cf_conf_test_clipped = torch.clip(cf_conf_test, lower_limit, upper_limit)
# Normalize to keep min-max value between 1 and max_min_ratio
min_val, max_val = cf_conf_train_clipped.min(), cf_conf_train_clipped.max()
cf_conf_train_clipped = 1 + (cf_conf_train_clipped - min_val) * (
max_min_ratio - 1
) / (max_val - min_val)
cf_conf_test_clipped = 1 + (cf_conf_test_clipped - min_val) * (
max_min_ratio - 1
) / (max_val - min_val)
# Log
min_val, max_val = cf_conf_train_clipped.min(), cf_conf_train_clipped.max()
logger.info(
f"clip_confidence: {max_min_ratio=} [min max]={min_val:.2f} {max_val:.2f}"
)
return cf_conf_train_clipped, cf_conf_test_clipped
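# Illustrative example: with train quantiles q25=2.0, q75=10.0 and max_min_ratio=5,
# a raw confidence of 2.0 (or below) maps to 1, 10.0 (or above) maps to 5, and 6.0
# maps to 1 + (6 - 2) * (5 - 1) / (10 - 2) = 3.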
def assign_positive_cf(df_train, df_test):
df_all_set = pd.concat(
(df_train[["asin", "img_path"]], df_test[["asin", "img_path"]])
)
pos_img_path = pd.merge(
df_train[["asin", "pos_asin"]],
df_all_set,
left_on=["pos_asin"],
right_on=["asin"],
how="left",
)["img_path"]
df_train["pos_img_path"] = pos_img_path
pos_img_path = pd.merge(
df_test[["asin", "pos_asin"]],
df_all_set,
left_on=["pos_asin"],
right_on=["asin"],
how="left",
)["img_path"]
df_test["pos_img_path"] = pos_img_path
return df_train, df_test
def plot_and_save_conf_histogram(
out_dir: str,
confidence_type: str,
cf_conf: np.ndarray,
cf_conf_clipped: np.ndarray,
):
mask = np.isfinite(cf_conf)
value_min, value_max = np.round(
[np.min(cf_conf_clipped), np.max(cf_conf_clipped)], 2
)
_, axs = plt.subplots(2, 1, sharex=False)
ax = axs[0]
_, bins, _ = ax.hist(cf_conf[mask], bins=100, alpha=0.5, label="raw", color="C0")
ax.hist(cf_conf_clipped, bins=bins, alpha=0.5, label="clipped", color="C1")
ax.set_ylabel("Count")
ax.set_yscale("log")
ax.legend()
ax.set_title(f"Confidence {confidence_type=}")
ax = axs[1]
ax.hist(cf_conf_clipped, bins=100, alpha=0.5, label="clipped", color="C1")
ax.set_ylabel("Count")
ax.set_yscale("log")
ax.set_xlabel(f"Confidnce value. [min max]=[{value_min} {value_max}]")
plt.tight_layout()
plt.savefig(osp.join(out_dir, "cf_confidence.jpg"))
plt.close()
def get_datasets(
df_train_path: str,
df_test_path: str,
cf_vector_df_path: str,
out_dir: str,
labeled_ratio: float = 1.0,
is_use_bias: bool = False,
is_skip_img: bool = False,
cf_based_train_loss_path: str = None,
cf_based_test_loss_path: str = None,
is_use_cf_embeddings: bool = False,
cf_embeddings_train_path: str = None,
cf_embeddings_test_path: str = None,
confidence_type: str = "uniform",
conf_max_min_ratio: float = None,
is_plot_conf_hist: bool = True,
):
t0 = time.time()
df_train = pd.read_pickle(df_train_path)
df_test = pd.read_pickle(df_test_path)
cf_df = pd.read_pickle(cf_vector_df_path)
logger.info(
f"Loaded df in {time.time() -t0 :.2f} sec. {len(df_train)=} {len(df_test)=} {len(cf_df)=}"
)
# Add CF vectors
t0 = time.time()
cf_df["asin"] = cf_df["asin"].astype(df_train["asin"].dtype)
df_train = pd.merge(df_train, cf_df, on=["asin"], how="inner")
df_test = pd.merge(df_test, cf_df, on=["asin"], how="inner")
logger.info(
f"merge df in {time.time() -t0 :.2f} sec. {len(df_train)=} {len(df_test)=} {len(cf_df)=}"
)
if is_use_cf_embeddings is True:
t0 = time.time()
cf_embeddings_train = torch.load(cf_embeddings_train_path)
cf_embeddings_test = torch.load(cf_embeddings_test_path)
df_train["embs"] = cf_embeddings_train.tolist()
df_test["embs"] = cf_embeddings_test.tolist()
logger.info(f"Override cf vectors in {time.time() -t0 :.2f} sec.")
# Add positive CF
if "pos_asin" in df_train.columns:
df_train, df_test = assign_positive_cf(df_train, df_test)
# Hide labels
df_train["is_labeled"] = torch.rand(len(df_train)) > 1.0 - labeled_ratio
df_test["is_labeled"] = True
# Define positive weight: Since positives are much less than negatives, increase their weights
train_labels = np.array(
df_train[df_train["is_labeled"] == True].label_vec.to_list()
)
pos_weight = len(train_labels) / (train_labels.sum(axis=0) + 1e-6)
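# Example: with 1000 labeled samples and a class that is positive in 50 of them,
# its positive weight is roughly 1000 / 50 = 20.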
# Apply confidence to cf vector
t0 = time.time()
if confidence_type == "uniform":
cf_conf_train = torch.ones(len(df_train))
cf_conf_test = torch.ones(len(df_test))
elif confidence_type == "loss_based":
cf_conf_train = get_loss_based_confidence(cf_based_train_loss_path)
cf_conf_test = get_loss_based_confidence(cf_based_test_loss_path)
elif confidence_type == "num_intercations":
cf_conf_train = torch.from_numpy(np.sqrt(df_train["num_intercations"].values))
cf_conf_test = torch.from_numpy(np.sqrt(df_test["num_intercations"].values))
elif confidence_type == "pos_label_loss_based":
cf_conf_train = pos_label_loss_based(
cf_based_train_loss_path, df_train["label_vec"]
)
cf_conf_test = pos_label_loss_based(
cf_based_test_loss_path, df_test["label_vec"]
)
else:
raise ValueError(f"{confidence_type} is not supported")
cf_conf_train_clipped, cf_conf_test_clipped = clip_confidence(
cf_conf_train, cf_conf_test, conf_max_min_ratio
)
df_train["cf_confidence"] = cf_conf_train_clipped
df_test["cf_confidence"] = cf_conf_test_clipped
logger.info(f"CF confidence in {time.time() -t0 :.2f} sec.")
if is_plot_conf_hist is True:
plot_and_save_conf_histogram(
out_dir,
confidence_type,
cf_conf_train.numpy(),
cf_conf_train_clipped.numpy(),
)
logger.info(f"Plotted CF confidence in {time.time() -t0 :.2f} sec. {out_dir=}")
# Construct dataset
train_dataset = DfDatasetWithCF(
df_train,
transform=train_transform,
is_use_bias=is_use_bias,
is_skip_img=is_skip_img,
)
test_dataset = DfDatasetWithCF(
df_test,
transform=test_transform,
is_use_bias=is_use_bias,
is_skip_img=is_skip_img,
)
# Get metadata
num_classes = len(df_train["label_vec"].iloc[0])
cf_vector_dim = len(df_train["embs"].iloc[0])
if is_use_bias is True:
cf_vector_dim += 1
dataset_meta = {
"train_set_size": len(df_train),
"test_set_size": len(df_test),
"num_classes": num_classes,
"cf_vector_dim": cf_vector_dim,
}
return train_dataset, test_dataset, dataset_meta, pos_weight
| collaborative_image_understanding-main | src/dataset_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
from typing import List
import pandas as pd
import requests
from tqdm import tqdm
logger = logging.getLogger(__name__)
class DataHelper:
def __init__(self, is_debug: bool, is_override: bool):
self.is_debug = is_debug
self.is_override = is_override
def is_img_path(self, path: str) -> bool:
if path.lower().endswith((".jpg", ".png", ".jpeg", ".gif", ".tiff")):
return True
else:
return False
def is_exist(self, path: str):
is_path_exist = osp.exists(path)
if self.is_override:
is_path_exist = False
return is_path_exist
def download_url(
self,
url: str,
dst: str = None,
is_force_download: bool = False,
):
if self.is_debug:
logger.info(f"download_url: {url=} {dst=} {is_force_download=}")
if dst is None:
dst = os.path.basename(url)
if is_force_download is False and self.is_exist(dst):
return
r = requests.get(url)
with open(dst, "wb") as f:
f.write(r.content)
def ungzip_file(self, path_src: str, path_dst: str):
logger.info(f"ungzip_file: {path_src=} {path_dst=}")
if self.is_exist(path_dst):
return
os.system(f"gzip -dk {path_src}")
def read_pickle(self, pkl_path: str) -> pd.DataFrame:
logger.info(f"pd.read_pickle {pkl_path}")
df = pd.read_pickle(pkl_path)
return df
def save_df_as_pkl(self, json_path: str, pkl_path: str):
logger.info(f"save_df_as_pkl: {json_path=} {pkl_path=}")
with open(json_path, "r") as fin:
df = {}
for i, line in enumerate(tqdm(fin)):
df[i] = eval(line)
df = pd.DataFrame.from_dict(df, orient="index")
df.to_pickle(pkl_path)
def create_dir(self, dst: str):
logger.info(f"create_dir {dst=}")
os.makedirs(dst, exist_ok=True)
def list_files_in_dir(self, path: str) -> List[str]:
return os.listdir(path)
| collaborative_image_understanding-main | src/data_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from glob import glob
import hydra
import pandas as pd
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from sklearn.metrics import average_precision_score
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from dataset_utils import get_datasets
from lit_utils import LitModel
logging.basicConfig()
logger = logging.getLogger(__name__)
def get_products(model, dataloader):
preds_list, labels, embeddings_list = [], [], []
for batch in tqdm(dataloader):
imgs, labels_i = batch[0], batch[3]
preds, _ = model(imgs.to("cuda").half())
embeddings = model.get_embeddings(imgs.to("cuda").half())
preds_list.append(torch.sigmoid(preds).cpu())
embeddings_list.append(embeddings.cpu())
labels.append(labels_i)
preds = torch.vstack(preds_list)
embeddings = torch.vstack(embeddings_list)
labels = torch.vstack(labels)
return preds, embeddings, labels
def load_cfg_file(base_dir: str):
cfg_path = osp.join(base_dir, ".hydra", "config.yaml")
cfg = OmegaConf.load(cfg_path)
return cfg
def load_trained_model(base_dir: str):
model_path = glob(osp.join(base_dir, "epoch*.ckpt"))[0]
haparam_path = glob(osp.join(base_dir, "default", "version_0", "hparams.yaml"))[0]
model = LitModel.load_from_checkpoint(model_path, hparams_file=haparam_path)
model.eval()
return model
@hydra.main(
config_path="../configs",
config_name="extract_embeddings",
)
def extract_embeddings(cfg: DictConfig):
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(os.getcwd())
dir_path = cfg.dir_path
resource_dict = {"label_ratio_1.0_no_cf": dir_path}
logger.info(resource_dict)
logger.info("load_cfg_file")
cfg_file = load_cfg_file(resource_dict["label_ratio_1.0_no_cf"])
cfg_file.batch_size = cfg.batch_size
cfg_file.num_workers = cfg.num_workers
# Load data
logger.info("Load datasets")
train_dataset, test_dataset, dataset_meta, _ = get_datasets(
cfg_file.train_df_path,
cfg_file.test_df_path,
cfg_file.cf_vector_df_path,
out_dir,
cfg_file.labeled_ratio,
cfg_file.is_use_bias,
cf_based_train_loss_path=cfg_file.cf_based_train_loss_path,
cf_based_test_loss_path=cfg_file.cf_based_test_loss_path,
is_use_cf_embeddings=cfg_file.is_use_cf_embeddings,
cf_embeddings_train_path=cfg_file.cf_embeddings_train_path,
cf_embeddings_test_path=cfg_file.cf_embeddings_test_path,
confidence_type=cfg_file.confidence_type,
is_plot_conf_hist=False,
)
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=False,
pin_memory=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=False,
pin_memory=True,
)
torch.multiprocessing.set_sharing_strategy("file_system")
torch.set_grad_enabled(False)
for key, base_dir in resource_dict.items():
logger.info(f"{key}, {base_dir}")
model = load_trained_model(base_dir)
model = model.to("cuda").half()
dfs = []
for loader_type, loader in [
("test", testloader),
("train", trainloader),
]:
t0 = time.time()
preds, embeddings, labels = get_products(model, loader)
ap = average_precision_score(labels, preds)
logger.info(f"Finish {loader_type=} in {time.time()-t0:.2f}. {ap=}")
df = loader.dataset.df
df["pred"] = preds.tolist()
df["image_embedding"] = embeddings.tolist()
df["set_type"] = loader_type
df["label_vec_dataloder_output"] = labels.tolist()
dfs.append(df)
# Save products
df = pd.concat(dfs)
df = df.rename(
columns={"embs": "cf_vec"},
)
df.to_pickle(osp.join(out_dir, f"{cfg.dataset_name}_features.pkl"))
df.to_csv(osp.join(out_dir, f"{cfg.dataset_name}_features.csv"))
if __name__ == "__main__":
extract_embeddings()
| collaborative_image_understanding-main | src/main_extract_embeddings.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import time
import os.path as osp
import hydra
import pytorch_lightning as pl
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.utils.data.dataloader import DataLoader
from dataset_utils import get_datasets
from lit_utils import LitModel
logger = logging.getLogger(__name__)
@hydra.main(
config_path="../configs",
config_name="train_model_with_cf_pretraining",
)
def train_model_with_cf_pretraining(cfg: DictConfig):
t_start = time.time()
logger.info(cfg)
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(f"{out_dir=}")
pl.utilities.seed.seed_everything(cfg.seed)
logger.info(f"{torch.cuda.is_available()=}")
# Configure logging
tb_logger = pl_loggers.TensorBoardLogger(out_dir)
tb_logger.log_hyperparams(OmegaConf.to_container(cfg))
# Configure checkpoint saver
checkpoint_callback = ModelCheckpoint(
dirpath=out_dir,
monitor="ap/val" if cfg.is_debug is False else "ap/train",
save_top_k=1,
mode="max",
)
# Load data
t0 = time.time()
train_dataset, test_dataset, dataset_meta, pos_weight = get_datasets(
cfg.train_df_path,
cfg.test_df_path,
cfg.cf_vector_df_path,
out_dir,
cfg.labeled_ratio,
cfg.is_use_bias,
cf_based_train_loss_path=cfg.cf_based_train_loss_path,
cf_based_test_loss_path=cfg.cf_based_test_loss_path,
is_use_cf_embeddings=cfg.is_use_cf_embeddings,
cf_embeddings_train_path=cfg.cf_embeddings_train_path,
cf_embeddings_test_path=cfg.cf_embeddings_test_path,
confidence_type=cfg.confidence_type,
conf_max_min_ratio=cfg.conf_max_min_ratio,
)
logger.info(f"Loadded data in {time.time() -t0 :.2f} sec")
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
# Create dataloaders
t0 = time.time()
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=True,
pin_memory=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
)
# First training: predict CF vector
cfg["cf_weight"], cfg["label_weight"] = 1.0, 0.0
lit_h = LitModel(
dataset_meta["num_classes"], dataset_meta["cf_vector_dim"], cfg, pos_weight
)
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
precision=16,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(f"Finish cf training in {time.time() -t_start :.2f} sec")
logger.info(f"{out_dir=}")
trainer.save_checkpoint(osp.join(out_dir, "model_pretrained_cf.ckpt"))
# Second training: predict labels
cfg["cf_weight"], cfg["label_weight"] = 0.0, 1.0
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
precision=16,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(
f"Finish label training in {time.time() -t_start :.2f} sec. {lit_h.map_best=:.3f}"
)
logger.info(f"{out_dir=}")
trainer.save_checkpoint(osp.join(out_dir, "model.ckpt"))
if __name__ == "__main__":
train_model_with_cf_pretraining()
| collaborative_image_understanding-main | src/train_model_with_cf_pretraining.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from itertools import chain
import hydra
import numpy as np
import pandas as pd
from omegaconf import DictConfig
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from torchvision.datasets.folder import default_loader as img_loader
logger = logging.getLogger(__name__)
def keep_only_exists_images(df: pd.DataFrame) -> pd.DataFrame:
img_exists = []
for img_path in df["img_path"]:
if osp.exists(img_path):
try:
img_loader(img_path)
img_exists.append(True)
except:
img_exists.append(False)
else:
img_exists.append(False)
df["img_exists"] = img_exists
logger.info(f"Img exsists {df.img_exists.sum()}/{len(df)}")
return df[df["img_exists"] == True]
def execute_train_test_split(
df: pd.DataFrame, train_set_ratio: float, min_label_count_thresh: int
):
n_iter = 0
min_label_count = 0
while min_label_count < min_label_count_thresh:
df_train, df_test = train_test_split(df, train_size=train_set_ratio)
train_labels = np.array(df_train.label_vec.to_list())
test_labels = np.array(df_test.label_vec.to_list())
min_label_count = min(
train_labels.sum(axis=0).min(), test_labels.sum(axis=0).min()
)
logger.info(f"[{n_iter}] train-test split {min_label_count=}")
n_iter += 1
return df_train, df_test
@hydra.main(
config_path="../configs",
config_name="process_labels_movielens",
)
def process_labels_movielens(cfg: DictConfig):
out_dir = os.getcwd()
logger.info(cfg)
logger.info(os.getcwd())
# Load df
t0 = time.time()
meta_path = osp.join(cfg.data_dir, f"movies.dat")
meta_df = pd.read_csv(
meta_path, delimiter="::", names=["asin", "movie_name", "categories"]
)
logger.info(f"Loadded meta_df in {time.time() -t0:.2f} sec. {len(meta_df)=}")
# Add image paths
meta_df["img_path"] = meta_df["asin"].apply(
lambda x: osp.join(cfg.data_dir, cfg.category, str(x) + ".jpg")
)
# Keep only items with images
df = keep_only_exists_images(meta_df)[["asin", "img_path", "categories"]]
# Find top-level label name by most frequent
df = df[df["categories"] != "(no genres listed)"]
df["merged_labels"] = df["categories"].apply(lambda cat_list: cat_list.split("|"))
# Count the number of samples for each category: remove lower-level categories if there are not enough samples
label_count = pd.value_counts(
list(chain.from_iterable(df["merged_labels"].tolist()))
)
# Encode to Multilabel vector
mlb = MultiLabelBinarizer()
df["label_vec"] = mlb.fit_transform(df["merged_labels"].tolist()).tolist()
logger.info(f"\n{df.head()}")
# Save results
out_path = osp.join(out_dir, "label_count.csv")
label_count.to_csv(out_path, header=False)
out_path = osp.join(out_dir, "df_w_labels.pkl")
df = df.reset_index()
df.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "label_mapper.csv")
pd.DataFrame(mlb.classes_).to_csv(out_path, header=False)
logger.info(f"Save to {out_path}")
# Train-test split
df_train, df_test = execute_train_test_split(
df, cfg.train_set_ratio, cfg.min_label_count
)
# Save train
out_path = osp.join(out_dir, "df_train.pkl")
df_train = df_train.reset_index()
df_train.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "..", "df_train.pkl")
df_train.to_pickle(out_path)
logger.info(f"Save to {out_path}")
# Save test
out_path = osp.join(out_dir, "df_test.pkl")
df_test = df_test.reset_index()
df_test.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "..", "df_test.pkl")
df_test.to_pickle(out_path)
logger.info(f"Save to {out_path}")
logger.info("Finish")
if __name__ == "__main__":
process_labels_movielens()
| collaborative_image_understanding-main | src/main_process_labels_movielens.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import cornac
import hydra
import pandas as pd
import torch
from cornac.eval_methods import RatioSplit
from cornac.metrics import AUC, MAP
from omegaconf import DictConfig
from recommender_utils import RecommendationDataset, VAECFWithBias
logger = logging.getLogger(__name__)
@hydra.main(
config_path="../configs",
config_name="train_recommender",
)
def train_recommender(cfg: DictConfig):
out_dir = os.getcwd()
logger.info(cfg)
logger.info(os.getcwd())
# Initalize dataset
t1 = time.time()
dataset_h = RecommendationDataset(cfg.data_dir, cfg.category, cfg.user_based)
dataset = dataset_h.load_feedback()
rs = RatioSplit(
data=dataset,
test_size=cfg.test_size,
rating_threshold=1.0,
seed=cfg.seed,
exclude_unknowns=True,
verbose=True,
)
logger.info(f"Loaded dataset in {time.time()-t1:.2f}")
# Initalize model
models = []
if "most_pop" in cfg.models:
model = cornac.models.MostPop()
models.append(model)
if "bpr" in cfg.models:
bpr = cornac.models.BPR(
k=10, max_iter=1000, learning_rate=0.001, lambda_reg=0.001, seed=123
)
models.append(bpr)
if "vae_no_bias" in cfg.models:
model = cornac.models.VAECF(
k=cfg.bottleneck_size,
autoencoder_structure=list(cfg.emb_size),
act_fn="tanh",
likelihood="mult",
n_epochs=cfg.n_epochs,
batch_size=cfg.batch_size,
learning_rate=cfg.lr,
beta=cfg.beta,
seed=cfg.seed,
use_gpu=True,
verbose=True,
)
models.append(model)
if "vae_no_bias" in cfg.models:
vaecf = VAECFWithBias(
k=cfg.bottleneck_size,
autoencoder_structure=list(cfg.emb_size),
act_fn="tanh",
likelihood="mult",
n_epochs=cfg.n_epochs,
batch_size=cfg.batch_size,
learning_rate=cfg.lr,
lr_steps=cfg.lr_steps,
beta=cfg.beta,
seed=cfg.seed,
use_gpu=True,
verbose=True,
out_dir=out_dir,
)
models.append(vaecf)
# Run training
t0 = time.time()
metrics = [AUC(), MAP()]
cornac.Experiment(
eval_method=rs,
models=models,
metrics=metrics,
user_based=False,
).run()
logger.info(f"Finish training in {time.time() -t0:.2f} sec")
if "bpr" in cfg.models:
logger.info(bpr)
embs = bpr.i_factors
bias = bpr.i_biases
if "vae_no_bias" in cfg.models:
logger.info(vaecf.vae)
# Save vae model
out_path = osp.join(out_dir, "vae.pt")
torch.save(vaecf.vae.state_dict(), out_path)
embs = vaecf.vae.decoder.fc1.weight.detach().cpu()
bias = vaecf.vae.item_bias.weight.detach().cpu().squeeze()
# Create CF data frame
num_intercations = rs.train_set.csc_matrix.sum(axis=0).tolist()[0]
df = pd.DataFrame(
{
"asin": list(rs.train_set.item_ids),
"embs": embs.tolist(),
"bias": bias.tolist(),
"num_intercations": num_intercations,
}
)
# Save to: out path
out_path = osp.join(out_dir, "cf_df.pkl")
logger.info(out_path)
df.to_pickle(out_path)
if cfg.test_size == 0.0:
# Save to: dataset output top dir
out_path = osp.join(out_dir, "..", "cf_df.pkl")
logger.info(out_path)
df.to_pickle(out_path)
logger.info(f"Finish in {time.time()-t0:.2f} sec")
if __name__ == "__main__":
train_recommender()
| collaborative_image_understanding-main | src/main_train_recommender.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os.path as osp
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from cornac.data import Reader
from cornac.models import VAECF
from cornac.models.recommender import Recommender
from cornac.models.vaecf.vaecf import VAE
from tqdm.auto import trange
logger = logging.getLogger(__name__)
def learn(
vae,
train_set,
n_epochs,
batch_size,
learn_rate,
lr_steps,
beta,
verbose,
out_dir: str,
device=torch.device("cpu"),
):
loss_list, lr_list = [], []
optimizer = torch.optim.Adam(params=vae.parameters(), lr=learn_rate)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_steps)
progress_bar = trange(1, n_epochs + 1, disable=not verbose)
for _ in progress_bar:
sum_loss = 0.0
count = 0
for batch_id, u_ids in enumerate(
train_set.user_iter(batch_size, shuffle=False)
):
u_batch = train_set.matrix[u_ids, :]
u_batch.data = np.ones(len(u_batch.data)) # Binarize data
u_batch = u_batch.A
u_batch = torch.tensor(u_batch, dtype=torch.float32, device=device)
# Reconstructed batch
u_batch_, mu, logvar = vae(u_batch)
loss = vae.loss(u_batch, u_batch_, mu, logvar, beta)
optimizer.zero_grad()
loss.backward()
optimizer.step()
sum_loss += loss.data.item()
count += len(u_batch)
if batch_id % 10 == 0:
progress_bar.set_postfix(loss=(sum_loss / count))
scheduler.step()
loss_list.append(sum_loss / count)
lr_list += scheduler.get_last_lr()
_, axs = plt.subplots(2, 1, sharex=True)
ax = axs[0]
ax.plot(loss_list)
ax.set_ylabel("loss")
ax.set_yscale("log")
ax.grid()
ax = axs[1]
ax.plot(lr_list)
ax.set_ylabel("lr")
ax.set_yscale("log")
ax.set_xlabel("epoch")
ax.grid()
plt.tight_layout()
plt.savefig(osp.join(out_dir, "loss.jpg"))
plt.close()
return vae
class VAEWithBias(VAE):
def __init__(self, z_dim, ae_structure, act_fn, likelihood):
logger.info("VAEWithBias")
super().__init__(z_dim, ae_structure, act_fn, likelihood)
# Add bias
num_items = ae_structure[0]
self.item_bias = torch.nn.Embedding(num_items, 1)
def decode(self, z):
h = self.decoder(z)
if self.likelihood == "mult":
return torch.softmax(h + self.item_bias.weight.T, dim=1)
else:
raise NotImplementedError()
class VAECFWithBias(VAECF):
def __init__(
self,
name="VAECF",
k=10,
autoencoder_structure=[20],
act_fn="tanh",
likelihood="mult",
n_epochs=100,
batch_size=100,
learning_rate=0.001,
lr_steps=[10],
out_dir=".",
beta=1.0,
trainable=True,
verbose=False,
seed=None,
use_gpu=False,
):
super().__init__(
name=name,
k=k,
autoencoder_structure=autoencoder_structure,
act_fn=act_fn,
likelihood=likelihood,
n_epochs=n_epochs,
batch_size=batch_size,
learning_rate=learning_rate,
beta=beta,
trainable=trainable,
verbose=verbose,
seed=seed,
use_gpu=use_gpu,
)
self.lr_steps = lr_steps
self.out_dir = out_dir
def fit(self, train_set, val_set=None):
"""Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object
"""
Recommender.fit(self, train_set, val_set)
self.device = (
torch.device("cuda:0")
if (self.use_gpu and torch.cuda.is_available())
else torch.device("cpu")
)
if self.trainable:
if self.seed is not None:
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
if not hasattr(self, "vae"):
data_dim = train_set.matrix.shape[1]
self.vae = VAEWithBias(
self.k,
[data_dim] + self.autoencoder_structure,
self.act_fn,
self.likelihood,
).to(self.device)
learn(
self.vae,
self.train_set,
n_epochs=self.n_epochs,
batch_size=self.batch_size,
learn_rate=self.learning_rate,
lr_steps=self.lr_steps,
beta=self.beta,
verbose=self.verbose,
device=self.device,
out_dir=self.out_dir,
)
elif self.verbose:
logger.info("%s is trained already (trainable = False)" % (self.name))
return self
class RecommendationDataset:
def __init__(
self,
data_dir: str,
category: str = "Clothing_Shoes_and_Jewelry",
user_based: bool = True,
) -> None:
self.data_dir = data_dir
self.category = category
self.review_path = osp.join(self.data_dir, f"reviews_{category}.pkl")
self.rating_path = osp.join(self.data_dir, f"rating_{category}_user_based.txt")
if not osp.exists(self.rating_path):
self.convert_review_pkl_to_rating()
def convert_review_pkl_to_rating(self):
review_df = pd.read_pickle(
osp.join(self.data_dir, f"reviews_{self.category}.pkl")
)
# Align to rating.txt format
review_df = review_df[["reviewerID", "asin", "overall"]]
review_df.to_csv(self.rating_path, sep="\t", index=False, header=False)
def load_feedback(self, reader: Reader = None) -> List:
reader = Reader(bin_threshold=1.0) if reader is None else reader
return reader.read(self.rating_path, sep="\t")
| collaborative_image_understanding-main | src/recommender_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import time
import hydra
import pytorch_lightning as pl
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.utils.data.dataloader import DataLoader
from dataset_utils import get_datasets
from lit_utils import LitModel
logger = logging.getLogger(__name__)
@hydra.main(
config_path="../configs",
config_name="train_model",
)
def train_model(cfg: DictConfig):
t_start = time.time()
logger.info(cfg)
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(f"{out_dir=}")
pl.utilities.seed.seed_everything(cfg.seed)
logger.info(f"{torch.cuda.is_available()=}")
# Configure logging
tb_logger = pl_loggers.TensorBoardLogger(out_dir)
tb_logger.log_hyperparams(OmegaConf.to_container(cfg))
# Configure checkpoint saver
checkpoint_callback = ModelCheckpoint(
dirpath=out_dir,
monitor="ap/val" if cfg.is_debug is False else "ap/train",
save_top_k=1,
mode="max",
)
# Load data
t0 = time.time()
train_dataset, test_dataset, dataset_meta, pos_weight = get_datasets(
cfg.train_df_path,
cfg.test_df_path,
cfg.cf_vector_df_path,
out_dir,
cfg.labeled_ratio,
cfg.is_use_bias,
cf_based_train_loss_path=cfg.cf_based_train_loss_path,
cf_based_test_loss_path=cfg.cf_based_test_loss_path,
is_use_cf_embeddings=cfg.is_use_cf_embeddings,
cf_embeddings_train_path=cfg.cf_embeddings_train_path,
cf_embeddings_test_path=cfg.cf_embeddings_test_path,
confidence_type=cfg.confidence_type,
conf_max_min_ratio=cfg.conf_max_min_ratio,
)
logger.info(f"Loadded data in {time.time() -t0 :.2f} sec")
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
# Create dataloaders
t0 = time.time()
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=True,
pin_memory=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
)
# Load model
lit_h = LitModel(
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
cfg,
pos_weight,
out_dir=out_dir,
)
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
precision=16,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(
f"Finish training in {time.time() -t_start :.2f} sec. {lit_h.map_best=:.3f}"
)
logger.info(f"{os.getcwd()=}")
if __name__ == "__main__":
train_model()
| collaborative_image_understanding-main | src/main_train_model.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import hydra
import numpy as np
import pandas as pd
import scipy
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from scipy.stats import ttest_rel
from sklearn.metrics import average_precision_score
from tqdm import tqdm
import torch
logger = logging.getLogger(__name__)
def mean_confidence_interval(data, confidence=0.9):
a = 1.0 * np.array(data)
n = len(a)
se = scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
return h
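# Returns the half-width h of a two-sided Student-t confidence interval (default 90%)
# around the sample mean, i.e. the interval is mean(data) +/- h.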
def compare_results(preds_dict: dict, eval_function, metric_dict: dict):
for dataset_name, dataset_dict in preds_dict.items():
print(dataset_name)
if dataset_name in metric_dict and dataset_name != "Toys":
print(f"{dataset_name} exists in metric_dict")
continue
labels = dataset_dict["labels"]
no_cf_preds = dataset_dict["no_cf_preds"]
with_cf_preds = dataset_dict["with_cf_preds"]
df = single_set_compare_results(
labels, no_cf_preds, with_cf_preds, eval_function
)
metric_dict[dataset_name] = df
logger.info(df[["no_cf", "with_cf", "improvement"]].round(3).T)
return metric_dict
def single_label_ratio_compare_results(
label_ratio, labels, preds_a, preds_b, eval_function
):
# Define output
res_dict = {"label_ratio": label_ratio}
# Evaluate performance
perf_a = eval_function(labels, preds_a)
perf_b = eval_function(labels, preds_b)
res_dict["pvalue"] = ttest_rel(perf_a, perf_b).pvalue
# No CF
res_dict["no_cf"] = np.mean(perf_a)
res_dict["no_cf_std"] = np.std(perf_a)
res_dict["no_cf_ci"] = mean_confidence_interval(perf_a)
# With CF
res_dict["with_cf"] = np.mean(perf_b)
res_dict["with_cf_std"] = np.std(perf_b)
res_dict["with_cf_ci"] = mean_confidence_interval(perf_b)
return res_dict
def single_set_compare_results(
labels, no_cf_pred_list, with_cf_pred_list, eval_function
):
# Defining a dict
res_dicts = []
total = len(no_cf_pred_list)
label_ratios = np.arange(0.1, 1.1, 0.1)
for label_ratio, preds_a, preds_b in tqdm(
zip(label_ratios, no_cf_pred_list, with_cf_pred_list), total=total
):
res_dict = single_label_ratio_compare_results(
label_ratio, labels, preds_a, preds_b, eval_function
)
res_dicts.append(res_dict)
df = pd.DataFrame(res_dicts)
df = df.set_index("label_ratio")
df["improvement"] = df["with_cf"] / df["no_cf"] - 1.0
return df
def calc_top1_acc(labels, preds):
return np.array(
[labels[n][top1] for n, top1 in enumerate(np.argmax(preds, axis=1))]
)
def calc_recall_at_k(labels, preds, k: int = 5):
recalls = []
for pred, label in zip(torch.tensor(preds), torch.tensor(labels)):
_, pred_idx = torch.topk(pred, k=k) # The predicted labels
label_idx = torch.where(label == 1)[0] # The ground truth labels
# In case there are no labels
if len(label_idx) == 0:
continue
# Recall per item
recall_i = sum(el in pred_idx for el in label_idx) / len(label_idx)
recalls.append(recall_i)
return recalls
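# Illustrative example: with k=3, if the top-3 predicted labels are {2, 5, 7} and the
# ground-truth labels are {5, 9}, the per-item recall is 1/2.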
def calc_recall_at_1(labels, preds):
return calc_recall_at_k(labels, preds, k=1)
def calc_recall_at_3(labels, preds):
return calc_recall_at_k(labels, preds, k=3)
def calc_recall_at_5(labels, preds):
return calc_recall_at_k(labels, preds, k=5)
def calc_recall_at_10(labels, preds):
return calc_recall_at_k(labels, preds, k=10)
def calc_precision_at_k(labels, preds, k: int = 5):
ps = []
for pred, label in zip(torch.tensor(preds), torch.tensor(labels)):
_, pred_idx = torch.topk(pred, k=k) # The predicted labels
label_idx = torch.where(label == 1)[0] # The ground truth labels
# In case there are no labels
if len(label_idx) == 0:
continue
# Precision per item
p_i = sum(el in label_idx for el in pred_idx) / k
ps.append(p_i)
return ps
def calc_precision_at_1(labels, preds):
return calc_precision_at_k(labels, preds, k=1)
def calc_precision_at_3(labels, preds):
return calc_precision_at_k(labels, preds, k=3)
def calc_precision_at_5(labels, preds):
return calc_precision_at_k(labels, preds, k=5)
def calc_precision_at_10(labels, preds):
return calc_precision_at_k(labels, preds, k=10)
def calc_ap_score(labels, preds) -> np.ndarray:
aps = []
num_experiments = 50
num_samples = int(0.9 * len(labels))
idxs_list = np.random.randint(
low=0, high=len(labels), size=(num_experiments, num_samples)
)
for idxs in idxs_list:
labels_chosen, preds_chosen = labels[idxs], preds[idxs]
mask = labels_chosen.sum(axis=0) > 0
ap = average_precision_score(labels_chosen[:, mask], preds_chosen[:, mask])
aps.append(ap)
return np.array(aps)
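# calc_ap_score is a bootstrap estimate: 50 resamples of 90% of the items (with
# replacement), masking out classes that have no positives in a resample, yielding an
# array of AP values whose spread feeds the paired t-test and confidence interval above.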
def build_label_ratio_dicts(results_path):
res_dict = OmegaConf.load(results_path)
# Build absolute path
res_dict = {
key: osp.join(res_dict["base_path"], value)
for key, value in res_dict.items()
if key != "base_path"
}
no_cf_dict = {key: value for key, value in res_dict.items() if "_no_cf" in key}
with_cf_dict = {key: value for key, value in res_dict.items() if "_with_cf" in key}
return no_cf_dict, with_cf_dict
def load_preds(base_path):
no_cf_dict, with_cf_dict = build_label_ratio_dicts(base_path)
labels = np.load(osp.join(list(no_cf_dict.values())[0], "labels.npy"))
no_cf_preds, with_cf_preds = [], []
no_cf_aps, with_cf_aps = [], []
for (key_a, path_a), (key_b, path_b) in zip(
no_cf_dict.items(), with_cf_dict.items()
):
preds_a = np.load(osp.join(path_a, "preds.npy"))
preds_b = np.load(osp.join(path_b, "preds.npy"))
ap_a = average_precision_score(labels, preds_a) # ,average='micro')
ap_b = average_precision_score(labels, preds_b) # ,average='micro')
ratio = np.round(100 * np.round(ap_b, 3) / np.round(ap_a, 3) - 100, 2)
print(
f"{key_a} {key_b} [{ap_a:.3f} {ap_b:.3f} {ratio:.3f}%]. size={preds_a.shape}"
)
no_cf_preds.append(preds_a)
with_cf_preds.append(preds_b)
no_cf_aps.append(ap_a)
with_cf_aps.append(ap_b)
return {
"no_cf_preds": no_cf_preds,
"with_cf_preds": with_cf_preds,
"labels": labels,
"no_cf_ap": np.array(no_cf_aps),
"with_cf_ap": np.array(with_cf_aps),
}
@hydra.main(
config_path="../configs",
config_name="evaluate_methods",
)
def evaluate_methods(cfg: DictConfig):
os.chdir(get_original_cwd())
out_path = osp.join("../outputs/figures")
metric_res_dicts_path = osp.join(out_path, "metric_res_dicts.npy")
dataset_mapping = {
"pinterest": "Pinterest",
"movielens": "MovieLens",
"Clothing_Shoes_and_Jewelry": "Clothing",
"Toys_and_Games": "Toys",
}
preds_dict = {}
for dataset_name, print_name in dataset_mapping.items():
print(dataset_name)
preds_dict[print_name] = load_preds(
osp.join(f"../outputs/{dataset_name}/results.yaml")
)
metric_funcs = {
"mAP": calc_ap_score,
}
if osp.exists(metric_res_dicts_path):
metric_res_dicts = np.load(metric_res_dicts_path, allow_pickle=True).item()
else:
metric_res_dicts = {}
for metric_name, metric_func in metric_funcs.items():
logger.info(metric_name)
# Initialize output: if the metric exists, reuse previous results
single_metric_res_dict = {}
if metric_name in metric_res_dicts:
single_metric_res_dict = metric_res_dicts[metric_name]
# metric -> dataset -> performance dataframe
single_metric_res_dict = compare_results(
preds_dict, metric_func, single_metric_res_dict
)
# Add to dict
metric_res_dicts[metric_name] = single_metric_res_dict
np.save(metric_res_dicts_path, metric_res_dicts)
logger.info("")
np.save(metric_res_dicts_path, metric_res_dicts)
if __name__ == "__main__":
evaluate_methods()
| collaborative_image_understanding-main | src/eval_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
def _get_version():
path = os.path.join(ROOT_DIR, "version.txt")
version = open(path, "r").read().strip()
return version
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection & inclusion of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
cfg = "Debug" if debug else "Release"
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
cmake_args = [
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
f"-DPYTHON_EXECUTABLE={sys.executable}",
f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
"-DBUILD_PYARK=ON",
"-DBUILD_ARK_TESTS=OFF",
"-DBUILD_ARK_EXAMPLES=OFF",
]
build_args = []
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSx on conda-forge)
if "CMAKE_ARGS" in os.environ:
cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
if self.compiler.compiler_type != "msvc":
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator:
try:
import ninja # noqa: F401
cmake_args += ["-GNinja"]
except ImportError:
pass
else:
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
]
build_args += ["--config", cfg]
if sys.platform.startswith("darwin"):
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
if archs:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += [f"-j{self.parallel}"]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
)
subprocess.check_call(
["cmake", "--build", "."] + build_args, cwd=self.build_temp
)
def main():
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
name="projectaria_tools",
version=_get_version(),
description="Project Aria Tools",
long_description="Python API for sensor models and streaming of Aria datasets.",
url="https://github.com/facebookresearch/aria_data_tools",
ext_modules=[CMakeExtension("projectaria_tools", sourcedir=ROOT_DIR)],
author="Meta Reality Labs Research",
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
python_requires=">=3.6",
packages=find_packages(),
license="Apache-2.0",
)
if __name__ == "__main__":
main()
| Aria_data_tools-main | src/setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from PIL import Image
from projectaria_tools.dataprovider import AriaVrsDataProvider, StreamId
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--vrs",
dest="vrs_path",
type=str,
required=True,
help="path to vrs file",
)
return parser.parse_args()
# DataProvider samples
# Extract 10 RGB camera thumbnails from a VRS file
# Teachings:
# - How to initialize the AriaVrsDataProvider
# - How to register a given Player to the DataProvider
# - How to initialize a StreamId and query data records for it at a given timestamp
if __name__ == "__main__":
args = parse_args()
aria_data_provider: AriaVrsDataProvider = AriaVrsDataProvider()
if not aria_data_provider.openFile(args.vrs_path):
print(f"failed to open vrs: {args.vrs_path}")
aria_data_provider.setRgbCameraPlayer()
aria_data_provider.setVerbose(True)
# from https://facebookresearch.github.io/Aria_data_tools/docs/sensors-measurements/
rgb_camera_recordable_type_id = 214
rgb_camera_instance_id = 1
rgb_camera_stream_id = StreamId(
rgb_camera_recordable_type_id, rgb_camera_instance_id
)
recording_start = aria_data_provider.getFirstTimestampSec()
recording_end = aria_data_provider.getLastDataRecord(rgb_camera_stream_id).timestamp
sample_count = 10
sample_timestamps = np.linspace(recording_start, recording_end, sample_count)
width = aria_data_provider.getImageWidth(rgb_camera_stream_id)
height = aria_data_provider.getImageHeight(rgb_camera_stream_id)
resize_ratio = 10
big_image = Image.new(
"RGB", (int(width * sample_count / resize_ratio), int(height / resize_ratio))
)
current_width = 0
for sample in sample_timestamps:
aria_data_provider.readDataRecordByTime(rgb_camera_stream_id, sample)
rgb_player = aria_data_provider.getRgbCameraPlayer()
img = rgb_player.getData()
img_buf = img.pixelFrame.getBuffer()
buffer_array = np.array(img_buf, dtype=np.uint8)
image_array = buffer_array.reshape((height, width, 3))
image = Image.fromarray(image_array)
new_size = (
int(image.size[0] / resize_ratio),
int(image.size[1] / resize_ratio),
)
image = image.resize(new_size).rotate(-90)
big_image.paste(image, (current_width, 0))
current_width = int(current_width + width / resize_ratio)
big_image.show()
| Aria_data_tools-main | src/python/tutorials/DataProvider_ImageTimeline.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from projectaria_tools import sensors
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--vrs",
dest="vrs_path",
type=str,
required=True,
help="path to vrs file",
)
return parser.parse_args()
# Sensors samples
# Show how to manipulate the most common function in Sensors module
# Teachings:
# - How to retrieve calibration data from a given VRS file
# - How to retrieve per sensor calibration data
# - How to retrieve Camera calibration and use it to project/unproject points
# - How to transfer points between sensor frames
if __name__ == "__main__":
args = parse_args()
#
# Read calibration data from a VRS file
#
print("Attempting to read calibration data from: ", args.vrs_path)
calib_str = sensors.getCalibStrFromFile(args.vrs_path)
device = sensors.DeviceModel.fromJson(calib_str)
print(f"Cameras: {device.getCameraLabels()}")
print(f"IMUs: {device.getImuLabels()}")
print(f"Magnetometers: {device.getMagnetometerLabels()}")
print(f"Barometers: {device.getBarometerLabels()}")
print(f"Microphones: {device.getMicrophoneLabels()}")
#
# Demonstrate how to use camera model
# Create a 3D points and project and unproject it with a given camera
camLabel = "camera-slam-left"
p_slamLeft = np.array([3.0, 2.0, 1.0])
uv_slamLeft = device.getCameraCalib(camLabel).projectionModel.project(p_slamLeft)
print(
f"Projecting 3D point {p_slamLeft} to image space of {camLabel}: "
+ f"{uv_slamLeft}."
)
p_slamLeft_convertBack = device.getCameraCalib(camLabel).projectionModel.unproject(
uv_slamLeft
)
print(
f"Unprojecting 2D pixel {uv_slamLeft} to 3D space in "
+ f"the frame of {camLabel}: {p_slamLeft_convertBack}."
)
# Transform points between sensor frames.
imuLabel = "imu-left"
p_imuLeft = device.transform(p_slamLeft, camLabel, imuLabel)
print(
f"Transforming {p_slamLeft} from {camLabel} frame to {imuLabel} "
+ f"frame: {p_imuLeft}"
)
# Rectifying points with the IMU accelerometer model.
p_imuLeft_rect = device.getImuCalib(
imuLabel
).accel.compensateForSystematicErrorFromMeasurement(p_imuLeft)
print(
f"Point {p_imuLeft} is rectified by the accelerometer model "
+ f"of {imuLabel} as: {p_imuLeft_rect}"
)
| Aria_data_tools-main | src/python/tutorials/Sensors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.rembo import HeSBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = HeSBOStrategy(
D=len(benchmark_problem._contextual_parameters), d=8, init_per_proj=8
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/hesbo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
| ContextualBO-main | park_abr/run_park_hesbo.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for Ensemble BO.
Requires installing EBO from https://github.com/zi-w/Ensemble-Bayesian-Optimization.
"""
import os
import sys
sys.path.insert(1, os.path.join(os.getcwd(), 'Ensemble-Bayesian-Optimization'))
import time
import json
from ebo_core.ebo import ebo
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
lbs = np.array([0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT))
ubs = np.array([1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT))
###### Prepare EBO
# All options except: x_range, dx, max_value, T, gp_sigma, dim_limit (3)
# Taken as package defaults from test_ebo.py
core_options = {
'B':10, # number of candidates to be evaluated
'dim_limit':4, # max dimension of the input for each additive function component
'isplot':0, # 1 if plotting the result; otherwise 0.
'z':None, 'k':None, # group assignment and number of cuts in the Gibbs sampling subroutine
'alpha':1., # hyperparameter of the Gibbs sampling subroutine
'beta':np.array([5.,2.]),
'opt_n':1000, # points randomly sampled to start continuous optimization of acfun
'pid':'test3', # process ID for Azure
'datadir':'tmp_data/', # temporary data directory for Azure
'gibbs_iter':10, # number of iterations for the Gibbs sampling subroutine
'useAzure':False, # set to True if use Azure for batch evaluation
'func_cheap':True, # if func cheap, we do not use Azure to test functions
'n_add':None, # this should always be None. it makes dim_limit complicated if not None.
'nlayers': 100, # number of the layers of tiles
'gp_type':'l1', # other choices are l1, sk, sf, dk, df
'n_bo':10, # min number of points selected for each partition
'n_bo_top_percent': 0.5, # percentage of top in bo selections
'n_top':10, # how many points to look ahead when doing choose Xnew
'min_leaf_size':10, # min number of samples in each leaf
'max_n_leaves':10, # max number of leaves
'thresAzure':1, # if batch size > thresAzure, we use Azure
'save_file_name': 'tmp/tmp.pk',
}
for rep in range(25):
print('================', rep)
options = {
'x_range': np.vstack((lbs, ubs)),
'dx': 4 * len(ABR_CONTEXT_CONFIG_DICT),
'max_value': 180, # Give it a pretty good guess for max value
'T': 75,
'gp_sigma': 1e-7,
}
options.update(core_options)
##### Run optimization
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
f = lambda x: -r_contextual.f(x) # since EBO maximizes
e = ebo(f, options)
try:
e.run()
except Exception:
pass
    with open(f"results/ebo_park_rep_{rep}.json", 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
# json.dump(r_contextual.fs, fout)
print ('=============', time.time() - t1)
print(time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_ebo.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List
import numpy as np
import park
from ax.utils.common.logger import get_logger
NUM_RUNS = 400
TH_DEFAULT = 3
TH_START_DEFAULT = 1
logger = get_logger(name="ABR_SIM_FB")
class Agent(object):
def __init__(
self,
bw,
bf,
c,
exp_weight,
th=TH_DEFAULT,
th_start=TH_START_DEFAULT,
num_encodes=5,
):
"""Constructor.
Args:
bw: bandwidth prediction scaling term
bf: buffer scaling term
c: constant shift
th: distance between thresholds for different
bitrate
th: starting threshold of bitrates
exp_weight: expoential weights for bandwidth estimate
num_encodes: number of encoding levels (available
bitrates)
"""
self.bw = bw
self.bf = bf
self.c = c
self.num_encodes = num_encodes
self.exp_weight = exp_weight
self.th_levels = [th_start + i * th for i in range(num_encodes)]
        self.th_levels.append(np.inf) # avoid empty sequence at lookup
self.reset()
def reset(self):
self.prev_bw = []
self.prev_t = []
def exp_avg_bw(self, prev_bw, prev_t):
"""Expoential average bandwidth based on previous observations.
Args:
prev_bw: list of previous bandwidth observation
prev_t: time intervals to the bandwidth observations
"""
assert len(prev_bw) == len(prev_t)
if len(prev_bw) == 0:
return 0 # no previous observations
prev_bw = np.array(prev_bw)
prev_t = np.array(prev_t)
prev_t_cumsum = np.cumsum(prev_t[::-1])[::-1]
prev_t_exp = np.exp(-self.exp_weight * prev_t_cumsum)
bw = np.sum(prev_bw * prev_t_exp) / np.sum(prev_t_exp)
return bw
def get_action(self, obs):
# network bandwidth measurement for downloading the
# last video chunk (with some normalization)
curr_bw = obs[0] / 100000
curr_t = obs[1]
self.prev_bw.append(curr_bw)
self.prev_t.append(curr_t)
        # estimate bandwidth with exponential average over past observations
bw_est = self.exp_avg_bw(self.prev_bw, self.prev_t)
# current video buffer occupancy with some normalization (see
# https://github.com/park-project/park/blob/master/park/envs/abr_sim/abr.py
# L82-L88 for more details)
curr_bf = obs[2] / 10
# here we assume the network bandwidth for downloading
# the next chunk is the same (you can use more sophisticated method)
th = self.bw * bw_est + self.bf * curr_bf + self.c
# check which bitrate level is just below the threshold
act = min(i for i in range(self.num_encodes + 1) if self.th_levels[i] > th)
return act
class ContextualAgent(Agent):
def __init__(self, bw_dict, bf_dict, c_dict, exp_weight_dict, num_encodes=5):
"""Contextual agent Constructor that resets bandwidths, buffer etc for
different contexts.
"""
self.bw_dict = bw_dict
self.bf_dict = bf_dict
self.c_dict = c_dict
self.exp_weight_dict = exp_weight_dict
self.num_encodes = num_encodes
self.reset(context_name=None)
def reset(self, context_name):
self.prev_bw = []
self.prev_t = []
if context_name is not None:
self.bw = self.bw_dict[context_name]
self.bf = self.bf_dict[context_name]
self.c = self.c_dict[context_name]
self.th = TH_DEFAULT
self.th_start = TH_START_DEFAULT
self.th_levels = [
self.th_start + i * self.th for i in range(self.num_encodes)
]
            self.th_levels.append(np.inf) # avoid empty sequence at lookup
self.exp_weight = self.exp_weight_dict[context_name]
class ParkNoncontextualRunner:
def __init__(self, context_dict, max_eval=1000, return_context_reward=True):
# For tracking iterations
self.fs = []
self.context_fs = []
self.n_eval = 0
self.max_eval = max_eval
self.context_dict = context_dict
self.return_context_reward = return_context_reward
# define search space for non-dp setting
self._base_parameters = [
{
"name": "bw",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "bf",
"type": "range",
"bounds": [0.0, 3.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "c",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "exp_weight",
"type": "range",
"bounds": [0.0001, 0.25],
"value_type": "float",
"log_scale": False,
},
]
self.n_params = len(self._base_parameters)
@property
def base_parameters(self) -> List[Dict]:
return self._base_parameters
def f(self, x):
"""
x = [bw, bf, c, exp_weight]
"""
if self.n_eval >= self.max_eval:
            raise StopIteration("Evaluation budget exhausted")
agent = Agent(bw=x[0], bf=x[1], c=x[2], exp_weight=x[3])
rewards, context_rewards = run_non_contextual_experiments_multiple_times(
agent=agent, context_dict=self.context_dict, num_runs=NUM_RUNS
) # Change this to 1 to make it faster
f_x = np.mean(rewards)
self.n_eval += 1
self.fs.append(f_x)
self.context_fs.append(context_rewards)
if self.return_context_reward is False:
return -f_x
return -f_x, context_rewards # because maximization
class ParkContextualRunner(ParkNoncontextualRunner):
def __init__(
self, num_contexts, context_dict, max_eval=1000, return_context_reward=True
):
super().__init__(
context_dict=context_dict,
max_eval=max_eval,
return_context_reward=return_context_reward,
)
self.num_contexts = num_contexts
self.context_name_list = list(context_dict.keys())
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.extend(
[
{
"name": f"{self._base_parameters[j]['name']}_{context_name}",
"type": self._base_parameters[j]["type"],
"bounds": self._base_parameters[j]["bounds"],
"value_type": self._base_parameters[j]["value_type"],
"log_scale": self._base_parameters[j]["log_scale"],
}
for j in range(self.n_params)
]
)
self._decomposition = {
f"{context_name}": [
f"{self._base_parameters[j]['name']}_{context_name}"
for j in range(self.n_params)
]
for context_name in self.context_name_list
}
@property
def contextual_parameters(self) -> List[Dict]:
return self._contextual_parameters
@property
def contextual_parameter_decomposition(self) -> List[Dict]:
return self._decomposition
def f(self, x):
"""
x = [bw_1, bf_1, c_1, exp_weight_1, bw_2, bf_2, c_2, exp_weight_2, ...]
"""
if self.n_eval >= self.max_eval:
            raise StopIteration("Evaluation budget exhausted")
bw_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params]
for i in range(self.num_contexts)
}
bf_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params + 1]
for i in range(self.num_contexts)
}
c_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params + 2]
for i in range(self.num_contexts)
}
exp_weight_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params + 3]
for i in range(self.num_contexts)
}
agent = ContextualAgent(
bw_dict=bw_dict,
bf_dict=bf_dict,
c_dict=c_dict,
exp_weight_dict=exp_weight_dict,
)
# Change this to 1 to make it run faster
rewards, context_rewards = run_contextual_experiments_multiple_times(
agent=agent, context_dict=self.context_dict, num_runs=NUM_RUNS
)
f_x = np.mean(rewards)
self.n_eval += 1
self.fs.append(f_x)
self.context_fs.append(context_rewards)
if self.return_context_reward is False:
return -f_x
return -f_x, context_rewards
def run_contextual_experiments_multiple_times(agent, context_dict, num_runs):
total_rewards = []
context_rewards = {}
for context_name, context_val in context_dict.items():
env = park.make("abr_sim_fb")
reward_list = []
for irun in range(num_runs):
obs = env.reset(context_val, irun)
if len(obs) == 0:
break
agent.reset(context_name)
done = False
rewards = 0
while not done:
act = agent.get_action(obs)
obs, reward, done, info = env.step(act)
rewards += reward # context weight could be applied here
total_rewards.append(rewards)
reward_list.append(rewards)
context_rewards[context_name] = -(np.mean(reward_list))
return total_rewards, context_rewards
def run_non_contextual_experiments_multiple_times(agent, context_dict, num_runs):
total_rewards = []
context_rewards = {}
for context_name, context_val in context_dict.items():
env = park.make("abr_sim_fb")
reward_list = []
for irun in range(num_runs):
obs = env.reset(context_val, irun)
if len(obs) == 0:
break
agent.reset()
done = False
rewards = 0
while not done:
act = agent.get_action(obs)
obs, reward, done, info = env.step(act)
rewards += reward # context weight could be applied here
total_rewards.append(rewards)
reward_list.append(rewards)
context_rewards[context_name] = -(np.mean(reward_list))
return total_rewards, context_rewards
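# Illustrative usage sketch appended for this dump (not part of the original
# benchmark code). It only exercises the pure-Python pieces above -- the
# exponentially weighted bandwidth estimate, the threshold rule, and the
# contextual parameter decomposition -- without touching the park simulator,
# so it is cheap to run. The observation and the two-context dict below are
# hypothetical values chosen for the example.
if __name__ == "__main__":
    demo_agent = Agent(bw=0.5, bf=1.0, c=0.5, exp_weight=0.1)
    # Exponential averaging weights recent bandwidth samples more heavily
    # (roughly 2.43 here, pulled toward the most recent sample of 4.0).
    print("bw estimate:", demo_agent.exp_avg_bw(prev_bw=[1.0, 2.0, 4.0], prev_t=[1.0, 1.0, 1.0]))
    # Threshold rule: obs = [raw bandwidth, download time, raw buffer].
    print("chosen bitrate level:", demo_agent.get_action([250000.0, 1.0, 20.0]))
    demo_runner = ParkContextualRunner(
        num_contexts=2,
        context_dict={
            "c0": {"name": "c0", "delay": 0.1},
            "c1": {"name": "c1", "delay": 0.2},
        },
    )
    # One (bw, bf, c, exp_weight) block per context, flattened in context order.
    print("decomposition:", demo_runner.contextual_parameter_decomposition)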
|
ContextualBO-main
|
park_abr/fb_abr_problem.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for Add-GP-UCB.
Requires installing dragonfly-opt from pip. The experiments here used version
0.1.4.
"""
import time
import json
from argparse import Namespace
import numpy as np
from dragonfly import minimise_function
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
options = Namespace(acq="add_ucb")
try:
minimise_function(
r_contextual.f,
domain=[[0.0, 1.0], [0.0, 3.0], [0.0, 1.0], [0.0001, 0.25]] * len(ABR_CONTEXT_CONFIG_DICT),
max_capital=num_trials,
options=options,
)
except StopIteration:
pass
with open(f'results/add_ucb_park_rep_{rep}.json', 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_add_ucb.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.rembo import REMBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = REMBOStrategy(
D=len(benchmark_problem._contextual_parameters), d=8, init_per_proj=8
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/rembo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_rembo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from cbo_generation_strategy import get_ContextualBO
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = GenerationStrategy(
name="SAC",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(
get_ContextualBO,
-1,
model_kwargs={"decomposition": decomposition},
),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/cbo_sac_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_sac.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for CMAES.
Requires installing cma from pip. The experiments here used version 2.7.0.
"""
import cma
import time
import json
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
try:
_ = cma.fmin(
objective_function=r_contextual.f,
x0=[0.5, 1.0, 0.5, 0.001] * len(ABR_CONTEXT_CONFIG_DICT),
sigma0=0.15,
options={
"bounds": [
[0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT),
[1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT),
],
"maxfevals": num_trials,
},
)
except StopIteration:
pass
with open(f'results/cma_es_park_rep_{rep}.json', 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_cma_es.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.alebo import ALEBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = ALEBOStrategy(
D=len(benchmark_problem._contextual_parameters), d=8, init_size=8
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/alebo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_alebo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_GPEI, get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = GenerationStrategy(
name="GPEI",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(get_GPEI, -1),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/standard_bo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_standard_bo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_GPEI, get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkNoncontextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkNoncontextualRunner(context_dict=ABR_CONTEXT_CONFIG_DICT)
t1 = time.time()
gs = GenerationStrategy(
name="GPEI",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(get_GPEI, -1),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.base_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
x = [
parameters.get(param["name"]) for param in benchmark_problem.base_parameters
]
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/non_contextual_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_non_contextual.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from cbo_generation_strategy import get_ContextualEmbeddingBO
from fb_abr_problem import ParkContextualRunner
CBO_EMB_MODEL_GEN_OPTIONS = {
"acquisition_function_kwargs": {"q": 1, "noiseless": True},
"optimizer_kwargs": {
"method": "SLSQP",
"batch_limit": 1,
"joint_optimization": True,
},
}
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = GenerationStrategy(
name="LCE-A",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(
get_ContextualEmbeddingBO,
-1,
model_kwargs={"decomposition": decomposition},
model_gen_kwargs={"model_gen_options": CBO_EMB_MODEL_GEN_OPTIONS},
),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/cbo_lcea_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_lcea.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for TuRBO.
Requires installing turbo from https://github.com/uber-research/TuRBO.
"""
import turbo
import time
import json
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
lbs = np.array([0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT))
ubs = np.array([1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT))
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
turbo1 = turbo.Turbo1(
f=r_contextual.f,
lb=lbs,
ub=ubs,
n_init=8,
max_evals=75,
)
turbo1.optimize()
with open(f'results/turbo_park_rep_{rep}.json', 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
# json.dump(r_contextual.fs, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_turbo.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Type
import torch
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.core.parameter import ChoiceParameter
from ax.core.search_space import SearchSpace
from ax.models.torch.cbo_lcea import LCEABO
from ax.models.torch.cbo_sac import SACBO
from ax.modelbridge.factory import DEFAULT_TORCH_DEVICE
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.registry import (
Cont_X_trans,
Y_trans,
)
from ax.modelbridge.torch import TorchModelBridge
from ax.modelbridge.transforms.base import Transform
from ax.models.random.sobol import SobolGenerator
from ax.models.torch.botorch import BotorchModel
def get_ContextualBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=SACBO(decomposition=decomposition),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
def get_ContextualEmbeddingBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
context_weight_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
search_space: Optional[SearchSpace] = None,
gp_model_args: Optional[Dict[str, Any]] = None,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=LCEABO(
decomposition=decomposition,
cat_feature_dict=cat_feature_dict,
embs_feature_dict=embs_feature_dict,
context_weight_dict=context_weight_dict,
embs_dim_list=embs_dim_list,
gp_model_args=gp_model_args,
),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
|
ContextualBO-main
|
park_abr/cbo_generation_strategy.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from park.envs import make
|
ContextualBO-main
|
park_abr/park/__init__.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from park import logger
# Env-related abstractions
class Env(object):
"""
The main park class. The interface follows OpenAI gym
https://gym.openai.com, which encapsulates an environment with
arbitrary behind-the-scenes dynamics. An environment can be
partially or fully observed.
The main API methods that users of this class need to know are:
observe
step
reset
seed
And set the following attributes:
action_space: The Space object corresponding to valid actions
observation_space: The Space object corresponding to valid observations
reward_range: A tuple corresponding to the min and max possible rewards
"""
# Set this in some subclasses
metadata = {"env.name": "abstract_env"}
reward_range = (-float("inf"), float("inf"))
# Set these in ALL subclasses
action_space = None
observation_space = None
def step(self, action):
"""
Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
            action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (boolean): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
def reset(self):
"""
Resets the state of the environment and returns an initial observation.
Returns: observation (object): the initial observation of the space.
"""
raise NotImplementedError
def seed(self, seed=None):
"""
Sets the seed for this env's random number generator(s).
"""
logger.warn("Could not seed environment " + self.metadata["env.name"])
return
# Real system environment abstractions
class SysEnv(object):
"""
    For many real world systems, the agent only passively returns
    an action when the system requests one. In other words, it is more
    natural for the system to run on its own, as opposed to using
    the step function to "tick the time" as in most simulated cases
    above.
    The main API method that users of this class need to know is:
run(agent_constructor, agent_parameters)
The user implements the agent in this format
class Agent(object):
def __init__(self, state_space, action_space, *args, **kwargs):
self.state_space = state_space
self.action_space = action_space
def get_action(self, obs, prev_reward, prev_done, prev_info):
act = self.action_space.sample()
# implement real action logic here
return act
"""
# Set this in some subclasses
metadata = {"env.name": "abstract_env"}
reward_range = (-float("inf"), float("inf"))
# Set these in ALL subclasses
action_space = None
observation_space = None
def run(self, agent, *args, **kwargs):
"""
Take in agent constructor, run the real system that consults the
agent for the action at certain events
"""
raise NotImplementedError
# Space-related abstractions
class Space(object):
"""
Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
action.
"""
def __init__(self, struct=None, shape=None, dtype=None):
import numpy as np # takes about 300-400ms to import, load lazily
self.struct = struct # tensor, graph, etc.
self.shape = None if shape is None else tuple(shape)
self.dtype = None if dtype is None else np.dtype(dtype)
def sample(self):
"""
        Uniformly sample a random element of this space
"""
raise NotImplementedError
def contains(self, x):
"""
Return boolean specifying if x is a valid
member of this space
"""
raise NotImplementedError
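# Minimal illustrative sketch appended for this dump (not part of the upstream
# park code): a toy Env subclass exercising the step/reset interface described
# above. The countdown dynamics are invented purely for the example.
if __name__ == "__main__":
    class _CountdownEnv(Env):
        metadata = {"env.name": "countdown_env"}
        def reset(self):
            self.steps_left = 3
            return self.steps_left
        def step(self, action):
            # one unit of reward per step; the episode ends at zero
            self.steps_left -= 1
            done = self.steps_left == 0
            return self.steps_left, 1.0, done, {}
    env = _CountdownEnv()
    obs, done = env.reset(), False
    while not done:
        obs, reward, done, info = env.step(action=None)
        print("obs:", obs, "reward:", reward, "done:", done)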
|
ContextualBO-main
|
park_abr/park/core.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
import logging
def debug(msg):
logging.debug(msg)
def info(msg):
logging.info(msg)
def warn(msg):
logging.warning(msg)
def error(msg):
logging.error(msg)
def exception(msg, *args, **kwargs):
logging.exception(msg, *args, **kwargs)
|
ContextualBO-main
|
park_abr/park/logger.py
|
import numpy as np
from park import core, logger
from park.spaces.rng import np_random
class Box(core.Space):
"""
A box in R^n.
I.e., each coordinate is bounded.
Example usage:
self.action_space = spaces.Box(low=-10, high=10, shape=(1,))
"""
def __init__(self, low=None, high=None, struct=None, shape=None, dtype=None):
"""
Two kinds of valid input:
Box(low=-1.0, high=1.0, shape=(3,4)) # low and high are scalars, and shape is provided
Box(low=np.array([-1.0,-2.0]), high=np.array([2.0,4.0])) # low and high are arrays of the same shape
"""
if shape is None:
assert low.shape == high.shape
shape = low.shape
else:
assert np.isscalar(low) and np.isscalar(high)
low = low + np.zeros(shape)
high = high + np.zeros(shape)
if dtype is None: # Autodetect type
if (high == 255).all():
dtype = np.uint8
else:
dtype = np.float32
logger.warn(
"park.spaces.Box autodetected dtype as {}. Please provide explicit dtype.".format(
dtype
)
)
self.low = low.astype(dtype)
self.high = high.astype(dtype)
core.Space.__init__(self, struct, shape, dtype)
def sample(self):
return np_random.uniform(
low=self.low,
high=self.high + (0 if self.dtype.kind == "f" else 1),
size=self.low.shape,
).astype(self.dtype)
def contains(self, x):
return (
x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()
)
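# Illustrative usage sketch appended for this dump (not part of the upstream
# park code): a Box with scalar bounds and an explicit dtype, mirroring the
# docstring example above.
if __name__ == "__main__":
    demo_box = Box(low=0.0, high=1.0, shape=(3,), dtype=np.float32)
    x = demo_box.sample()
    print("sample:", x, "contained:", demo_box.contains(x))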
|
ContextualBO-main
|
park_abr/park/spaces/box.py
|
import numpy as np
"""
Separate the random number generator from the environment.
This is used for all random sampling in the spaces' native methods.
We expect new algorithms to have their own RNGs.
"""
np_random = np.random.RandomState()
np_random.seed(42)
|
ContextualBO-main
|
park_abr/park/spaces/rng.py
|
import numpy as np
from park import core
from park.spaces.rng import np_random
class Discrete(core.Space):
"""
{0,1,...,n-1}
Example usage:
self.observation_space = spaces.Discrete(2)
"""
def __init__(self, n):
self.n = n
core.Space.__init__(self, "tensor_int64", (), np.int64)
def sample(self):
return np_random.randint(self.n)
def contains(self, x):
if isinstance(x, int):
as_int = x
elif isinstance(x, (np.generic, np.ndarray)) and (
x.dtype.kind in np.typecodes["AllInteger"] and x.shape == ()
):
as_int = int(x)
else:
return False
return as_int >= 0 and as_int < self.n
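# Illustrative usage sketch appended for this dump (not part of the upstream
# park code): a Discrete space over the 6 bitrate levels used by the ABR
# environments.
if __name__ == "__main__":
    demo_space = Discrete(6)
    a = demo_space.sample()
    print("sampled action:", a, "valid:", demo_space.contains(a))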
|
ContextualBO-main
|
park_abr/park/spaces/discrete.py
|
from park.spaces.box import Box
from park.spaces.discrete import Discrete
|
ContextualBO-main
|
park_abr/park/spaces/__init__.py
|
import numpy as np
def np_random(seed=42):
if not (isinstance(seed, int) and seed >= 0):
raise ValueError("Seed must be a non-negative integer.")
rng = np.random.RandomState()
rng.seed(seed)
return rng
|
ContextualBO-main
|
park_abr/park/utils/seeding.py
|
import os
def create_folder_if_not_exists(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
|
ContextualBO-main
|
park_abr/park/utils/misc.py
|
import networkx as nx
import numpy as np
class DirectedGraph(object):
def __init__(self, node_features=None, edge_features=None):
self.graph = nx.DiGraph()
if node_features is not None:
self.update_nodes(node_features)
if edge_features is not None:
self.update_edges(edge_features)
def update_nodes(self, node_features):
self.graph.add_nodes_from(node_features.keys())
for node in node_features:
self.graph.nodes[node]["feature"] = node_features[node]
def remove_nodes(self, nodes):
self.graph.remove_nodes_from(nodes)
def update_edges(self, edge_features):
self.graph.add_edges_from(edge_features.keys())
for edge in edge_features:
assert len(edge) == 2
self.graph[edge[0]][edge[1]]["feature"] = edge_features[edge]
def remove_edges(self, edges):
self.graph.remove_edges_from(edges)
def has_node(self, node):
return self.graph.has_node(node)
def has_edge(self, edge):
assert len(edge) == 2
return self.graph.has_edge(edge[0], edge[1])
def nodes(self):
return self.graph.nodes
def edges(self):
return self.graph.edges
def number_of_nodes(self):
return self.graph.number_of_nodes()
def number_of_edges(self):
return self.graph.number_of_edges()
def get_node_features_tensor(self):
node_features = []
node_map = {}
for (i, n) in enumerate(self.graph.nodes):
feature = self.graph.nodes[n]["feature"]
if feature is not None:
node_features.append(feature)
node_map[i] = n
return np.array(node_features), node_map
def get_edge_features_tensor(self):
edge_features = []
edge_map = {}
for (i, e) in enumerate(self.graph.edges):
feature = self.graph[e[0]][e[1]]["feature"]
if feature is not None:
edge_features.append(feature)
edge_map[i] = e
return np.array(edge_features), edge_map
def convert_to_tensor(self):
# node feature matrix, adjacency matrix, edge feature matrix,
# node map (node index -> node object),
# edge map (edge index -> edge object)
node_features, node_map = self.get_node_features_tensor()
edge_features, edge_map = self.get_edge_features_tensor()
adj_mat = nx.adjacency_matrix(self.graph)
return node_features, edge_features, adj_mat, node_map, edge_map
def get_node_feature(self, node):
return self.graph.nodes[node]["feature"]
def get_edge_feature(self, edge):
return self.graph[edge[0]][edge[1]]["feature"]
def get_neighbors(self, node):
list_neighbors = [n for n in self.graph.neighbors(node)]
return list_neighbors
def visualize(self):
# TODO: use pydot
pass
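# Illustrative usage sketch appended for this dump (not part of the upstream
# park code): a two-node graph with one edge, converted to the tensor
# representation returned by convert_to_tensor().
if __name__ == "__main__":
    g = DirectedGraph(
        node_features={0: np.array([1.0]), 1: np.array([2.0])},
        edge_features={(0, 1): np.array([0.5])},
    )
    node_feats, edge_feats, adj, node_map, edge_map = g.convert_to_tensor()
    print("nodes:", g.number_of_nodes(), "edges:", g.number_of_edges())
    print("adjacency:\n", adj.toarray())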
|
ContextualBO-main
|
park_abr/park/utils/directed_graph.py
|
def print_red(s):
print("\033[91m" + s + "\033[0m")
def print_orange(s):
print("\033[93m" + s + "\033[0m")
def print_green(s):
print("\033[92m" + s + "\033[0m")
def print_blue(s):
print("\034[92m" + s + "\033[0m")
|
ContextualBO-main
|
park_abr/park/utils/colorful_print.py
|
ContextualBO-main
|
park_abr/park/utils/__init__.py
|
|
from collections import OrderedDict
class OrderedSet(object):
def __init__(self, contents=()):
self.set = OrderedDict((c, None) for c in contents)
def __contains__(self, item):
return item in self.set
def __iter__(self):
return iter(self.set.keys())
def __len__(self):
return len(self.set)
def __reversed__(self):
return iter(reversed(self.set.keys()))
def add(self, item):
self.set[item] = None
def clear(self):
self.set.clear()
def pop(self):
item = next(iter(self.set))
del self.set[item]
return item
def remove(self, item):
del self.set[item]
def to_list(self):
return [k for k in self.set]
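# Illustrative usage sketch appended for this dump (not part of the upstream
# park code): OrderedSet keeps insertion order while offering set-style
# membership, add/remove and pop().
if __name__ == "__main__":
    s = OrderedSet(["b", "a", "c"])
    s.add("d")
    s.remove("a")
    print(s.to_list())  # ['b', 'c', 'd'] -- insertion order preserved
    print("b" in s, len(s))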
|
ContextualBO-main
|
park_abr/park/utils/ordered_set.py
|
# Format follows OpenAI gym https://gym.openai.com
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
def load(entry_point):
import pkg_resources # takes ~400ms to load, so we import it lazily
entry_point = pkg_resources.EntryPoint.parse("x={}".format(entry_point))
result = entry_point.resolve()
return result
class EnvSpec(object):
"""
A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
"""
def __init__(self, env_id, entry_point=None):
self.env_id = env_id
self._entry_point = entry_point
def make(self):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self._entry_point is None:
raise Exception(
"Environment " + self.env_id + " needs to specify an entry point"
)
elif callable(self._entry_point):
env = self._entry_point()
else:
cls = load(self._entry_point)
env = cls()
return env
class EnvRegistry(object):
"""
Register an env by ID. The goal is that results on a particular
environment should be comparable.
"""
def __init__(self):
self.env_specs = {}
def make(self, env_id):
spec = self.spec(env_id)
env = spec.make()
return env
def all(self):
return self.env_specs.values()
def spec(self, env_id):
if env_id not in self.env_specs:
raise KeyError("Environment " + env_id + " not defined.")
return self.env_specs[env_id]
def register(self, env_id, entry_point=None):
if env_id in self.env_specs:
raise Exception("Cannot re-register id: {}".format(env_id))
self.env_specs[env_id] = EnvSpec(env_id, entry_point)
# Global registry
registry = EnvRegistry()
def register(env_id, entry_point):
return registry.register(env_id, entry_point)
def make(env_id):
return registry.make(env_id)
def spec(env_id):
return registry.spec(env_id)
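# Illustrative usage sketch appended for this dump (not part of the upstream
# park code): entry points may be callables, so a throwaway class can be
# registered and instantiated without going through pkg_resources. The
# "demo_env" id is hypothetical and exists only for this example.
if __name__ == "__main__":
    class _DemoEnv(object):
        pass
    register(env_id="demo_env", entry_point=_DemoEnv)
    env = make("demo_env")
    print(type(env).__name__, spec("demo_env").env_id)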
|
ContextualBO-main
|
park_abr/park/envs/registration.py
|
# Format follows OpenAI gym https://gym.openai.com
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from park.envs.registration import make, register
register(env_id="abr_sim_fb", entry_point="park.envs.abr_sim:ABRSimFBEnv")
|
ContextualBO-main
|
park_abr/park/envs/__init__.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from collections import deque
import numpy as np
from ax.utils.common.logger import get_logger
from park import core, logger, spaces
from park.envs.abr_sim.trace_loader import (
get_chunk_time,
load_chunk_sizes,
load_traces,
sample_trace,
)
from park.utils import seeding
logger = get_logger(name="ABR_SIM")
class ABRSimEnv(core.Env):
"""
Adapt bitrate during a video playback with varying network conditions.
The objective is to (1) reduce stall (2) increase video quality and
(3) reduce switching between bitrate levels. Ideally, we would want to
*simultaneously* optimize the objectives in all dimensions.
* STATE *
[The throughput estimation of the past chunk (chunk size / elapsed time),
download time (i.e., elapsed time since last action), current buffer ahead,
number of the chunks until the end, the bitrate choice for the past chunk,
    current chunk size of bitrate 1, chunk size of bitrate 2,
    ..., chunk size of bitrate 6]
Note: we need the selected bitrate for the past chunk because reward has
a term for bitrate change, a fully observable MDP needs the bitrate for past chunk
* ACTIONS *
    Which bitrate to choose for the current chunk, represented as an integer in [0, 5]
* REWARD *
At current time t, the selected bitrate is b_t, the stall time between
t to t + 1 is s_t, then the reward r_t is
b_{t} - 4.3 * s_{t} - |b_t - b_{t-1}|
Note: there are different definitions of combining multiple objectives in the reward,
check Section 5.1 of the first reference below.
* REFERENCE *
Section 4.2, Section 5.1
Neural Adaptive Video Streaming with Pensieve
H Mao, R Netravali, M Alizadeh
https://dl.acm.org/citation.cfm?id=3098843
Figure 1b, Section 6.2 and Appendix J
Variance Reduction for Reinforcement Learning in Input-Driven Environments.
H Mao, SB Venkatakrishnan, M Schwarzkopf, M Alizadeh.
https://openreview.net/forum?id=Hyg1G2AqtQ
A Control-Theoretic Approach for Dynamic Adaptive Video Streaming over HTTP
X Yin, A Jindal, V Sekar, B Sinopoli
https://dl.acm.org/citation.cfm?id=2787486
"""
def __init__(self):
# observation and action space
self.setup_space()
# set up seed
self.seed(42)
# load all trace files
self.all_traces = load_traces()
# load all video chunk sizes
self.chunk_sizes = load_chunk_sizes()
# mapping between action and bitrate level
self.bitrate_map = [0.3, 0.75, 1.2, 1.85, 2.85, 4.3] # Mbps
# how many past throughput to report
self.past_chunk_len = 8
# assert number of chunks for different bitrates are all the same
assert len(np.unique([len(chunk_size) for chunk_size in self.chunk_sizes])) == 1
self.total_num_chunks = len(self.chunk_sizes[0])
def observe(self):
if self.chunk_idx < self.total_num_chunks:
valid_chunk_idx = self.chunk_idx
else:
valid_chunk_idx = 0
if self.past_action is not None:
valid_past_action = self.past_action
else:
valid_past_action = 0
# network throughput of past chunk, past chunk download time,
# current buffer, number of chunks left and the last bitrate choice
obs_arr = [
self.past_chunk_throughputs[-1],
self.past_chunk_download_times[-1],
self.buffer_size,
self.total_num_chunks - self.chunk_idx,
valid_past_action,
]
# current chunk size of different bitrates
obs_arr.extend(self.chunk_sizes[i][valid_chunk_idx] for i in range(6))
# fit in observation space
for i in range(len(obs_arr)):
if obs_arr[i] > self.obs_high[i]:
obs_arr[i] = self.obs_high[i]
obs_arr = np.array(obs_arr)
assert self.observation_space.contains(obs_arr)
return obs_arr
def reset(self, context_setup):
self.trace, self.curr_t_idx = sample_trace(self.all_traces, self.np_random)
self.noise_std = context_setup.get("noise_std", 0.0)
self.shift = context_setup.get("shift", 0.0)
self.multiplier = context_setup.get("multiplier", 1.0)
self.delay = context_setup.get("delay", 0.0)
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
self.chunk_idx = 0
self.buffer_size = 0.0 # initial download time not counted
self.past_action = None
self.past_chunk_throughputs = deque(maxlen=self.past_chunk_len)
self.past_chunk_download_times = deque(maxlen=self.past_chunk_len)
for _ in range(self.past_chunk_len):
self.past_chunk_throughputs.append(0)
self.past_chunk_download_times.append(0)
return self.observe()
def seed(self, seed):
self.np_random = seeding.np_random(seed)
def setup_space(self):
# Set up the observation and action space
        # The boundary of the space may change if the dynamics are changed;
        # a warning message will show up every time, e.g., the observation
        # falls out of the observation space
self.obs_low = np.array([0] * 11)
self.obs_high = np.array(
[10e6, 100, 100, 500, 5, 10e6, 10e6, 10e6, 10e6, 10e6, 10e6]
)
self.observation_space = spaces.Box(
low=self.obs_low, high=self.obs_high, dtype=np.float32
)
self.action_space = spaces.Discrete(6)
def step(self, action):
        # 0 <= action < number of bitrate levels
assert self.action_space.contains(action)
# Note: sizes are in bytes, times are in seconds
chunk_size = self.chunk_sizes[action][self.chunk_idx]
# compute chunk download time based on trace
delay = self.delay # in seconds
# keep experiencing the network trace
# until the chunk is downloaded
while chunk_size > 1e-8: # floating number business
            throughput = self.trace[1][self.curr_t_idx] / 8.0 * 1e6 # bytes/second
            throughput = throughput * self.multiplier + self.shift
            sign = [-1, 1]
            throughput += (0.25 * self.np_random.poisson(self.noise_std) * 1e6) * (
                sign[self.np_random.binomial(1, 0.85)]
            )
            throughput = max(throughput, 0)
            chunk_time_used = min(self.chunk_time_left, chunk_size / throughput)
            chunk_size -= throughput * chunk_time_used
self.chunk_time_left -= chunk_time_used
delay += chunk_time_used
if self.chunk_time_left == 0:
self.curr_t_idx += 1
if self.curr_t_idx == len(self.trace[1]):
self.curr_t_idx = 0
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
# compute buffer size
rebuffer_time = max(delay - self.buffer_size, 0)
# update video buffer
self.buffer_size = max(self.buffer_size - delay, 0)
self.buffer_size += 4.0 # each chunk is 4 seconds of video
# cap the buffer size
self.buffer_size = min(self.buffer_size, 40.0)
# bitrate change penalty
if self.past_action is None:
bitrate_change = 0
else:
bitrate_change = np.abs(
self.bitrate_map[action] - self.bitrate_map[self.past_action]
)
# linear reward
# (https://dl.acm.org/citation.cfm?id=3098843 section 5.1, QoE metrics (1))
reward = self.bitrate_map[action] - 4.3 * rebuffer_time - bitrate_change
# store action for future bitrate change penalty
self.past_action = action
# update observed network bandwidth and duration
self.past_chunk_throughputs.append(
self.chunk_sizes[action][self.chunk_idx] / float(delay)
)
self.past_chunk_download_times.append(delay)
# advance video
self.chunk_idx += 1
done = self.chunk_idx == self.total_num_chunks
return (
self.observe(),
reward,
done,
{
"bitrate": self.bitrate_map[action],
"stall_time": rebuffer_time,
"bitrate_change": bitrate_change,
},
)
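# Illustrative worked example appended for this dump (not part of the upstream
# park code): the linear reward above, for a hypothetical transition where the
# agent switches from bitrate level 2 to level 4 and incurs 0.5 s of
# rebuffering, is 2.85 - 4.3 * 0.5 - |2.85 - 1.2| = -0.95. The snippet below
# reproduces that arithmetic with the same bitrate_map constants as ABRSimEnv.
if __name__ == "__main__":
    bitrate_map = [0.3, 0.75, 1.2, 1.85, 2.85, 4.3]  # Mbps
    past_action, action, rebuffer_time = 2, 4, 0.5
    reward = bitrate_map[action] - 4.3 * rebuffer_time - abs(
        bitrate_map[action] - bitrate_map[past_action]
    )
    print("example reward:", reward)  # approximately -0.95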
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/abr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# A modification of the adaptive video streaming environment in https://github.com/park-project/park
from park.envs.abr_sim.abr_fb import ABRSimFBEnv
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# A modification of the adaptive video streaming environment in https://github.com/park-project/park
from collections import deque
import numpy as np
from ax.utils.common.logger import get_logger
from park import logger
from park.envs.abr_sim.abr import ABRSimEnv
from park.envs.abr_sim.fb_trace_loader import (
get_chunk_time,
load_chunk_sizes,
load_traces,
sample_trace,
)
logger = get_logger(name="ABR_SIM_FB")
class ABRSimFBEnv(ABRSimEnv):
"""
ABRSimEnv in FB setting.
Adapt bitrate during a video playback with varying network conditions.
The objective is to (1) reduce stall (2) increase video quality and
(3) reduce switching between bitrate levels. Ideally, we would want to
*simultaneously* optimize the objectives in all dimensions.
* STATE *
[The throughput estimation of the past chunk (chunk size / elapsed time),
download time (i.e., elapsed time since last action), current buffer ahead,
number of the chunks until the end, the bitrate choice for the past chunk,
current chunk size of bitrate 1, chunk size of bitrate 2,
..., chunk size of bitrate 5]
Note: we need the selected bitrate for the past chunk because the reward has
a term for bitrate change; a fully observable MDP therefore needs the past chunk's bitrate.
* ACTIONS *
Which bitrate to choose for the current chunk, represented as an integer in [0, 5]
* REWARD *
At current time t, the selected bitrate is b_t and the stall time between
t and t + 1 is s_t; the reward r_t is then
b_{t} - 2.8 * s_{t} - 0.5 * |b_t - b_{t-1}|
* REFERENCE *
Section 4.2, Section 5.1
Neural Adaptive Video Streaming with Pensieve
H Mao, R Netravali, M Alizadeh
https://dl.acm.org/citation.cfm?id=3098843
Figure 1b, Section 6.2 and Appendix J
Variance Reduction for Reinforcement Learning in Input-Driven Environments.
H Mao, SB Venkatakrishnan, M Schwarzkopf, M Alizadeh.
https://openreview.net/forum?id=Hyg1G2AqtQ
A Control-Theoretic Approach for Dynamic Adaptive Video Streaming over HTTP
X Yin, A Jindal, V Sekar, B Sinopoli
https://dl.acm.org/citation.cfm?id=2787486
"""
def __init__(self):
# observation and action space
self.setup_space()
# set up seed
self.seed(42)
# load all trace files
self.all_traces = load_traces()
# load all video chunk sizes
self.all_chunk_sizes = load_chunk_sizes()
# mapping between action and bitrate level
self.bitrate_map = [0.3, 0.75, 1.2, 1.85, 2.85, 4.3] # Mbps
# how many past throughput to report
self.past_chunk_len = 8
def reset(self, context_setup, irun):
# context_setup = {"name": "context_0", "delay": delay}
context_name = context_setup["name"]
# load trace
if irun >= len(self.all_traces[context_name]):
return []
trace_uuid = sample_trace(self.all_traces[context_name], irun)
self.trace = self.all_traces[context_name][trace_uuid]
self.curr_t_idx = 0
# load chunk
self.chunk_sizes = self.all_chunk_sizes[context_name][trace_uuid] # sample
self.chunk_idx = 0
self.total_num_chunks = len(self.chunk_sizes[0])
# assert number of chunks for different bitrates are all the same
assert len(np.unique([len(chunk_size) for chunk_size in self.chunk_sizes])) == 1
self.delay = context_setup.get("delay", 0.0)
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
self.buffer_size = 0.0 # initial download time not counted
self.past_action = None
self.past_chunk_throughputs = deque(maxlen=self.past_chunk_len)
self.past_chunk_download_times = deque(maxlen=self.past_chunk_len)
for _ in range(self.past_chunk_len):
self.past_chunk_throughputs.append(0)
self.past_chunk_download_times.append(0)
return self.observe()
def step(self, action):
# 0 <= action < number of bitrate levels
assert self.action_space.contains(action)
# Note: sizes are in bytes, times are in seconds
chunk_size = self.chunk_sizes[action][self.chunk_idx]
# compute chunk download time based on trace
delay = self.delay # in seconds
# keep experiencing the network trace
# until the chunk is downloaded
while chunk_size > 1e-8: # tolerance for floating point residue
throughput = self.trace[1][self.curr_t_idx] # bytes/second
throughput = throughput / 3.0
throughput = max(throughput, 0)
chunk_time_used = min(self.chunk_time_left, chunk_size / throughput)
chunk_size -= throughput * chunk_time_used
self.chunk_time_left -= chunk_time_used
delay += chunk_time_used
if self.chunk_time_left == 0:
self.curr_t_idx += 1
if self.curr_t_idx == len(self.trace[1]):
self.curr_t_idx = 0
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
# compute buffer size
rebuffer_time = max(delay - self.buffer_size, 0)
# update video buffer
self.buffer_size = max(self.buffer_size - delay, 0)
self.buffer_size += 4.0 # each chunk is 4 seconds of video
# cap the buffer size
self.buffer_size = min(self.buffer_size, 40.0)
# bitrate change penalty
if self.past_action is None:
bitrate_change = 0
else:
bitrate_change = np.abs(
self.bitrate_map[action] - self.bitrate_map[self.past_action]
)
# linear reward
# (https://dl.acm.org/citation.cfm?id=3098843 section 5.1, QoE metrics (1))
reward = self.bitrate_map[action] - 2.8 * rebuffer_time - 0.5 * bitrate_change
# store action for future bitrate change penalty
self.past_action = action
# update observed network bandwidth and duration
self.past_chunk_throughputs.append(
self.chunk_sizes[action][self.chunk_idx] / float(delay)
)
self.past_chunk_download_times.append(delay)
# advance video
self.chunk_idx += 1
done = self.chunk_idx == self.total_num_chunks
return (
self.observe(),
reward,
done,
{
"bitrate": self.bitrate_map[action],
"stall_time": rebuffer_time,
"bitrate_change": bitrate_change,
},
)
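# Illustrative usage sketch (added for clarity; not part of the original file).
# It assumes the FB trace/video JSON files loaded in __init__ are available and
# that a context named "context_0" exists in them; the context_setup dict below
# mirrors the comment in reset() above.
def _example_rollout():
    env = ABRSimFBEnv()
    obs = env.reset(context_setup={"name": "context_0", "delay": 0.1}, irun=0)
    done = len(obs) == 0  # reset() returns [] when irun exceeds the number of traces
    total_reward = 0.0
    while not done:
        action = 0  # toy policy: always pick the lowest bitrate
        obs, reward, done, info = env.step(action)
        total_reward += reward
    return total_reward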
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/abr_fb.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
import os
import numpy as np
import park
def get_chunk_time(trace, t_idx):
if t_idx == len(trace[0]) - 1:
return 1 # the last bandwidth segment lasts for 1 second
else:
return trace[0][t_idx + 1] - trace[0][t_idx]
def load_chunk_sizes():
# bytes of video chunk file at different bitrates
# source video: "Envivio-Dash3" video H.264/MPEG-4 codec
# at bitrates in {300,750,1200,1850,2850,4300} kbps
# original video file:
# https://github.com/hongzimao/pensieve/tree/master/video_server
# download the video size folder if it does not exist
video_folder = park.__path__[0] + "/envs/abr_sim/videos/"
chunk_sizes = np.load(video_folder + "video_sizes.npy")
return chunk_sizes
def load_traces():
# download the trace folder if it does not exist
trace_folder = park.__path__[0] + "/envs/abr_sim/traces/"
all_traces = []
for trace in os.listdir(trace_folder):
all_t = []
all_bandwidth = []
with open(trace_folder + trace, "rb") as f:
for line in f:
parse = line.split()
all_t.append(float(parse[0]))
all_bandwidth.append(float(parse[1]))
all_traces.append((all_t, all_bandwidth))
return all_traces
def sample_trace(all_traces, np_random):
# weighted random sample based on trace length
all_p = [len(trace[1]) for trace in all_traces]
sum_p = float(sum(all_p))
all_p = [p / sum_p for p in all_p]
# sample a trace
trace_idx = np_random.choice(len(all_traces), p=all_p)
# sample a starting point
init_t_idx = np_random.choice(len(all_traces[trace_idx][0]))
# return a trace and the starting t
return all_traces[trace_idx], init_t_idx
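# Minimal usage sketch (illustrative addition, not part of the original file).
# It assumes the trace files read by load_traces above are present under
# park's envs/abr_sim/traces/ folder.
def _example_sample_trace(seed=0):
    np_random = np.random.RandomState(seed)
    all_traces = load_traces()
    trace, init_t_idx = sample_trace(all_traces, np_random)
    # duration (in seconds) of the bandwidth segment the sampled start falls in
    return get_chunk_time(trace, init_t_idx)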
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/trace_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# A modification of the adaptive video streaming environment in https://github.com/park-project/park
import json
import park
def get_chunk_time(trace, t_idx):
if t_idx == len(trace[0]) - 1:
return 1 # the last bandwidth segment lasts for 1 second
else:
return trace[0][t_idx + 1] - trace[0][t_idx]
def load_chunk_sizes():
"""
chunk_sizes is a dict whose keys are context names and whose values
are the video chunk sizes in bytes at different bitrates for the
corresponding traces.
"""
# download the video size folder if it does not exist
video_folder = park.__path__[0] + "/envs/abr_sim/videos/"
with open(video_folder + "fb_chunks_data_all.json", "r") as fp:
chunk_sizes = json.load(fp)
return chunk_sizes
def load_traces():
"""
all_traces is a dict that with keys being context name and values
being a dictionary with keys being trace uuid and values being bandwidths
{
"context name": {
"trace_id": (
[0.01, 1.0], # segment time (seconds)
[1e6, 2e6], # bandwidth (bytes per second)
}
}
"""
# download the trace folder if it does not exist
trace_folder = park.__path__[0] + "/envs/abr_sim/fb_traces/"
with open(trace_folder + "fb_traces_data_all.json", "r") as fp:
all_traces = json.load(fp)
return all_traces
def sample_trace(all_traces, irun):
# deterministic
trace_list = list(all_traces.keys())
trace_list.sort()
return trace_list[irun]
def random_sample_trace(all_traces, np_random):
# weighted random sample based on trace length
trace_list = list(all_traces.keys())
trace_list.sort()
all_p = [len(all_traces[trace_name][1]) for trace_name in trace_list]
sum_p = float(sum(all_p))
all_p = [p / sum_p for p in all_p]
# sample a trace
trace_idx = np_random.choice(len(trace_list), p=all_p)
trace_uuid = trace_list[trace_idx]
# sample a starting point
init_t_idx = np_random.choice(len(all_traces[trace_uuid][0]))
# return a trace and the starting t
return trace_uuid, init_t_idx
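# Illustrative sketch (added; not part of the original file): the deterministic
# sampler above simply indexes the sorted trace ids, so irun=0 picks the
# lexicographically smallest uuid. The toy trace dict below is hypothetical.
def _example_deterministic_sampling():
    toy_traces = {
        "trace_b": ([0.0, 1.0], [1e6, 2e6]),
        "trace_a": ([0.0, 0.5], [3e6, 4e6]),
    }
    assert sample_trace(toy_traces, irun=0) == "trace_a"
    assert sample_trace(toy_traces, irun=1) == "trace_b"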
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/fb_trace_loader.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.modelbridge.factory import get_GPEI, get_sobol
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.service.ax_client import AxClient
from ax.storage.json_store.encoder import object_to_json
from cbo_generation_strategy import (
get_ContextualBO,
get_ContextualEmbeddingBO,
)
from get_synthetic_problem import get_benchmark_problem
CBO_EMB_MODEL_GEN_OPTIONS = {
"acquisition_function_kwargs": {"q": 1, "noiseless": True},
"optimizer_kwargs": {
"method": "SLSQP",
"batch_limit": 1,
"joint_optimization": True,
},
}
def run_aggregated_reward_benchmark(
strategy_name,
benchmark_problem_name,
irep,
num_contexts,
init_size=8,
num_trials=100,
benchmark_problem_args={},
):
benchmark_problem = get_benchmark_problem(
name=benchmark_problem_name,
num_contexts=num_contexts,
benchmark_problem_args=benchmark_problem_args,
)
decomposition = benchmark_problem.contextual_parameter_decomposition
context_weight_dict = {
benchmark_problem.context_name_list[i]: benchmark_problem.context_weights[i]
for i in range(benchmark_problem.num_contexts)
}
embs_feature_dict = {
benchmark_problem.context_name_list[i]: benchmark_problem.context_embedding[
i, :
]
for i in range(benchmark_problem.num_contexts)
}
if strategy_name == "Sobol":
gs = GenerationStrategy(
name="Sobol", steps=[GenerationStep(get_sobol, -1)]
)
elif strategy_name == "GPEI":
gs = GenerationStrategy(
name="GPEI",
steps=[
GenerationStep(get_sobol, init_size),
GenerationStep(get_GPEI, -1),
],
)
elif strategy_name == "SAC":
gs = GenerationStrategy(
name="SAC",
steps=[
GenerationStep(model=get_sobol, num_trials=init_size),
GenerationStep(
model=get_ContextualBO,
num_trials=-1,
model_kwargs={"decomposition": decomposition},
),
],
)
elif strategy_name == "LCE-A":
gs = GenerationStrategy(
name="LCE-A",
steps=[
GenerationStep(model=get_sobol, num_trials=init_size),
GenerationStep(
model=get_ContextualEmbeddingBO,
num_trials=-1,
model_kwargs={
"decomposition": decomposition,
},
model_gen_kwargs={"model_gen_options": CBO_EMB_MODEL_GEN_OPTIONS},
),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
def evaluation_aggregated_reward(parameters):
# put parameters into 2-D array
x = np.array(
[
[parameters.get(param) for param in decomposition[context_name]]
for context_name in benchmark_problem.context_name_list
]
)
return {
"aggregated_reward": (
benchmark_problem.evaluation_function_aggregated(x),
0.0,
)
}
for _ in range(num_trials):
parameters, trial_index = axc.get_next_trial()
axc.complete_trial(
trial_index=trial_index, raw_data=evaluation_aggregated_reward(parameters)
)
res = json.dumps(object_to_json(axc.experiment))
with open(f'results/aggregated_reward_{benchmark_problem_name}_{strategy_name}_rep_{irep}.json', "w") as fout:
json.dump(res, fout)
return res
def run_aggregated_reward_benchmark_reps(
benchmark_problem_name,
strategy,
num_contexts,
init_size=8,
num_trials=100,
reps=8,
benchmark_problem_args={},
):
res = {strategy: []}
for irep in range(reps):
res[strategy].append(
run_aggregated_reward_benchmark(
strategy_name=strategy,
benchmark_problem_name=benchmark_problem_name,
irep=irep,
num_contexts=num_contexts,
init_size=init_size,
num_trials=num_trials,
benchmark_problem_args=benchmark_problem_args,
)
)
with open(f'results/aggregated_reward_{benchmark_problem_name}_{strategy}.json', "w") as fout:
json.dump(res, fout)
if __name__ == '__main__':
# Run all of the benchmark replicates.
# Hartmann5DEmbedding, Uniform Weights, SAC
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="SAC",
num_contexts=5,
reps=10
)
# Hartmann5DEmbedding, Uniform Weights, LCE-A
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="LCE-A",
num_contexts=5,
reps=10
)
# Hartmann5DEmbedding, Uniform Weights, SOBOL
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="Sobol",
num_contexts=5,
num_trials=10,
reps=1
)
# Hartmann5DEmbedding, Uniform Weights, Standard BO
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="GPEI",
num_contexts=5,
num_trials=10,
reps=1
)
# Hartmann5DEmbedding, Skewed Weights, num of contexts = 10, num of dense contexts = 2
# run_aggregated_reward_benchmark_reps(
# benchmark_problem_name="Hartmann5DEmbedding",
# strategy="SAC",
# num_contexts=10,
# reps=10,
# benchmark_problem_args = {"context_weights": [0.46, 0.46, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]}
# )
|
ContextualBO-main
|
benchmarks/run_synthetic_benchmarks_agg_reward.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
from typing import Any, Dict, List, Optional
import numpy as np
from ax.core.parameter import ChoiceParameter, ParameterType
from ax.service.ax_client import AxClient
from ax.storage.json_store.encoder import object_to_json
from get_synthetic_problem import get_benchmark_problem
from cbo_generation_strategy import (
MultiOutputStrategy,
MultiSOBOLStrategy,
MultiTaskContextualBOStrategy,
)
from synthetic_problems import ContextualEmbeddingSyntheticFunction
def run_multioutput_reward_benchmark(
strategy_name,
benchmark_problem_name,
irep,
num_contexts,
init_size=8,
num_trials=100,
benchmark_problem_args={},
):
init_size = init_size * num_contexts
num_trials = num_trials * num_contexts
benchmark_problem = get_benchmark_problem(
name=benchmark_problem_name,
num_contexts=num_contexts,
benchmark_problem_args=benchmark_problem_args,
)
context_parameter = ChoiceParameter(
name="CONTEXT_PARAMS",
values=benchmark_problem.context_name_list,
is_task=True,
parameter_type=ParameterType.STRING,
)
if strategy_name == "MultiSOBOL":
gs = MultiSOBOLStrategy(context_parameter=context_parameter, name="MultiSOBOL")
elif strategy_name == "ICM":
gs = MultiOutputStrategy(
name="ICM",
context_parameter=context_parameter,
init_size=init_size,
)
elif strategy_name == "LCE-M":
gs = MultiTaskContextualBOStrategy(
name="LCE-M",
context_parameter=context_parameter,
init_size=init_size,
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.base_parameters
experiment_parameters.append(
{
"name": context_parameter.name,
"type": "choice",
"values": context_parameter.values,
"is_task": True,
}
)
axc.create_experiment(
name="cbo_multioutput_reward_experiment",
parameters=experiment_parameters,
objective_name="context_reward",
minimize=True,
)
def evaluation_contextual_reward(parameters):
# get base parameters only (into 1-D array)
x = np.array(
[
parameters.get(param["name"])
for param in benchmark_problem.base_parameters
if param["name"] != context_parameter.name
]
)
context = parameters.get(context_parameter.name)
weight = benchmark_problem.context_weights[
benchmark_problem.context_name_list.index(context)
]
if isinstance(benchmark_problem, ContextualEmbeddingSyntheticFunction):
embs = benchmark_problem.context_embedding[
benchmark_problem.context_name_list.index(context), :
]
x = np.hstack([x, embs])
return {
"context_reward": (weight * benchmark_problem.component_function(x), 0.0)
}
for _ in range(num_trials):
parameters, trial_index = axc.get_next_trial()
axc.complete_trial(
trial_index=trial_index, raw_data=evaluation_contextual_reward(parameters)
)
res = json.dumps(object_to_json(axc.experiment))
with open(f'results/multioutput_reward_{benchmark_problem_name}_{strategy_name}_rep_{irep}.json', "w") as fout:
json.dump(res, fout)
return res
def run_multioutput_reward_benchmark_reps(
benchmark_problem_name,
strategy,
num_contexts,
init_size=4,
num_trials=40,
reps=8,
benchmark_problem_args={},
):
res = {strategy: []}
for irep in range(reps):
res[strategy].append(
run_multioutput_reward_benchmark(
strategy_name=strategy,
benchmark_problem_name=benchmark_problem_name,
irep=irep,
num_contexts=num_contexts,
init_size=init_size,
num_trials=num_trials,
benchmark_problem_args=benchmark_problem_args,
)
)
with open(f'results/multioutput_reward_{benchmark_problem_name}_{strategy}.json', "w") as fout:
json.dump(res, fout)
if __name__ == '__main__':
# Run all of the benchmark replicates.
# Hartmann5DEmbedding, Uniform Weights, LCE-M
run_multioutput_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="LCE-M",
num_contexts=5,
reps=8
)
# Hartmann5DEmbedding, Uniform Weights, ICM
run_multioutput_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="ICM",
num_contexts=5,
reps=8
)
# Hartmann5DEmbedding, Uniform Weights, SOBOL
run_multioutput_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="MultiSOBOL",
num_contexts=5,
reps=8
)
|
ContextualBO-main
|
benchmarks/run_synthetic_benchmarks_multioutput_reward.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from abc import ABC
from typing import Dict, List
import numpy as np
import torch
from ax.models.random.sobol import SobolGenerator
from ax.utils.measurement.synthetic_functions import branin, hartmann6
class ContextualSyntheticFunction(ABC):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
noise_sd: float = 0.0,
) -> None:
self.context_name_list = context_name_list
# contextual weights
self.context_weights = context_weights
# number of contexts
self.num_contexts = len(context_name_list)
# noise term
self.noise_sd = noise_sd
self._base_parameters = []
self._contextual_parameters = []
self._decomposition = {}
@property
def base_parameters(self) -> List[Dict]:
return self._base_parameters
@property
def contextual_parameters(self) -> List[Dict]:
return self._contextual_parameters
@property
def contextual_parameter_decomposition(self) -> Dict[str, List[str]]:
return self._decomposition
def component_function(self, x: np.ndarray) -> float:
"""function that produces the outcomes for each component."""
raise NotImplementedError
def evaluation_function_vectorized(self, x: np.ndarray) -> np.ndarray:
# input x is a matrix: each row corresponds to a context
# and each column to a base parameter
return np.array(
[self.component_function(x[i, :]) for i in range(self.num_contexts)]
)
def evaluation_function_aggregated(self, x: np.ndarray) -> float:
# aggregate across context weighted by context coeff
context_output = self.evaluation_function_vectorized(x)
return np.sum(context_output * self.context_weights)
class ContextualEmbeddingSyntheticFunction(ContextualSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
context_embedding: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
noise_sd=noise_sd,
)
# each row corresponds to a context and each column to an embedding dimension
self.context_embedding = context_embedding
def evaluation_function_vectorized(self, x: np.ndarray) -> np.ndarray:
# input x is a matrix: each row corresponds to a context
# and each column to a base parameter
x_all = np.hstack([x, self.context_embedding])
return np.array(
[self.component_function(x_all[i, :]) for i in range(self.num_contexts)]
)
class Branin2DBase(ContextualSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": "x0",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "x1",
"type": "range",
"bounds": [0.0, 15.0],
"value_type": "float",
"log_scale": False,
},
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.append(
{
"name": f"x0_{context_name}",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
}
)
self._contextual_parameters.append(
{
"name": f"x1_{context_name}",
"type": "range",
"bounds": [0.0, 15.0],
"value_type": "float",
"log_scale": False,
}
)
self._decomposition = {
f"{context_name}": [f"x0_{context_name}", f"x1_{context_name}"]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
return branin.f(x)
class Branin1DEmbedding(ContextualEmbeddingSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
context_embedding: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=context_embedding, # values between [0.0, 15.0]
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": "x0",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
}
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.append(
{
"name": f"x0_{context_name}",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
}
)
self._decomposition = {
f"{context_name}": [f"x0_{context_name}"]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
# make sure embedding is at the end of the array
return branin.f(x)
class Hartmann6DBase(ContextualSyntheticFunction):
# additive Hartmann 6D case
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": f"x{i}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for i in range(6)
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.extend(
[
{
"name": f"x{j}_{context_name}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for j in range(6)
]
)
self._decomposition = {
f"{context_name}": [f"x{j}_{context_name}" for j in range(6)]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
return hartmann6.f(x)
class Hartmann5DEmbedding(ContextualEmbeddingSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
context_embedding: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=context_embedding, # values between [0.0, 1.0]
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": f"x{i}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for i in range(5)
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.extend(
[
{
"name": f"x{j}_{context_name}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for j in range(5)
]
)
self._decomposition = {
f"{context_name}": [f"x{j}_{context_name}" for j in range(5)]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
# make sure embedding is at the end of the array
return hartmann6.f(x)
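# Illustrative sketch (added; not part of the original file): evaluating the
# Hartmann5DEmbedding problem for three contexts with uniform weights and a
# simple 1-D embedding per context. The input x has one row of 5 base
# parameters per context; the embedding column is appended internally.
def _example_hartmann5d_embedding():
    num_contexts = 3
    problem = Hartmann5DEmbedding(
        context_name_list=[f"c{i}" for i in range(num_contexts)],
        context_weights=np.ones(num_contexts) / num_contexts,
        context_embedding=np.linspace(0.0, 1.0, num_contexts).reshape(num_contexts, 1),
    )
    x = np.full((num_contexts, 5), 0.5)
    return problem.evaluation_function_aggregated(x)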
|
ContextualBO-main
|
benchmarks/synthetic_problems.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import numpy as np
from synthetic_problems import (
Branin1DEmbedding,
Branin2DBase,
Hartmann5DEmbedding,
Hartmann6DBase,
)
def get_benchmark_problem(
name: str,
num_contexts: int,
benchmark_problem_args: Optional[Dict[str, Any]] = None,
):
"""generate benchmark problems.
Args:
1. name: benchmark name
2. num_contexts: number of contexts n
3. args for creating benchmark
- context_name_list. List of str. Default is [c0, c1, ..., cn]
- context_weights. [w0, w1, ..., wn]. sum of w_i = 1. Default is [1/n]
- context_embedding.
"""
benchmark_problem_args = benchmark_problem_args or {}
context_name_list = benchmark_problem_args.get(
"context_name_list", [f"c{i}" for i in range(num_contexts)]
)
context_weights = np.array(
benchmark_problem_args.get(
"context_weights", np.ones(num_contexts) / num_contexts
)
)
if name == "Branin2D":
benchmark_problem = Branin2DBase(
context_name_list=context_name_list, context_weights=context_weights
)
elif name == "Hartmann6D":
benchmark_problem = Hartmann6DBase(
context_name_list=context_name_list, context_weights=context_weights
)
elif name == "Branin1DEmbedding":
benchmark_problem = Branin1DEmbedding(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=np.arange(0.0, 15.0, 15.0 / num_contexts).reshape(
num_contexts, 1
),
)
elif name == "Hartmann5DEmbedding":
context_embedding = np.array(
benchmark_problem_args.get(
"context_embedding", np.arange(0.0, 1.0, 1.0 / num_contexts)
)
)
benchmark_problem = Hartmann5DEmbedding(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=context_embedding.reshape(num_contexts, 1),
)
else:
raise ValueError(f"Unknown benchmark problem name: {name}")
return benchmark_problem
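# Minimal usage sketch (illustrative addition, not part of the original file):
# build the Hartmann5DEmbedding benchmark with skewed context weights and
# inspect the decomposition of its contextual search space.
def _example_get_problem():
    problem = get_benchmark_problem(
        name="Hartmann5DEmbedding",
        num_contexts=2,
        benchmark_problem_args={"context_weights": [0.8, 0.2]},
    )
    # maps each context name to the names of its five contextual parameters
    return problem.contextual_parameter_decomposition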
|
ContextualBO-main
|
benchmarks/get_synthetic_problem.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Type
import torch
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.core.parameter import ChoiceParameter
from ax.core.search_space import SearchSpace
from ax.models.torch.cbo_lcea import LCEABO
from ax.models.torch.cbo_sac import SACBO
from ax.models.torch.cbo_lcem import LCEMBO
from ax.modelbridge.factory import DEFAULT_TORCH_DEVICE
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.registry import (
Cont_X_trans,
StratifiedStandardizeY,
TaskEncode,
Y_trans,
)
from ax.modelbridge.torch import TorchModelBridge
from ax.modelbridge.transforms.base import Transform
from ax.models.random.sobol import SobolGenerator
from ax.models.torch.botorch import BotorchModel
def get_multisobol(search_space: SearchSpace) -> RandomModelBridge:
return RandomModelBridge(
search_space=search_space,
model=SobolGenerator(),
transforms=[TaskEncode] + Cont_X_trans,
)
class MultiSOBOLStrategy(GenerationStrategy):
def __init__(
self, context_parameter: ChoiceParameter, name: str = "MultiSOBOL"
) -> None:
self.context_parameter = context_parameter
self.num_contexts = len(context_parameter.values)
steps = [GenerationStep(get_multisobol, -1)]
super().__init__(steps=steps, name=name)
def clone_reset(self) -> "MultiSOBOLStrategy":
"""Copy without state."""
return self.__class__(context_parameter=self.context_parameter, name=self.name)
def gen(
self,
experiment: Experiment,
data: Optional[Data] = None,
n: int = 1,
**kwargs: Any,
) -> GeneratorRun:
"""Produce the next points in the experiment."""
num_trials = len(self._generator_runs)
idx = num_trials % self.num_contexts # decide which context to optimize
fixed_features = ObservationFeatures(
parameters={self.context_parameter.name: self.context_parameter.values[idx]}
)
generator_run = super().gen(
experiment=experiment, data=data, n=1, fixed_features=fixed_features
)
return generator_run
def get_multioutput(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
status_quo_features: Optional[ObservationFeatures] = None,
) -> TorchModelBridge:
# Set transforms for a Single-type MTGP model.
transforms = Cont_X_trans + [StratifiedStandardizeY, TaskEncode]
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=BotorchModel(),
transforms=transforms,
torch_dtype=torch.double,
status_quo_features=status_quo_features,
)
class MultiOutputStrategy(GenerationStrategy):
def __init__(
self,
context_parameter: ChoiceParameter,
init_size: int,
steps: Optional[List[GenerationStep]] = None,
name: str = "MultiOutputBO",
) -> None:
self.context_parameter = context_parameter
self.num_contexts = len(context_parameter.values)
self.init_size = init_size
if steps is None:
steps = [
GenerationStep(get_multisobol, init_size),
GenerationStep(get_multioutput, -1),
]
super().__init__(steps=steps, name=name)
def clone_reset(self) -> "MultiOutputStrategy":
"""Copy without state."""
# __init__ requires init_size, so pass it through when cloning
return self.__class__(
context_parameter=self.context_parameter,
init_size=self.init_size,
name=self.name,
)
def gen(
self,
experiment: Experiment,
data: Optional[Data] = None,
n: int = 1,
**kwargs: Any,
) -> GeneratorRun:
"""Produce the next points in the experiment."""
num_trials = len(self._generator_runs)
idx = num_trials % self.num_contexts # decide which context to optimize
fixed_features = ObservationFeatures(
parameters={self.context_parameter.name: self.context_parameter.values[idx]}
)
generator_run = super().gen(
experiment=experiment, data=data, n=1, fixed_features=fixed_features
)
return generator_run
def get_multitask_contextualBO(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
status_quo_features: Optional[ObservationFeatures] = None,
) -> TorchModelBridge:
# Set transforms for a Single-type MTGP model.
transforms = Cont_X_trans + [StratifiedStandardizeY, TaskEncode]
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=LCEMBO(),
transforms=transforms,
torch_dtype=torch.double,
status_quo_features=status_quo_features,
)
class MultiTaskContextualBOStrategy(MultiOutputStrategy):
def __init__(
self,
context_parameter: ChoiceParameter,
init_size: int,
name: str = "MultiTaskContextualBO",
) -> None:
steps = [
GenerationStep(get_multisobol, init_size),
GenerationStep(get_multitask_contextualBO, -1),
]
super().__init__(
context_parameter=context_parameter,
init_size=init_size,
steps=steps,
name=name,
)
def get_ContextualBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=SACBO(decomposition=decomposition),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
def get_ContextualEmbeddingBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
context_weight_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
search_space: Optional[SearchSpace] = None,
gp_model_args: Optional[Dict[str, Any]] = None,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=LCEABO(
decomposition=decomposition,
cat_feature_dict=cat_feature_dict,
embs_feature_dict=embs_feature_dict,
context_weight_dict=context_weight_dict,
embs_dim_list=embs_dim_list,
gp_model_args=gp_model_args,
),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
|
ContextualBO-main
|
benchmarks/cbo_generation_strategy.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
from scipy.spatial.transform import Rotation as R
import os
from glob import glob
from tqdm import tqdm
import scipy.io as sio
import random
from PIL import Image
import numpy as np
import torch
from torchvision import transforms
preprocess = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor()
])
# class used for obtaining an instance of the dataset for training vision chart prediction
# to be passed to a pytorch dataloader
# input:
# - classes: list of object classes used
# - args: set of input parameters from the training file
# - set_type: the set type used
# - sample_num: the size of the point cloud to be returned in a given batch
class mesh_loader_vision(object):
def __init__(self, classes, args, set_type='train', sample_num=3000):
# initialization of data locations
self.args = args
self.surf_location = '../data/surfaces/'
self.img_location = '../data/images/'
self.touch_location = '../data/scene_info/'
self.sheet_location = '../data/sheets/'
self.sample_num = sample_num
self.set_type = set_type
self.set_list = np.load('../data/split.npy', allow_pickle='TRUE').item()
names = [[f.split('/')[-1], f.split('/')[-2]] for f in glob((f'{self.img_location}/*/*'))]
self.names = []
self.classes_names = [[] for _ in classes]
np.random.shuffle(names)
for n in tqdm(names):
if n[1] in classes:
if os.path.exists(self.surf_location + n[1] + '/' + n[0] + '.npy'):
if os.path.exists(self.touch_location + n[1] + '/' + n[0]):
if n[0] + n[1] in self.set_list[self.set_type]:
self.names.append(n)
self.classes_names[classes.index(n[1])].append(n)
print(f'The number of {set_type} set objects found : {len(self.names)}')
def __len__(self):
return len(self.names)
# select the object and grasps for training
def get_training_instance(self):
# select an object and a principal grasp randomly
class_choice = random.choice(self.classes_names)
object_choice = random.choice(class_choice)
obj, obj_class = object_choice
# select the remaining grasps and shuffle the select grasps
num_choices = [0, 1, 2, 3, 4]
nums = []
for i in range(self.args.num_grasps):
choice = random.choice(num_choices)
nums.append(choice)
del (num_choices[num_choices.index(choice)])
random.shuffle(nums)
return obj, obj_class, nums[-1], nums
# select the object and grasps for validating
def get_validation_examples(self, index):
# select an object and a principal grasp
obj, obj_class = self.names[index]
orig_num = 0
# select the remaining grasps deterministically
nums = [(orig_num + i) % 5 for i in range(self.args.num_grasps)]
return obj, obj_class, orig_num, nums
# load surface point cloud
def get_gt_points(self, obj_class, obj):
samples = np.load(self.surf_location +obj_class + '/' + obj + '.npy')
if self.args.eval:
np.random.seed(0)
np.random.shuffle(samples)
gt_points = torch.FloatTensor(samples[:self.sample_num])
gt_points *= .5 # scales the models to the size of shape we use
gt_points[:, -1] += .6 # this is to make the hand and the shape the right relative sizes
return gt_points
# load vision signal
def get_images(self, obj_class, obj, grasp_number):
# load images
img_occ = Image.open(f'{self.img_location}/{obj_class}/{obj}/{grasp_number}.png')
img_unocc = Image.open(f'{self.img_location}/{obj_class}/{obj}/unoccluded.png')
# apply pytorch image preprocessing
img_occ = preprocess(img_occ)
img_unocc = preprocess(img_unocc)
return torch.FloatTensor(img_occ), torch.FloatTensor(img_unocc)
# load touch sheet mask indicating touch success
def get_touch_info(self, obj_class, obj, grasps):
sheets, successful = [], []
# cycle though grasps and load touch sheets
for grasp in grasps:
sheet_location = self.sheet_location + f'{obj_class}/{obj}/sheets_{grasp}_finger_num.npy'
hand_info = np.load(f'{self.touch_location}/{obj_class}/{obj}/{grasp}.npy', allow_pickle=True).item()
sheet, success = self.get_touch_sheets(sheet_location, hand_info)
sheets.append(sheet)
successful += success
return torch.cat(sheets), successful
# load the touch sheet
def get_touch_sheets(self, location, hand_info):
sheets = []
successful = []
touches = hand_info['touch_success']
finger_pos = torch.FloatTensor(hand_info['cam_pos'])
# cycle through fingers in the grasp
for i in range(4):
sheet = np.load(location.replace('finger_num', str(i)))
# if the touch was unsuccessful
if not touches[i] or sheet.shape[0] == 1:
sheets.append(finger_pos[i].view(1, 3).expand(25, 3)) # save the finger position instead in every vertex
successful.append(False) # binary mask for unsuccessful touch
# if the touch was successful
else:
sheets.append(torch.FloatTensor(sheet)) # save the sheet
successful.append(True) # binary mask for successful touch
sheets = torch.stack(sheets)
return sheets, successful
def __getitem__(self, index):
if self.set_type == 'train':
obj, obj_class, grasp_number, grasps = self.get_training_instance()
else:
obj, obj_class, grasp_number, grasps = self.get_validation_examples(index)
data = {}
# meta data
data['names'] = obj, obj_class, grasp_number
data['class'] = obj_class
# load sampled ground truth points
data['gt_points'] = self.get_gt_points(obj_class, obj)
# load images
data['img_occ'], data['img_unocc'] = self.get_images(obj_class, obj, grasp_number)
# get touch information
data['sheets'], data['successful'] = self.get_touch_info(obj_class, obj, grasps)
return data
def collate(self, batch):
data = {}
data['names'] = [item['names'] for item in batch]
data['class'] = [item['class'] for item in batch]
data['sheets'] = torch.cat([item['sheets'].unsqueeze(0) for item in batch])
data['gt_points'] = torch.cat([item['gt_points'].unsqueeze(0) for item in batch])
data['img_occ'] = torch.cat([item['img_occ'].unsqueeze(0) for item in batch])
data['img_unocc'] = torch.cat([item['img_unocc'].unsqueeze(0) for item in batch])
data['successful'] = [item['successful'] for item in batch]
return data
# class used for obtaining an instance of the dataset for training touch chart prediction
# to be passed to a pytorch dataloader
# input:
# - classes: list of object classes used
# - args: set of input parameters from the training file
# - set_type: the set type used
# - num: if specified only returns a given grasp number
# - all: if True use all objects, regardless of set type
# - finger: if specified only returns a given finger number
class mesh_loader_touch(object):
def __init__(self, classes, args, set_type='train', produce_sheets = False):
# initialization of data locations
self.args = args
self.surf_location = '../data/surfaces/'
self.img_location = '../data/images/'
self.touch_location = '../data/scene_info/'
self.sheet_location = '../data/remake_sheets/'
self.set_type = set_type
self.set_list = np.load('../data/split.npy', allow_pickle='TRUE').item()
self.empty = torch.FloatTensor(np.load('../data/empty_gel.npy'))
self.produce_sheets = produce_sheets
names = [[f.split('/')[-1], f.split('/')[-2]] for f in glob((f'{self.img_location}/*/*'))]
self.names = []
for n in tqdm(names):
if n[1] in classes:
if os.path.exists(self.surf_location + n[1] + '/' + n[0] + '.npy'):
if os.path.exists(self.touch_location + n[1] + '/' + n[0]):
if self.produce_sheets or (n[0] + n[1]) in self.set_list[self.set_type]:
if produce_sheets:
for i in range(5):
for j in range(4):
self.names.append(n + [i, j])
else:
for i in range(5):
hand_info = np.load(f'{self.touch_location}/{n[1]}/{n[0]}/{i}.npy',
allow_pickle=True).item()
for j in range(4):
if hand_info['touch_success'][j]:
self.names.append(n + [i, j])
print(f'The number of {set_type} set objects found : {len(self.names)}')
def __len__(self):
return len(self.names)
def standardize_point_size(self, points):
if points.shape[0] == 0:
return torch.zeros((self.args.num_samples, 3))
np.random.shuffle(points)
points = torch.FloatTensor(points)
while points.shape[0] < self.args.num_samples :
points = torch.cat((points, points, points, points))
perm = torch.randperm(points.shape[0])
idx = perm[:self.args.num_samples ]
return points[idx]
def get_finger_transforms(self, hand_info, finger_num, args):
rot = hand_info['cam_rot'][finger_num]
rot = R.from_euler('xyz', rot, degrees=False).as_matrix()
rot_q = R.from_matrix(rot).as_quat()
pos = hand_info['cam_pos'][finger_num]
return torch.FloatTensor(rot_q), torch.FloatTensor(rot), torch.FloatTensor(pos)
def __getitem__(self, index):
obj, obj_class, num, finger_num = self.names[index]
# meta data
data = {}
data['names'] = [obj, num , finger_num]
data['class'] = obj_class
# hand information
hand_info = np.load(f'{self.touch_location}/{obj_class}/{obj}/{num}.npy', allow_pickle=True).item()
data['rot'], data['rot_M'], data['pos'] = self.get_finger_transforms(hand_info, finger_num, self.args)
data['good_touch'] = hand_info['touch_success']
# simulated touch information
scene_info = np.load(f'{self.touch_location}/{obj_class}/{obj}/{num}.npy', allow_pickle=True).item()
data['depth'] = torch.clamp(torch.FloatTensor(scene_info['depth'][finger_num]).unsqueeze(0), 0, 1)
data['sim_touch'] = torch.FloatTensor(np.array(scene_info['gel'][finger_num]) / 255.).permute(2, 0, 1).contiguous().view(3, 100, 100)
data['empty'] = torch.FloatTensor(self.empty / 255.).permute(2, 0, 1).contiguous().view(3, 100, 100)
# point cloud information
data['samples'] = self.standardize_point_size(scene_info['points'][finger_num])
data['num_samples'] = scene_info['points'][finger_num].shape
# where to save sheets
data['save_dir'] = f'{self.sheet_location}/{obj_class}/{obj}/sheets_{num}_{finger_num}.npy'
return data
def collate(self, batch):
data = {}
data['names'] = [item['names'] for item in batch]
data['class'] = [item['class'] for item in batch]
data['samples'] = torch.cat([item['samples'].unsqueeze(0) for item in batch])
data['sim_touch'] = torch.cat([item['sim_touch'].unsqueeze(0) for item in batch])
data['empty'] = torch.cat([item['empty'].unsqueeze(0) for item in batch])
data['depth'] = torch.cat([item['depth'].unsqueeze(0) for item in batch])
data['ref'] = {}
data['ref']['rot'] = torch.cat([item['rot'].unsqueeze(0) for item in batch])
data['ref']['rot_M'] = torch.cat([item['rot_M'].unsqueeze(0) for item in batch])
data['ref']['pos'] = torch.cat([item['pos'].unsqueeze(0) for item in batch])
data['good_touch'] = [item['good_touch'] for item in batch]
data['save_dir'] = [item['save_dir'] for item in batch]
data['num_samples'] = [item['num_samples'] for item in batch]
return data
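# Illustrative sketch (added; not part of the original file): wiring the vision
# loader above into a PyTorch DataLoader with its custom collate function.
# `classes` and `args` are placeholders here; the real values come from the
# training script and the data layout described at the top of this file.
def _example_vision_dataloader(classes, args):
    from torch.utils.data import DataLoader
    dataset = mesh_loader_vision(classes, args, set_type='train')
    return DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=dataset.collate)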
|
3D-Vision-and-Touch-main
|
data_loaders.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import time
import sys
sys.path.insert(0, "../")
from pytorch3d.loss import chamfer_distance as cuda_cd
# loads the initial mesh and stores vertex, face, and adjacency matrix information
# input:
# - args: arguments from the training file
# - obj_name: name of the initial mesh object file for the vision charts
# output:
# - adj_info: the adjacency matrix, and faces for the combination of vision and touch charts
# - verts: the set of vertices for the initial vision charts
def load_mesh_vision(args, obj_name):
# load obj file
obj = import_obj(obj_name)
verts = np.array(obj.vertices)
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(np.array(obj.faces) - 1).cuda()
# get adjacency matrix information
adj_info = adj_init(verts, faces, args)
return adj_info, verts
# loads object file
# involves identifying face and vertex information in .obj file
# needs to be triangulated to work
class import_obj(object):
def __init__(self, file):
self.vertices = []
self.faces = []
with open(file) as f :
for line in f:
line = line.replace('//', '/')
line = line.replace('\n', '')
if line[:2] == "v ":
self.vertices.append([float(v) for v in line.split(" ")[1:]])
elif line[0] == "f":
self.faces.append([int(s.split('/')[0]) for s in line.split(' ')[1:]])
# normalizes symmetric, binary adj matrix such that sum of each row is 1
def normalize_adj(mx):
rowsum = mx.sum(1)
r_inv = (1. / rowsum).view(-1)
r_inv[r_inv != r_inv] = 0.
mx = torch.mm(torch.eye(r_inv.shape[0]).to(mx.device) * r_inv, mx)
return mx
# defines the adjacency matrix for an object
def adj_init(verts, faces, args):
# get generic adjacency matrix for vision charts
adj = calc_adj(faces)
adj_info = {}
if args.use_touch:
# this combines the adjacency information of touch and vision charts
# the output adj matrix has the first k rows corresponding to vision charts, and the last |V| - k
# corresponding to touch charts. Similarly the first l faces correspond to vision charts, and the
# remaining correspond to touch charts
adj, faces = adj_fuse_touch(verts, faces, adj, args)
adj = normalize_adj(adj)
adj_info['adj'] = adj
adj_info['faces'] = faces
return adj_info
# combines graph for vision and touch charts to define a fused adjacency matrix
# input:
# - verts: vertices of the vision charts
# - faces: faces of the vision charts
# - adj: adjacency matrix for the vision charts
# - args: arguments from the training file
# output:
# - adj: adjacency matrix from the combination of touch and vision charts
# - faces: combination of vision and touch chart faces
def adj_fuse_touch(verts, faces, adj, args):
verts = verts.data.cpu().numpy()
hash = {}
# find vertices which have the same 3D position
for e, v in enumerate(verts):
if v.tobytes() in hash:
hash[v.tobytes()].append(e)
else:
hash[v.tobytes()] = [e]
# load object information for generic touch chart
sheet = import_obj('../data/initial_sheet.obj')
sheet_verts = torch.FloatTensor(np.array(sheet.vertices)).cuda()
sheet_faces = torch.LongTensor(np.array(sheet.faces) - 1).cuda()
sheet_adj = calc_adj(sheet_faces)
# central vertex for each touch chart that will communicate with all vision charts
central_point = 4
central_points = [central_point + (i * sheet_adj.shape[0]) + adj.shape[0] for i in range(4 * args.num_grasps)]
# define and fill new adjacency matrix with vision and touch charts
new_dim = adj.shape[0] + (4 * args.num_grasps * sheet_adj.shape[0])
new_adj = torch.zeros((new_dim, new_dim)).cuda()
new_adj[: adj.shape[0], :adj.shape[0]] = adj.clone()
for i in range(4 * args.num_grasps):
start = adj.shape[0] + (sheet_adj.shape[0] * i)
end = adj.shape[0] + (sheet_adj.shape[0] * (i + 1))
new_adj[start: end, start:end] = sheet_adj.clone()
adj = new_adj
# define new faces with vision and touch charts
all_faces = [faces]
for i in range(4 * args.num_grasps):
temp_sheet_faces = sheet_faces.clone() + verts.shape[0]
temp_sheet_faces += i * sheet_verts.shape[0]
all_faces.append(temp_sheet_faces)
faces = torch.cat(all_faces)
# update adjacency matrix to allow communication between vision and touch charts
for key in hash.keys():
cur_verts = hash[key]
if len(cur_verts) > 1:
for v1 in cur_verts:
for v2 in cur_verts: # vertices on the boundary of vision charts can communicate
adj[v1, v2] = 1
if args.use_touch:
for c in central_points: # touch and vision charts can communicate
adj[v1, c] = 1
adj[c, v1] = 1
return adj, faces
# computes adjacency matrix from face information
def calc_adj(faces):
v1 = faces[:, 0]
v2 = faces[:, 1]
v3 = faces[:, 2]
num_verts = int(faces.max())
adj = torch.eye(num_verts + 1).to(faces.device)
adj[(v1, v2)] = 1
adj[(v1, v3)] = 1
adj[(v2, v1)] = 1
adj[(v2, v3)] = 1
adj[(v3, v1)] = 1
adj[(v3, v2)] = 1
return adj
# sample points from a batch of meshes
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# input:
# - verts: vertices of the mesh to sample from
# - faces: faces of the mesh to sample from
# - num: number of point to sample
# output:
# - points: points sampled on the surface of the mesh
def batch_sample(verts, faces, num=10000):
dist_uni = torch.distributions.Uniform(torch.tensor([0.0]).cuda(), torch.tensor([1.0]).cuda())
batch_size = verts.shape[0]
# calculate area of each face
x1, x2, x3 = torch.split(torch.index_select(verts, 1, faces[:, 0]) - torch.index_select(verts, 1, faces[:, 1]), 1,
dim=-1)
y1, y2, y3 = torch.split(torch.index_select(verts, 1, faces[:, 1]) - torch.index_select(verts, 1, faces[:, 2]), 1,
dim=-1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
Areas = torch.sqrt(a + b + c) / 2
Areas = Areas.squeeze(-1) / torch.sum(Areas, dim=1) # percentage of each face w.r.t. full surface area
# define distributions of relative face surface areas
choices = None
for A in Areas:
if choices is None:
choices = torch.multinomial(A, num, True) # list of faces to be sampled from
else:
choices = torch.cat((choices, torch.multinomial(A, num, True)))
# select the faces to be used
select_faces = faces[choices].view(verts.shape[0], 3, num)
face_arange = verts.shape[1] * torch.arange(0, batch_size).cuda().unsqueeze(-1).expand(batch_size, num)
select_faces = select_faces + face_arange.unsqueeze(1)
select_faces = select_faces.view(-1, 3)
flat_verts = verts.view(-1, 3)
# sample one point from each
xs = torch.index_select(flat_verts, 0, select_faces[:, 0])
ys = torch.index_select(flat_verts, 0, select_faces[:, 1])
zs = torch.index_select(flat_verts, 0, select_faces[:, 2])
u = torch.sqrt(dist_uni.sample_n(batch_size * num))
v = dist_uni.sample_n(batch_size * num)
points = (1 - u) * xs + (u * (1 - v)) * ys + u * v * zs
points = points.view(batch_size, num, 3)
return points
# compute the local chamfer distance metric on the ground truth mesh at different distances away from the touch sites
# input:
# - samples: point cloud from surface of predicted charts
# - batch: current batch information
# - losses: the current losses across the test set
# output:
# - losses: updates losses across the test set
# - num_examples: the number of times the losses were updated
def calc_local_chamfer(samples, batch, losses):
batch_size = samples.shape[0]
# a grid of points projected towards the surface of the object, starting from the same position and orientation
# as the touch sensor when the touch occurred, but 5 times its size
planes = batch['radius'].cuda().view(batch_size, 4, 100, 100, 3)
# mask indicating which points hit the surface of the object, i.e., the ones we care about
masks = batch['radius_masks'].cuda().view(batch_size, 4, 100, 100)
successful = batch['successful']
num_examples = 0
# for every grasp
for pred, gt, mask, success in zip(samples, planes, masks, successful):
# for every ring size around each touch site
for i in range(5):
# for every touch
for j in range(4):
if not success[j]:
continue
# select the right ring of points, i.e. 1 x size of sensor ... 5 x size of sensor
dim_mask = torch.zeros(mask[j].shape).clone()
dim_mask[40 - i * 10: 60 + i * 10, 40 - i * 10: 60 + i * 10] = 1
dim_mask[50 - i * 10: 50 + i * 10, 50 - i * 10: 50 + i * 10] = 0
# select points which are on the object's surface
dim_mask[mask[j] == 0] = 0
gt_masked = gt[j][dim_mask == 1]
if (gt_masked.shape[0] == 0):
continue
# compute the local loss between the selected ground truth points and the predicted samples
loss, _ = cuda_cd(pred.unsqueeze(0), gt_masked.unsqueeze(0), batch_reduction=None)
losses[i] += loss.mean()
if i == 0:
num_examples += 1.
return losses, num_examples
# sets up arguments for the pretrained models
def pretrained_args(args):
if args.pretrained == 'empty':
args.use_occluded = False
args.use_unoccluded = False
args.use_touch = False
elif args.pretrained == 'touch':
args.num_gcn_layers = 25
args.hidden_gcn_layers = 250
args.use_occluded = False
args.use_unoccluded = False
args.use_touch = True
elif args.pretrained == 'touch_unoccluded':
args.num_img_blocks = 4
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 15
args.hidden_gcn_layers = 200
args.use_occluded = False
args.use_unoccluded = True
args.use_touch = True
elif args.pretrained == 'touch_occluded':
args.num_img_blocks = 4
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 20
args.hidden_gcn_layers = 200
args.use_occluded = True
args.use_unoccluded = False
args.use_touch = True
elif args.pretrained == 'unoccluded':
args.num_img_blocks = 5
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 15
args.hidden_gcn_layers = 150
args.use_occluded = False
args.use_unoccluded = True
args.use_touch = False
elif args.pretrained == 'occluded':
args.num_img_blocks = 4
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 25
args.hidden_gcn_layers = 250
args.use_occluded = True
args.use_unoccluded = False
args.use_touch = False
return args
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# loads the initial mesh and returns vertex, and face information
def load_mesh_touch(obj='386.obj'):
obj = import_obj(obj)
verts = np.array(obj.vertices)
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(np.array(obj.faces) - 1).cuda()
return verts, faces
# returns the chamfer distance between a mesh and a point cloud
# input:
# - verts: vertices of the mesh
# - faces: faces of the mesh
# - gt_points: point cloud to operate over
# output:
# - cd: computed chamfer distance
def chamfer_distance(verts, faces, gt_points, num=1000):
batch_size = verts.shape[0]
# sample from faces and calculate pairs
pred_points = batch_sample(verts, faces, num=num)
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
return cd.mean()
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# compute the edge lengths of a batch of meshes
def batch_calc_edge(verts, faces):
# get vertex locations of faces
p1 = torch.index_select(verts, 1, faces[:, 0])
p2 = torch.index_select(verts, 1, faces[:, 1])
p3 = torch.index_select(verts, 1, faces[:, 2])
# get edge lengths
e1 = p2 - p1
e2 = p3 - p1
e3 = p2 - p3
edge_length = (torch.sum(e1 ** 2, -1).mean() + torch.sum(e2 ** 2, -1).mean() + torch.sum(e3 ** 2, -1).mean()) / 3.
return edge_length
# returns the chamfer distance between two point clouds
# input:
# - gt_points: point cloud 1 to operate over
# - pred_points: point cloud 2 to operate over
# output:
# - cd: computed chamfer distance
def point_loss(gt_points, pred_points):
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
return cd.mean()
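# Illustrative sketch (added; not part of the original file): calc_adj and
# normalize_adj on a tiny two-triangle mesh, so the adjacency construction above
# can be checked by hand. Runs on CPU and needs no GPU or mesh files.
def _example_adjacency():
    faces = torch.LongTensor([[0, 1, 2], [1, 2, 3]])  # two triangles sharing edge (1, 2)
    adj = calc_adj(faces)        # 4 x 4 matrix with self-loops on the diagonal
    return normalize_adj(adj)    # each row rescaled so that it sums to 1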
|
3D-Vision-and-Touch-main
|
utils.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import os
from tqdm import tqdm
interval = 1300
commands_to_run = []
for i in range(200):
commands_to_run += [f'python runner.py --save_directory experiments/checkpoint/pretrained/encoder_touch '
f'--start {interval*i } --end {interval*i + interval}']
def call(command):
os.system(command)
from multiprocessing import Pool
pool = Pool(processes=10)
pbar = tqdm(pool.imap_unordered(call, commands_to_run), total=len(commands_to_run))
pbar.set_description(f"calling submitit")
for _ in pbar:
pass
|
3D-Vision-and-Touch-main
|
touch_charts/submit.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import submitit
import argparse
import produce_sheets
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--start', type=int, default=0, help='Index of the first object to process.')
parser.add_argument('--end', type=int, default=10000000, help='Index one past the last object to process.')
parser.add_argument('--save_directory', type=str, default='experiments/checkpoint/pretrained/encoder_touch',
help='Location of the model used to produce sheets')
parser.add_argument('--num_samples', type=int, default=4000, help='Number of points in the predicted point cloud.')
parser.add_argument('--model_location', type=str, default="../data/initial_sheet.obj")
parser.add_argument('--surf_co', type=float, default=9000.)
args = parser.parse_args()
trainer = produce_sheets.Engine(args)
submitit_logs_dir = os.path.join('experiments','sheet_logs_again',str(args.start))
executor = submitit.SlurmExecutor(submitit_logs_dir, max_num_timeout=3)
time = 360
executor.update_parameters(
num_gpus=1,
partition='',
cpus_per_task=16,
mem=500000,
time=time,
job_name=str(args.start),
signal_delay_s=300,
)
executor.submit(trainer)
|
3D-Vision-and-Touch-main
|
touch_charts/runner.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
import torch.nn.functional as F
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class Up(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
output = self.conv(x)
return output
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class Encoder(nn.Module):
def __init__(self, args, dim = 100):
super(Encoder, self).__init__()
self.args = args
# settings
n_channels = 3
n_classes = 1
# downscale the image
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
# upscale the image
self.down4 = Down(512, 1024)
self.up1 = Up(1024, 512)
self.up2 = Up(512, 256)
self.up3 = Up(256, 128)
self.up4 = Up(128, 64)
self.outc = OutConv(64, n_classes)
        # define a plane of the same size and shape as the touch sensor
width = .0218 - 0.00539
y_z = torch.arange(dim).cuda().view(dim, 1).expand(dim, dim).float()
y_z = torch.stack((y_z, y_z.permute(1, 0))).permute(1, 2, 0)
plane = torch.cat((torch.zeros(dim, dim, 1).cuda(), y_z), dim=-1)
self.orig_plane = (plane / float(dim) - .5) * width
# update the plane with the predicted depth information
def project_depth(self, depths, pos, rot, dim=100):
# reshape the plane to have the same position and orientation as the touch sensor when the touch occurred
batch_size = depths.shape[0]
planes = self.orig_plane.view(1 , -1 , 3).expand(batch_size, -1, 3)
planes = torch.bmm(rot, planes.permute(0, 2, 1)).permute(0, 2, 1)
planes += pos.view(batch_size, 1, 3)
# add the depth in the same direction as the normal of the sensor plane
init_camera_vector = torch.FloatTensor((1, 0, 0)).cuda().view(1, 3, 1) .expand(batch_size, 3, 1 )
camera_vector = torch.bmm(rot, init_camera_vector).permute(0, 2, 1)
camera_vector = F.normalize(camera_vector, p=2, dim=-1).view(batch_size, 1, 1, 3).expand(batch_size, dim, dim, 3)
depth_update = depths.unsqueeze(-1) * camera_vector
local_depth = (planes + depth_update.view(batch_size, -1, 3)).view(batch_size, -1, 3)
return local_depth
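    # In short (a reading of the code above, not an official derivation): each plane
    # pixel is mapped to world coordinates as
    #   point = rot @ plane_point + pos + depth * normalize(rot @ [1, 0, 0]),
    # i.e. the flat sensor plane is rotated and translated into the touch pose, then
    # pushed outward along the sensor normal by the predicted depth.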
def forward(self, gel, depth, ref_frame, empty, producing_sheet = False):
# get initial data
batch_size = ref_frame['pos'].shape[0]
pos = ref_frame['pos'].cuda().view(batch_size, -1)
rot_m = ref_frame['rot_M'].cuda().view(-1, 3, 3)
# U-Net prediction
# downscale the image
x1 = self.inc(gel)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
# upscale the image
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
        pred_depth = self.outc(x)
        # scale the prediction
        pred_depth = torch.sigmoid(pred_depth) * 0.1
        # we only want to use points in the predicted point cloud if they correspond to pixels in the touch signal
        # which are "different" enough from an untouched touch signal; otherwise they do not correspond to any
        # geometry of the object deforming the touch sensor's surface.
diff = torch.sqrt((((gel.permute(0, 2, 3, 1) - empty.permute(0, 2, 3, 1)).view(batch_size, -1, 3)) **2).sum(dim = -1))
useful_points = diff > 0.001
# project the depth values into 3D points
projected_depths = self.project_depth(pred_depth.squeeze(1), pos, rot_m).view(batch_size, -1, 3)
pred_points = []
for points, useful in zip(projected_depths, useful_points):
# select only useful points
orig_points = points.clone()
points = points[useful]
if points.shape[0] == 0:
if producing_sheet:
pred_points.append(torch.zeros((self.args.num_samples, 3)).cuda())
continue
else:
points = orig_points
# make the number of points in each element of a batch consistent
while points.shape[0] < self.args.num_samples:
points = torch.cat((points, points, points, points))
perm = torch.randperm(points.shape[0])
idx = perm[:self.args.num_samples]
points = points[idx]
pred_points.append(points)
pred_points = torch.stack(pred_points)
return pred_depth, pred_points
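# Illustrative sketch (not part of the original file): calling the touch Encoder on
# dummy tensors. The shapes are assumptions inferred from forward(); a CUDA device is
# required because the model hard-codes .cuda(). Defined for reference only, never called.
def _example_encoder_forward():
    import argparse
    args = argparse.Namespace(num_samples=4000)
    encoder = Encoder(args, dim=100).cuda()
    gel = torch.zeros(2, 3, 100, 100).cuda()    # simulated touch images
    empty = torch.zeros(2, 3, 100, 100).cuda()  # untouched reference images
    depth = torch.zeros(2, 100, 100).cuda()     # kept for the signature, unused in forward
    ref_frame = {'pos': torch.zeros(2, 3), 'rot_M': torch.eye(3).repeat(2, 1, 1)}
    pred_depth, pred_points = encoder(gel, depth, ref_frame, empty)
    return pred_depth.shape, pred_points.shape  # (2, 1, 100, 100), (2, 4000, 3)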
|
3D-Vision-and-Touch-main
|
touch_charts/models.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import models
import os
import torch
import numpy as np
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import sys
sys.path.insert(0, "../")
import utils
import data_loaders
class Engine():
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
self.classes = ['0001', '0002']
self.args = args
self.verts, self.faces = utils.load_mesh_touch(f'../data/initial_sheet.obj')
def __call__(self) -> float:
self.encoder = models.Encoder(self.args)
self.encoder.load_state_dict(torch.load(self.args.save_directory))
self.encoder.cuda()
self.encoder.eval()
train_data = data_loaders.mesh_loader_touch(self.classes, self.args, produce_sheets=True)
train_data.names = train_data.names[self.args.start:self.args.end]
train_loader = DataLoader(train_data, batch_size=1, shuffle=False,
num_workers=16, collate_fn=train_data.collate)
for k, batch in enumerate(tqdm(train_loader, smoothing=0)):
# initialize data
sim_touch = batch['sim_touch'].cuda()
depth = batch['depth'].cuda()
ref_frame = batch['ref']
# predict point cloud
with torch.no_grad():
pred_depth, sampled_points = self.encoder(sim_touch, depth, ref_frame, empty = batch['empty'].cuda())
# optimize touch chart
for points, dir in zip(sampled_points, batch['save_dir']):
if os.path.exists(dir):
continue
directory = dir[:-len(dir.split('/')[-1])]
if not os.path.exists(directory):
os.makedirs(directory)
# if not a successful touch
if torch.abs(points).sum() == 0 :
np.save(dir, np.zeros(1))
continue
# make initial mesh match touch sensor when touch occurred
initial = self.verts.clone().unsqueeze(0)
pos = ref_frame['pos'].cuda().view(1, -1)
rot = ref_frame['rot_M'].cuda().view(1, 3, 3)
initial = torch.bmm(rot, initial.permute(0, 2, 1)).permute(0, 2, 1)
initial += pos.view(1, 1, 3)
initial = initial[0]
# set up optimization
updates = torch.zeros(self.verts.shape, requires_grad=True, device="cuda")
optimizer = optim.Adam([updates], lr=0.003, weight_decay=0)
last_improvement = 0
best_loss = 10000
while True:
# update
optimizer.zero_grad()
verts = initial + updates
# losses
surf_loss = utils.chamfer_distance(verts.unsqueeze(0), self.faces, points.unsqueeze(0), num =self.args.num_samples)
edge_lengths = utils.batch_calc_edge(verts.unsqueeze(0), self.faces)
loss = self.args.surf_co * surf_loss + 70 * edge_lengths
# optimize
loss.backward()
optimizer.step()
                    # check results: record the best mesh before any early exit so that
                    # best_verts is always defined when it is saved below
                    if best_loss > loss:
                        best_loss = loss
                        best_verts = verts.clone()
                        last_improvement = 0
                    else:
                        last_improvement += 1
                        if last_improvement > 50:
                            break
                    if loss < 0.0006:
                        break
np.save(dir, best_verts.data.cpu().numpy())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
    parser.add_argument('--start', type=int, default=0, help='Index of the first sample to process (used to shard the job).')
    parser.add_argument('--end', type=int, default=10000000, help='Index after the last sample to process (exclusive slice bound).')
parser.add_argument('--save_directory', type=str, default='experiments/checkpoint/pretrained/encoder_touch',
help='Location of the model used to produce sheet')
parser.add_argument('--num_samples', type=int, default=4000, help='Number of points in the predicted point cloud.')
parser.add_argument('--model_location', type=str, default="../data/initial_sheet.obj",
                        help='Location of the initial mesh sheet which will be optimized')
parser.add_argument('--surf_co', type=float, default=9000.)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
3D-Vision-and-Touch-main
|
touch_charts/produce_sheets.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import models
from torch.utils.tensorboard import SummaryWriter
import torch
import numpy as np
import torch.optim as optim
import os
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import sys
sys.path.insert(0, "../")
import utils
import data_loaders
class Engine():
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.classes = ['0001', '0002']
self.checkpoint_dir = os.path.join('experiments/checkpoint/', args.exp_type, args.exp_id)
self.log_dir = f'experiments/results/{self.args.exp_type}/{self.args.exp_id}/'
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
def __call__(self) -> float:
self.encoder = models.Encoder(self.args)
self.encoder.cuda()
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
writer = SummaryWriter(os.path.join('experiments/tensorboard/', args.exp_type ))
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
self.load('')
with torch.no_grad():
self.validate(valid_loaders, writer)
exit()
for epoch in range(self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
def get_loaders(self):
# training data
train_data = data_loaders.mesh_loader_touch(self.classes, self.args, set_type='train')
train_loader = DataLoader(train_data, batch_size=self.args.batch_size, shuffle=True, num_workers=16, collate_fn=train_data.collate)
# validation data
valid_loaders = []
set_type = 'test' if self.args.eval else 'valid'
for c in self.classes:
valid_data = data_loaders.mesh_loader_touch(c, self.args, set_type=set_type)
valid_loaders.append(
DataLoader(valid_data, batch_size=self.args.batch_size, shuffle=False, num_workers=16, collate_fn=valid_data.collate))
return train_loader, valid_loaders
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data)):
self.optimizer.zero_grad()
# initialize data
sim_touch = batch['sim_touch'].cuda()
depth = batch['depth'].cuda()
ref_frame = batch['ref']
gt_points = batch['samples'].cuda()
# inference
pred_depth, pred_points = self.encoder(sim_touch, depth, ref_frame, empty = batch['empty'].cuda())
# losses
loss = point_loss = self.args.loss_coeff * utils.point_loss(pred_points, gt_points)
total_loss += point_loss.item()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f'Train || Epoch: {self.epoch}, loss: {loss.item():.5f} '
message += f'|| best_loss: {self.best_loss :.5f}'
tqdm.write(message)
iterations += 1.
writer.add_scalars('train', {self.args.exp_id: total_loss / iterations}, self.epoch)
def validate(self, data, writer):
total_loss = 0
self.encoder.eval()
# loop through every class
for v, valid_loader in enumerate(data):
num_examples = 0
class_loss = 0
# loop through every batch
for k, batch in enumerate(tqdm(valid_loader)):
# initialize data
sim_touch = batch['sim_touch'].cuda()
depth = batch['depth'].cuda()
ref_frame = batch['ref']
gt_points = batch['samples'].cuda()
obj_class = batch['class'][0]
batch_size = gt_points.shape[0]
# inference
pred_depth, pred_points = self.encoder( sim_touch, depth, ref_frame, empty = batch['empty'].cuda())
# losses
point_loss = self.args.loss_coeff * utils.point_loss(pred_points, gt_points)
# log
num_examples += float(batch_size)
class_loss += point_loss * float(batch_size)
# log
class_loss = (class_loss / num_examples)
message = f'Valid || Epoch: {self.epoch}, class: {obj_class}, loss: {class_loss:.5f}'
message += f' || best_loss: {self.best_loss:.5f}'
tqdm.write(message)
total_loss += (class_loss / float(len(self.classes)))
# log
print('*******************************************************')
print(f'Total validation loss: {total_loss}')
print('*******************************************************')
if not self.args.eval:
writer.add_scalars('valid', {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
def save(self, label):
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
torch.save(self.encoder.state_dict(), self.checkpoint_dir + '/encoder_touch' + label)
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + '/optim_touch' + label)
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
self.best_loss = self.current_loss
print(f'Saving Model with a {improvement} improvement in point loss')
self.save('')
self.last_improvement = 0
else:
self.last_improvement += 1
if self.last_improvement == self.args.patience:
                print(f'Over {self.args.patience} steps since last improvement')
print('Exiting now')
exit()
if self.epoch % 10 == 0:
print(f'Saving Model at epoch {self.epoch}')
self.save(f'_recent')
print('*******************************************************')
def load(self, label):
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + '/encoder_touch' + label))
self.optimizer.load_state_dict(torch.load(self.checkpoint_dir + '/optim_touch' + label))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Setting for the random seed.')
parser.add_argument('--epochs', type=int, default=300, help='Number of epochs to use.')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')
parser.add_argument('--eval', action='store_true', default=False, help='Evaluate the trained model on the test set.')
parser.add_argument('--batch_size', type=int, default=128, help='Size of the batch.')
parser.add_argument('--num_samples', type=int, default=4000, help='Number of points in the predicted point cloud.')
    parser.add_argument('--patience', type=int, default=70, help='How many epochs without improvement before training stops.')
parser.add_argument('--loss_coeff', type=float, default=9000., help='Coefficient for loss term.')
parser.add_argument('--exp_id', type=str, default='test', help='The experiment name')
parser.add_argument('--exp_type', type=str, default='test', help='The experiment group')
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
3D-Vision-and-Touch-main
|
touch_charts/recon.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
from .chamfer_distance import ChamferDistance
|
3D-Vision-and-Touch-main
|
third_party_code/__init__.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import torch
from torch.utils.cpp_extension import load
cd = load(name="cd",
sources=["../third_party_code/chamfer_distance.cpp",
"../third_party_code/chamfer_distance.cu"])
class ChamferDistanceFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
xyz1 = xyz1.contiguous()
xyz2 = xyz2.contiguous()
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n, dtype=torch.int)
idx2 = torch.zeros(batchsize, m, dtype=torch.int)
dist1 = dist1.cuda()
dist2 = dist2.cuda()
idx1 = idx1.cuda()
idx2 = idx2.cuda()
cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
return idx1, idx2
class ChamferDistance(torch.nn.Module):
def forward(self, xyz1, xyz2):
return ChamferDistanceFunction.apply(xyz1, xyz2)
|
3D-Vision-and-Touch-main
|
third_party_code/chamfer_distance.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import os
from tqdm import tqdm
def call(command):
os.system(command)
param_namer = {'--seed': 'seed', '--num_gcn_layers': 'ngl', '--hidden_gcn_layers': 'hgl', '--num_img_blocks': 'nib',
'--num_img_layers': 'nil', '--num_grasps': 'grasps', '--geo': 'geo'}
commands = []
ex_type = 'Comparison'
eval = False
def add_commands(forced_params, string, params, exp_id_start):
for f in forced_params:
string += f' {f}'
number = []
keys = list(params.keys())
for param_name in keys:
number.append(len(params[param_name]))
numbers = np.where(np.zeros(number) == 0 )
numbers = np.stack(numbers).transpose()
commands = []
for n in numbers :
exp_id = exp_id_start
command = string
for e, k in enumerate(n):
param_name = keys[e]
param_value = params[param_name][k]
command += f' {param_name} {param_value}'
exp_id += f'_{param_namer[param_name]}_{param_value}'
if eval:
command += ' --eval'
command += f' --exp_id {exp_id}'
commands.append(command)
return commands
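# Note (a reading of add_commands above, not part of the original file):
# np.where(np.zeros(number) == 0) enumerates every index combination, so the loop
# walks the full Cartesian product of the parameter values. For example, the first
# command of the "occluded" block below is roughly
#   CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type Comparison --use_occluded
#       --num_gcn_layers 15 --hidden_gcn_layers 150 --num_img_blocks 4 --num_img_layers 3
#       --exp_id @occluded_ngl_15_hgl_150_nib_4_nil_3
# before the `_command_<i>@` suffix appended at the end of the script.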
######################
###### empty #########
######################
params = {'--seed': [0,1,2,3,4,5]}
exp_id_start = '@empty'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = []
commands += add_commands(forced_params, string, params, exp_id_start)
######################
###### occluded ######
######################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5]}
exp_id_start = '@occluded'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_occluded']
commands += add_commands(forced_params, string, params, exp_id_start)
######################
###### unoccluded ####
######################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5]}
exp_id_start = '@unoccluded'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_unoccluded']
commands += add_commands(forced_params, string, params, exp_id_start)
########################
#### touch ######
########################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_grasps': [1, 2, 3, 4, 5]}
exp_id_start = '@touch'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_touch', ]
commands += add_commands(forced_params, string, params, exp_id_start)
##############################
##### occluded + touch #######
##############################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5]}
exp_id_start = '@occluded_touch'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_occluded', '--use_touch']
commands += add_commands(forced_params, string, params, exp_id_start)
##############################
### touch + unoccluded ######
##############################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5],'--num_grasps': [1, 2, 3, 4, 5] }
exp_id_start = '@unoccluded_touch'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_unoccluded', '--use_touch', ]
commands += add_commands(forced_params, string, params, exp_id_start)
for i in range(len(commands)):
commands[i] += f'_command_{i}@'
from multiprocessing import Pool
pool = Pool(processes=10)
pbar = tqdm(pool.imap_unordered(call, commands), total=len(commands))
pbar.set_description(f"calling submitit")
for _ in pbar:
pass
|
3D-Vision-and-Touch-main
|
vision_charts/submit.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import submitit
import argparse
import recon
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Setting for the random seed.')
parser.add_argument('--geo', type=int, default=0, help='Use the GEOMetrics setup.')
parser.add_argument('--lr', type=float, default=0.0003, help='Initial learning rate.')
parser.add_argument('--eval', action='store_true', default=False, help='Evaluate the trained model on the test set.')
parser.add_argument('--batch_size', type=int, default=25, help='Size of the batch.')
parser.add_argument('--exp_id', type=str, default='Eval', help='The experiment name')
parser.add_argument('--exp_type', type=str, default='Test', help='The experiment group')
parser.add_argument('--use_occluded', action='store_true', default=False, help='To use the occluded image.')
parser.add_argument('--use_unoccluded', action='store_true', default=False, help='To use the unoccluded image.')
parser.add_argument('--use_touch', action='store_true', default=False, help='To use the touch information.')
parser.add_argument('--patience', type=int, default=70, help='How many epochs without improvement before training stops.')
parser.add_argument('--loss_coeff', type=float, default=9000., help='Coefficient for loss term.')
parser.add_argument('--num_img_blocks', type=int, default=6, help='Number of image blocks in the image encoder.')
parser.add_argument('--num_img_layers', type=int, default=3, help='Number of image layers in each block of the image encoder.')
parser.add_argument('--size_img_ker', type=int, default=5, help='Size of the image kernel in each image encoder layer.')
parser.add_argument('--num_gcn_layers', type=int, default=20, help='Number of GCN layers in the mesh deformation network.')
parser.add_argument('--hidden_gcn_layers', type=int, default=300, help='Size of the feature vector for each GCN layer in the mesh deformation network.')
parser.add_argument('--num_grasps', type=int, default=1, help='Number of grasps in each instance to train with')
parser.add_argument('--pretrained', type=str, default='no', help='String indicating which pretrained model to use.',
choices=['no', 'touch', 'touch_unoccluded', 'touch_occluded', 'unoccluded', 'occluded'])
parser.add_argument('--visualize', action='store_true', default=False)
args = parser.parse_args()
trainer = recon.Engine(args)
submitit_logs_dir = os.path.join('experiments','logs', args.exp_type, args.exp_id )
executor = submitit.SlurmExecutor(submitit_logs_dir, max_num_timeout=3)
if args.eval:
time = 30
else:
time = 60*48
executor.update_parameters(
num_gpus=1,
partition='',
cpus_per_task=16,
mem=500000,
time=time,
job_name=args.exp_id,
signal_delay_s=300,
)
executor.submit(trainer)
|
3D-Vision-and-Touch-main
|
vision_charts/runner.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import math
# network for making image features for vertex feature vectors
class Image_Encoder(nn.Module):
def __init__(self, args):
super(Image_Encoder, self).__init__()
layers = []
cur_size = 6
next_size = 16
for i in range(args.num_img_blocks):
layers.append(CNN_layer(cur_size, next_size, args.size_img_ker, stride=2))
cur_size = next_size
next_size = next_size * 2
for j in range(args.num_img_layers -1):
layers.append(CNN_layer(cur_size, cur_size, args.size_img_ker))
self.args = args
self.layers = nn.ModuleList(layers)
f = 221.7025
RT = np.array([[-0.0000, -1.0000, 0.0000, -0.0000],
[-0.7071, 0.0000, -0.7071, 0.4243],
[0.7071, 0.0000, -0.7071, 1.1314]])
K = np.array([[f, 0, 128.], [0, f, 128.], [0, 0, 1]])
self.matrix = torch.FloatTensor(K.dot(RT)).cuda()
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
    # defines image features over vertices from vertex positions and feature maps from vision
def pooling(self, blocks, verts_pos, debug=False):
# convert vertex positions to x,y coordinates in the image, scaled to fractions of image dimension
ext_verts_pos = torch.cat(
(verts_pos, torch.FloatTensor(np.ones([verts_pos.shape[0], verts_pos.shape[1], 1])).cuda()), dim=-1)
ext_verts_pos = torch.matmul(ext_verts_pos, self.matrix.permute(1, 0))
xs = ext_verts_pos[:, :, 1] / ext_verts_pos[:, :, 2] / 256.
ys = ext_verts_pos[:, :, 0] / ext_verts_pos[:, :, 2] / 256.
full_features = None
batch_size = verts_pos.shape[0]
        # check that the camera projection covers the image
if debug:
dim = 256
xs = (torch.clamp(xs * dim, 0, dim - 1).data.cpu().numpy()).astype(np.uint8)
ys = (torch.clamp(ys * dim, 0, dim - 1).data.cpu().numpy()).astype(np.uint8)
for ex in range(blocks.shape[0]):
img = blocks[ex].permute(1, 2, 0).data.cpu().numpy()[:, :, :3]
for x, y in zip(xs[ex], ys[ex]):
img[x, y, 0] = 1
img[x, y, 1] = 0
img[x, y, 2] = 0
from PIL import Image
Image.fromarray((img * 255).astype(np.uint8)).save('results/temp.png')
print('saved')
input()
for block in blocks:
# scale projected vertex points to dimension of current feature map
dim = block.shape[-1]
cur_xs = torch.clamp(xs * dim, 0, dim - 1)
cur_ys = torch.clamp(ys * dim, 0, dim - 1)
# https://en.wikipedia.org/wiki/Bilinear_interpolation
x1s, y1s, x2s, y2s = torch.floor(cur_xs), torch.floor(cur_ys), torch.ceil(cur_xs), torch.ceil(cur_ys)
A = x2s - cur_xs
B = cur_xs - x1s
G = y2s - cur_ys
H = cur_ys - y1s
x1s = x1s.type(torch.cuda.LongTensor)
y1s = y1s.type(torch.cuda.LongTensor)
x2s = x2s.type(torch.cuda.LongTensor)
y2s = y2s.type(torch.cuda.LongTensor)
# flatten batch of feature maps to make vectorization easier
flat_block = block.permute(1, 0, 2, 3).contiguous().view(block.shape[1], -1)
block_idx = torch.arange(0, verts_pos.shape[0]).cuda().unsqueeze(-1).expand(batch_size, verts_pos.shape[1])
block_idx = block_idx * dim * dim
selection = (block_idx + (x1s * dim) + y1s).view(-1)
C = torch.index_select(flat_block, 1, selection)
C = C.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
selection = (block_idx + (x1s * dim) + y2s).view(-1)
D = torch.index_select(flat_block, 1, selection)
D = D.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
selection = (block_idx + (x2s * dim) + y1s).view(-1)
E = torch.index_select(flat_block, 1, selection)
E = E.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
selection = (block_idx + (x2s * dim) + y2s).view(-1)
F = torch.index_select(flat_block, 1, selection)
F = F.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
section1 = A.unsqueeze(1) * C * G.unsqueeze(1)
section2 = H.unsqueeze(1) * D * A.unsqueeze(1)
section3 = G.unsqueeze(1) * E * B.unsqueeze(1)
section4 = B.unsqueeze(1) * F * H.unsqueeze(1)
features = (section1 + section2 + section3 + section4)
features = features.permute(0, 2, 1)
if full_features is None:
full_features = features
else:
full_features = torch.cat((full_features, features), dim=2)
return full_features
def forward(self, img_occ, img_unocc, cur_vertices):
# double size due to legacy decision
if self.args.use_unoccluded:
x = torch.cat((img_unocc, img_unocc), dim = 1)
elif self.args.use_occluded:
x = torch.cat((img_occ, img_occ), dim=1)
else:
x = torch.cat((img_occ, img_unocc), dim=1)
features = []
layer_selections = [len(self.layers) - 1 - (i+1)*self.args.num_img_layers for i in range(3)]
for e, layer in enumerate(self.layers):
if x.shape[-1] < self.args.size_img_ker:
break
x = layer(x)
# collect feature maps
if e in layer_selections:
features.append(x)
features.append(x)
# get vertex features from selected feature maps
vert_image_features = self.pooling(features, cur_vertices)
return vert_image_features
# global chart deformation class
class Encoder(nn.Module):
def __init__(self, adj_info, inital_positions, args):
super(Encoder, self).__init__()
self.adj_info = adj_info
self.initial_positions = inital_positions
self.args = args
input_size = 3 # used to determine the size of the vertex feature vector
if args.use_occluded or args.use_unoccluded:
self.img_encoder = Image_Encoder(args).cuda()
with torch.no_grad():
input_size += self.img_encoder(torch.zeros(1, 3, 256, 256).cuda(), torch.zeros(1, 3, 256, 256).cuda(), torch.zeros(1, 1, 3).cuda()).shape[-1]
if self.args.use_touch:
input_size+=1
self.mesh_decoder = GCN(input_size, args).cuda()
def forward(self, img_occ, img_unocc, batch):
# initial data
batch_size = img_occ.shape[0]
cur_vertices = self.initial_positions.unsqueeze(0).expand(batch_size, -1, -1)
size_vision_charts = cur_vertices.shape[1]
# if using touch then append touch chart position to graph definition
if self.args.use_touch:
sheets = batch['sheets'].cuda().view(batch_size, -1, 3)
cur_vertices = torch.cat((cur_vertices,sheets), dim = 1 )
        # cycle through deformation
for _ in range(3):
vertex_features = cur_vertices.clone()
# add vision features
if self.args.use_occluded or self.args.use_unoccluded:
vert_img_features = self.img_encoder(img_occ, img_unocc, cur_vertices)
vertex_features = torch.cat((vert_img_features, vertex_features), dim=-1)
# add mask for touch charts
if self.args.use_touch:
vision_chart_mask = torch.ones(batch_size, size_vision_charts, 1).cuda() * 2 # flag corresponding to vision
touch_chart_mask = torch.FloatTensor(batch['successful']).cuda().unsqueeze(-1).expand(batch_size, 4 * self.args.num_grasps, 25)
touch_chart_mask = touch_chart_mask.contiguous().view(batch_size, -1, 1)
mask = torch.cat((vision_chart_mask, touch_chart_mask), dim=1)
vertex_features = torch.cat((vertex_features,mask), dim = -1)
# deform the vertex positions
vertex_positions = self.mesh_decoder(vertex_features, self.adj_info)
# avoid deforming the touch chart positions
vertex_positions[:, size_vision_charts:] = 0
cur_vertices = cur_vertices + vertex_positions
return cur_vertices
# implemented from:
# https://github.com/tkipf/pygcn/tree/master/pygcn
# MIT License
# Graph convolutional network for chart deformation
class GCN(nn.Module):
def __init__(self, input_features, args):
super(GCN, self).__init__()
self.num_layers = args.num_gcn_layers
# define output sizes for each GCN layer
hidden_values = [input_features] + [ args.hidden_gcn_layers for k in range(self.num_layers -1)] + [3]
# define layers
layers = []
for i in range(self.num_layers):
layers.append(GCN_layer(hidden_values[i], hidden_values[i+1]))
self.layers = nn.ModuleList(layers)
def forward(self, vertex_features, adj_info):
adj = adj_info['adj']
# iterate through GCN layers
x = self.layers[0](vertex_features, adj, F.relu)
for i in range(1, self.num_layers-1):
x = self.layers[i](x, adj, F.relu)
coords = (self.layers[-1](x, adj, lambda x: x))
return coords
# CNN layer definition
def CNN_layer(f_in, f_out, k, stride = 1):
layers = []
layers.append(nn.Conv2d(int(f_in), int(f_out), kernel_size=k, padding=1, stride=stride))
layers.append(nn.BatchNorm2d(int(f_out)))
layers.append(nn.ReLU(inplace=True))
return nn.Sequential(*layers)
# implemented from:
# https://github.com/tkipf/pygcn/tree/master/pygcn
# MIT License
# Graph convolutional network layer definition
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(GCN_layer, self).__init__()
self.weight1 = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 6. / math.sqrt((self.weight1.size(1) + self.weight1.size(0)))
stdv *= .3
self.weight1.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-.1, .1)
def forward(self, features, adj, activation):
# 0N-GCN definition, removes need for resnet layers
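        # (a reading of the lines below) only the first third of the transformed
        # features is smoothed over the adjacency matrix; the remaining two thirds
        # are concatenated back untouched, so each vertex keeps an un-mixed component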
features = torch.matmul(features, self.weight1)
output = torch.matmul(adj, features[:, :, :features.shape[-1] // 3])
output = torch.cat((output, features[:, :, features.shape[-1] // 3:]), dim=-1)
output = output + self.bias
return activation(output)
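# Illustrative sketch (not part of the original file): the bilinear-interpolation
# weights used in Image_Encoder.pooling, applied to one (C, H, W) feature map at a
# single fractional pixel coordinate. Variable names mirror the pooling code above;
# as in that code, integer coordinates give x1 == x2 and therefore zero weights.
def _example_bilinear_sample(block, x, y):
    x1, x2 = math.floor(x), math.ceil(x)
    y1, y2 = math.floor(y), math.ceil(y)
    A, B = x2 - x, x - x1  # weights along the image rows
    G, H = y2 - y, y - y1  # weights along the image columns
    C_ = block[:, x1, y1]
    D_ = block[:, x1, y2]
    E_ = block[:, x2, y1]
    F_ = block[:, x2, y2]
    return A * G * C_ + A * H * D_ + B * G * E_ + B * H * F_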
|
3D-Vision-and-Touch-main
|
vision_charts/models.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import models
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import tensorflow as tf
import tensorboard as tb
from tqdm import tqdm
import sys
sys.path.insert(0, "../")
import utils
import data_loaders
class Engine():
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.num_samples = 10000
self.classes = ['0001', '0002']
self.checkpoint_dir = os.path.join('experiments/checkpoint/', args.exp_type, args.exp_id)
def __call__(self) -> float:
# initial data
if self.args.GEOmetrics:
self.adj_info, initial_positions = utils.load_mesh_vision(self.args, f'../data/sphere.obj')
else:
self.adj_info, initial_positions = utils.load_mesh_vision(self.args, f'../data/vision_sheets.obj')
self.encoder = models.Encoder(self.adj_info, Variable(initial_positions.cuda()), self.args)
self.encoder.cuda()
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
writer = SummaryWriter(os.path.join('experiments/tensorboard/', self.args.exp_type ))
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
if self.args.pretrained != 'no':
self.load_pretrained()
else:
self.load('')
with torch.no_grad():
self.validate(valid_loaders, writer)
exit()
# training loop
for epoch in range(3000):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
def get_loaders(self):
train_data = data_loaders.mesh_loader_vision(self.classes, self.args, set_type='train', sample_num=self.num_samples)
train_loader = DataLoader(train_data, batch_size=self.args.batch_size, shuffle=True, num_workers=16, collate_fn=train_data.collate)
valid_loaders = []
set_type = 'test' if self.args.eval else 'valid'
for c in self.classes:
valid_data = data_loaders.mesh_loader_vision(c, self.args, set_type=set_type, sample_num=self.num_samples)
valid_loaders.append( DataLoader(valid_data, batch_size=self.args.batch_size, shuffle=False, num_workers=16, collate_fn=valid_data.collate))
return train_loader, valid_loaders
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data)):
self.optimizer.zero_grad()
# initialize data
img_occ = batch['img_occ'].cuda()
img_unocc = batch['img_unocc'].cuda()
gt_points = batch['gt_points'].cuda()
# inference
# self.encoder.img_encoder.pooling(img_unocc, gt_points, debug=True)
verts = self.encoder(img_occ, img_unocc, batch)
# losses
loss = utils.chamfer_distance(verts, self.adj_info['faces'], gt_points, num=self.num_samples)
loss = self.args.loss_coeff * loss.mean()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f'Train || Epoch: {self.epoch}, loss: {loss.item():.2f}, b_ptp: {self.best_loss:.2f}'
tqdm.write(message)
total_loss += loss.item()
iterations += 1.
writer.add_scalars('train_loss', {self.args.exp_id : total_loss / iterations}, self.epoch)
def validate(self, data, writer):
total_loss = 0
# local losses at different distances from the touch sites
self.encoder.eval()
for v, valid_loader in enumerate(data):
num_examples = 0
class_loss = 0
for k, batch in enumerate(tqdm(valid_loader)):
# initialize data
img_occ = batch['img_occ'].cuda()
img_unocc = batch['img_unocc'].cuda()
gt_points = batch['gt_points'].cuda()
batch_size = img_occ.shape[0]
obj_class = batch['class'][0]
# model prediction
verts = self.encoder(img_occ, img_unocc, batch)
# losses
loss = utils.chamfer_distance(verts, self.adj_info['faces'], gt_points, num=self.num_samples)
loss = self.args.loss_coeff * loss.mean() * batch_size
# logs
num_examples += float(batch_size)
class_loss += loss
print_loss = (class_loss / num_examples)
message = f'Valid || Epoch: {self.epoch}, class: {obj_class}, f1: {print_loss:.2f}'
tqdm.write(message)
total_loss += (print_loss / float(len(self.classes)))
print('*******************************************************')
print(f'Validation Accuracy: {total_loss}')
print('*******************************************************')
writer.add_scalars('valid_ptp', {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
def save(self, label):
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
torch.save(self.encoder.state_dict(), self.checkpoint_dir + '/encoder_vision' + label)
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + '/optim_vision' + label)
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss -self.current_loss
self.best_loss = self.current_loss
print(f'Saving Model with a {improvement} improvement')
self.save('')
self.last_improvement = 0
else:
self.last_improvement += 1
if self.last_improvement == self.args.patience:
                print(f'Over {self.args.patience} steps since last improvement')
print('Exiting now')
exit()
if self.epoch % 10 == 0:
print(f'Saving Model at epoch {self.epoch}')
self.save(f'_recent')
print('*******************************************************')
def load(self, label):
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + '/encoder_vision' + label))
self.optimizer.load_state_dict(torch.load(self.checkpoint_dir + '/optim_vision' + label))
def load_pretrained(self):
pretrained_location = 'experiments/checkpoint/pretrained/' + self.args.pretrained
self.encoder.load_state_dict(torch.load(pretrained_location))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Setting for the random seed.')
    parser.add_argument('--GEOmetrics', type=int, default=0, help='Use the GEOMetrics setup instead.')
parser.add_argument('--lr', type=float, default=0.0003, help='Initial learning rate.')
parser.add_argument('--eval', action='store_true', default=False, help='Evaluate the trained model on the test set.')
parser.add_argument('--batch_size', type=int, default=16, help='Size of the batch.')
parser.add_argument('--exp_id', type=str, default='Eval', help='The experiment name')
parser.add_argument('--exp_type', type=str, default='Test', help='The experiment group')
parser.add_argument('--use_occluded', action='store_true', default=False, help='To use the occluded image.')
parser.add_argument('--use_unoccluded', action='store_true', default=False, help='To use the unoccluded image.')
parser.add_argument('--use_touch', action='store_true', default=False, help='To use the touch information.')
    parser.add_argument('--patience', type=int, default=30, help='How many epochs without improvement before training stops.')
parser.add_argument('--loss_coeff', type=float, default=9000., help='Coefficient for loss term.')
    parser.add_argument('--num_img_blocks', type=int, default=6, help='Number of image blocks in the image encoder.')
    parser.add_argument('--num_img_layers', type=int, default=3, help='Number of image layers in each block of the image encoder.')
    parser.add_argument('--size_img_ker', type=int, default=5, help='Size of the image kernel in each image encoder layer.')
    parser.add_argument('--num_gcn_layers', type=int, default=20, help='Number of GCN layers in the mesh deformation network.')
parser.add_argument('--hidden_gcn_layers', type=int, default=300, help='Size of the feature vector for each GCN layer in the mesh deformation network.')
parser.add_argument('--num_grasps', type=int, default=1, help='Number of grasps in each instance to train with')
parser.add_argument('--pretrained', type=str, default='no', help='String indicating which pretrained model to use.',
choices=['no', 'empty', 'touch', 'touch_unoccluded', 'touch_occluded', 'unoccluded', 'occluded'])
args = parser.parse_args()
# update args for pretrained models
args = utils.pretrained_args(args)
trainer = Engine(args)
trainer()
|
3D-Vision-and-Touch-main
|
vision_charts/recon.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
# coding: utf-8
"""Dataset Loader for Memory Dialogs.
Author(s): noctli, skottur
(c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
"""
import json
import logging
import os
import pickle
import re
from itertools import chain
import numpy as np
import torch
import torch.utils.data
import tqdm
from dataset import tokenize
from torch.utils.data import Dataset
# from train import SPECIAL_TOKENS, MODEL_INPUTS, PADDED_INPUTS
# SPECIAL_TOKENS = ["<bos>", "<eos>", "<user>", "<system>", "<video>", "<pad>"]
# SPECIAL_TOKENS_DICT = {
# "bos_token": "<bos>",
# "eos_token": "<eos>",
# "additional_special_tokens": ["<user>", "<system>", "<video>", "<cap>"],
# "pad_token": "<pad>",
# }
MODEL_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
PADDED_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
MEMORY_BREAK = "<MM_BREAK>"
ANCHOR_TOKENS = ["<USER>", "<SYSTEM>", "<MM>", "<SOAC>", "<SOAR>", "<SOR>"]
def get_dataset(tokenizer, data_file, feature_path=None, feature_width=None):
"""Get dataset given tokenizer and data file."""
with open(data_file, "r") as file_id:
instance_data = json.load(file_id)
# Read the features from the folder.
if feature_path is not None:
feature_map = {}
feature_type = None
listings = [ii for ii in os.listdir(feature_path) if ".npy" in ii]
for file_name in listings:
search_slots = re.findall(r"mscoco_([^_]*)_([\d]*).npy", file_name)
extracted_type, memory_id = search_slots[0]
if not feature_type:
feature_type = extracted_type
else:
assert feature_type == extracted_type, (
f"Mismatch feature type: {feature_type} != {extracted_type}"
)
file_path = os.path.join(feature_path, file_name)
feature_map[memory_id] = file_path
else:
feature_map = None
feature_type = None
# instance_data = instance_data[:10]
for datum in tqdm.tqdm(instance_data, desc="Preparing dataset"):
context = datum["predict"]
target = datum["target"]
# Identify memory features (if any) in the context.
# NOTE: Make this cleaner, slightly adhoc at the moment.
split_str = context.split(MEMORY_BREAK)
memory_ids = []
for ii in split_str[:-1]:
memory_ids.append(int(ii.rsplit(" ", 1)[-1]))
assert len(memory_ids) + 1 == len(split_str), "Invalid MM breaks!"
# Alternatively zip the two lists.
zipped_context = [None for _ in range(len(memory_ids) + len(split_str))]
zipped_context[::2] = split_str
zipped_context[1::2] = [
{
"memory_id": ii,
"memory_feature_path": os.path.join(
feature_path, f"mscoco_{feature_type}_{ii}.npy"
),
}
for ii in memory_ids
]
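        # e.g. a context of the form "<text A> 42<MM_BREAK><text B>" gives
        # split_str == ["<text A> 42", "<text B>"] and memory_ids == [42];
        # zipped_context then alternates the text chunks with memory-feature dicts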
# Extract the token types.
zipped_token_type_ids = []
zipped_context_tokens = []
current_type = None
for context_part in zipped_context:
if not isinstance(context_part, dict):
tokenized_substr, substr_type_ids, current_type = tokenize_by_type(
context_part, tokenizer, current_type
)
assert len(tokenized_substr) == len(
substr_type_ids
), "String tokens and token ids should be of same length!"
zipped_context_tokens.append(tokenized_substr)
zipped_token_type_ids.extend(substr_type_ids)
else:
assert "memory_id" in context_part, "Not a memory!"
if feature_path:
zipped_token_type_ids.extend(
[tokenizer.convert_tokens_to_ids("<MM>")] * feature_width
)
zipped_context_tokens.append(context_part)
datum["context_tokens"] = zipped_context_tokens
datum["context_token_types"] = zipped_token_type_ids
assert MEMORY_BREAK not in target, "Target cannot have multimodal entries!"
datum["target_tokens"] = tokenize(target, tokenizer)
if datum["type"] == "API":
target_token_type_ids = [tokenizer.convert_tokens_to_ids("<SOAC>")] * len(
datum["target_tokens"]
)
else:
target_token_type_ids = [tokenizer.convert_tokens_to_ids("<SOR>")] * len(
datum["target_tokens"]
)
datum["target_token_types"] = target_token_type_ids
# Get input tokens by merging the two.
input_tokens, input_token_types, lm_labels = merge_context_target_tokens(datum)
datum["input_tokens"] = input_tokens
datum["input_token_types"] = input_token_types
datum["lm_labels"] = lm_labels
return instance_data, feature_map
def merge_context_target_tokens(datum):
"""Merge context and target tokens."""
input_tokens = datum["context_tokens"] + [datum["target_tokens"]]
input_token_types = datum["context_token_types"] + datum["target_token_types"]
lm_labels = [-1] * len(datum["context_token_types"]) + datum["target_tokens"]
return input_tokens, input_token_types, lm_labels
def tokenize_by_type(string, tokenizer, start_type=None):
# Raw tokenization.
tokens = string.split(" ")
current_type = start_type
start_index = 0
token_splits = []
for index, token in enumerate(tokens):
if token in ANCHOR_TOKENS:
# First discovered token type, do nothing.
if current_type is not None:
reconstructed_str = " ".join(tokens[start_index:index])
token_splits.append((reconstructed_str, current_type))
start_index = index
current_type = token
# Repeat for the last section.
reconstructed_str = " ".join(tokens[start_index : index + 1])
token_splits.append((reconstructed_str, current_type))
# Now tokenize the substrings.
tokenized_str = []
tokenized_type_ids = []
for substring, current_type in token_splits:
tokenized_substring = tokenize(substring, tokenizer)
tokenized_str.extend(tokenized_substring)
tokenized_type_ids.extend(
[
tokenizer.convert_tokens_to_ids(current_type)
for _ in range(len(tokenized_substring))
]
)
return tokenized_str, tokenized_type_ids, current_type
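# Illustrative walk-through of tokenize_by_type (not part of the original file):
# "<USER> hi there <SYSTEM> hello" is split into the typed chunks
# ("<USER> hi there", "<USER>") and ("<SYSTEM> hello", "<SYSTEM>"); each chunk is then
# tokenized and every resulting token is tagged with its anchor's token id. The final
# current_type is also returned so the text chunk after a memory break keeps the same type.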
class MemoryDialogDataset(Dataset):
def __init__(self, dialogs, tokenizer, features=None, drop_rate=0.5, train=True):
self.dialogs = dialogs
self.features = features
self.tokenizer = tokenizer
self.drop_rate = drop_rate
self.train = train
def __len__(self):
return len(self.dialogs)
def __getitem__(self, index):
instance = self.dialogs[index]
input_ids = []
# TODO: Move this to initialization?
for ii in instance["input_tokens"]:
if isinstance(ii, list):
input_ids.append(torch.Tensor(ii).long())
else:
if self.features:
memory_features = np.load(
ii["memory_feature_path"], allow_pickle=True
)[()]["features"]
input_ids.append({"features": memory_features})
token_type_ids = torch.Tensor(instance["input_token_types"]).long()
lm_labels = torch.Tensor(instance["lm_labels"]).long()
return input_ids, token_type_ids, lm_labels
def padding(seq, pad_token):
max_len = max([i.size(0) for i in seq])
input_mask = torch.zeros((len(seq), max_len)).long()
if len(seq[0].size()) == 1:
result = torch.ones((len(seq), max_len)).long() * pad_token
else:
result = torch.ones(
(len(seq), max_len, seq[0].size(-1)),
dtype=seq[0].dtype,
device=seq[0].device,
)
for i in range(len(seq)):
result[i, : seq[i].size(0)] = seq[i]
input_mask[i, : seq[i].size(0)] = 1.0
return result, input_mask
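# Illustrative sketch (not part of the original file): padding two 1-D sequences of
# lengths 3 and 5 with pad_token=0 gives a (2, 5) id tensor plus a 0/1 mask over the
# real (non-padded) positions. Defined for reference only, never called.
def _example_padding():
    seqs = [torch.tensor([7, 8, 9]), torch.tensor([1, 2, 3, 4, 5])]
    padded, mask = padding(seqs, pad_token=0)
    # padded -> [[7, 8, 9, 0, 0], [1, 2, 3, 4, 5]]
    # mask   -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
    return padded, mask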
def collate_fn(batch, pad_token, features=None):
input_ids_list, token_type_ids_list, lm_labels_list, i3d_list = [], [], [], []
for i in batch:
input_ids_list.append(i[0])
token_type_ids_list.append(i[1])
lm_labels_list.append(i[2])
token_type_ids, input_mask = padding(token_type_ids_list, pad_token)
lm_labels, _ = padding(lm_labels_list, -1)
return input_ids_list, token_type_ids, lm_labels, input_mask
def pad_dataset(dataset, padding=0):
"""Pad the dataset.
This could be optimized by defining a Dataset class and pad only
batches but this is simpler.
"""
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [
x + [padding if name != "labels" else -1] * (max_l - len(x))
for x in dataset[name]
]
return dataset
|
comet_memory_dialog-main
|
models/gpt2_mm/dataset_memory.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
import copy
import json
import logging
import random
import time
from argparse import ArgumentParser
from itertools import chain
import os
from pprint import pformat
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from transformers import *
from VideoGPT2 import *
from dataset import build_input_from_segments
from dataset_memory import get_dataset
def top_filtering(
logits, top_k=0, top_p=0.0, threshold=-float("Inf"), filter_value=-float("Inf")
):
"""Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
assert (
logits.dim() == 1
) # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
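# Illustrative sketch (not part of the original file): top-k filtering of a toy logit
# vector before sampling, mirroring how sample_sequence below uses top_filtering.
# Defined for reference only, never called.
def _example_top_filtering():
    logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
    filtered = top_filtering(logits, top_k=2, top_p=0.0)
    # only the two largest logits survive; the rest are set to -inf
    probs = F.softmax(filtered, dim=-1)
    return torch.multinomial(probs, 1)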
def sample_sequence(
instance,
tokenizer,
model,
args,
feature_map,
current_output=None,
):
special_tokens_ids = tokenizer.convert_tokens_to_ids(["<EOAC>", "<EOS>"])
if current_output is None:
current_output = []
context_embeds = None
for time_step in range(args.max_length):
input_ids = []
# For the first time step, work on context_tokens, context_token_types.
if context_embeds is None:
context_embeds = []
for ii in instance["context_tokens"]:
if isinstance(ii, list):
context_embeds.append(
model.transformer.wte(torch.Tensor(ii).long().to(args.device))
)
else:
memory_features = np.load(
ii["memory_feature_path"], allow_pickle=True
)[()]["features"]
memory_embeds = model.video_ff(
torch.Tensor(memory_features).to(args.device)
)
context_embeds.append(memory_embeds)
context_embeds = torch.cat(context_embeds)
context_token_type_ids = (
torch.Tensor(instance["context_token_types"]).long().to(args.device)
)
context_embeds = context_embeds.unsqueeze(0)
context_token_type_ids = context_token_type_ids.unsqueeze(0)
else:
new_context_embed = model.transformer.wte(
torch.Tensor([current_output[-1]]).long().to(args.device)
).unsqueeze(0)
context_embeds = torch.cat([context_embeds, new_context_embed], dim=1)
context_token_type_ids = torch.cat(
[
context_token_type_ids,
context_token_type_ids[0][-1].clone().view(1, -1),
],
dim=1,
)
logits = model(context_embeds, token_type_ids=context_token_type_ids)
if "gpt2" == args.model:
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = (
torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
)
if (time_step < args.min_length) and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
def beam_search(
caption, history, tokenizer, model, args, current_output=None, video=None
):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
hyplist = [([], 0.0, current_output)]
best_state = None
comp_hyplist = []
for i in range(args.max_length):
new_hyplist = []
argmin = 0
for out, lp, st in hyplist:
instance, sequence = build_input_from_segments(
caption, history, st, tokenizer, with_eos=False, drop_caption=False
)
input_ids = torch.tensor(
instance["input_ids"], device=args.device
).unsqueeze(0)
token_type_ids = torch.tensor(
instance["token_type_ids"], device=args.device
).unsqueeze(0)
input_embs = model.transformer.wte(input_ids)
if video is not None:
input_embs = torch.cat([model.video_ff(video), input_embs], dim=1)
token_type_ids = torch.cat(
[
torch.ones((1, video.size(1))).long().cuda()
* tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-2]),
token_type_ids,
],
dim=1,
)
logits = model(input_embs, token_type_ids=token_type_ids)
if "gpt2" == args.model:
logits = logits[0]
logp = F.log_softmax(logits, dim=-1)[:, -1, :]
lp_vec = logp.cpu().data.numpy() + lp
lp_vec = np.squeeze(lp_vec)
if i >= args.min_length:
new_lp = lp_vec[tokenizer.eos_token_id] + args.penalty * (len(out) + 1)
comp_hyplist.append((out, new_lp))
if best_state is None or best_state < new_lp:
best_state = new_lp
count = 1
for o in np.argsort(lp_vec)[::-1]:
if o == tokenizer.unk_token_id or o == tokenizer.eos_token_id:
continue
new_lp = lp_vec[o]
if len(new_hyplist) == args.beam_size:
if new_hyplist[argmin][1] < new_lp:
new_st = copy.deepcopy(st)
new_st.append(int(o))
new_hyplist[argmin] = (out + [o], new_lp, new_st)
argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
else:
break
else:
new_st = copy.deepcopy(st)
new_st.append(int(o))
new_hyplist.append((out + [o], new_lp, new_st))
if len(new_hyplist) == args.beam_size:
argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
count += 1
hyplist = new_hyplist
if len(comp_hyplist) > 0:
maxhyps = sorted(comp_hyplist, key=lambda h: -h[1])[:1]
return maxhyps
else:
return [([], 0)]
def greedy_decode(
caption, history, tokenizer, model, args, current_output=None, video=None
):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
ys = []
for i in range(args.max_length):
instance, sequence = build_input_from_segments(
caption, history, ys, tokenizer, with_eos=False, drop_caption=False
)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(
instance["token_type_ids"], device=args.device
).unsqueeze(0)
input_embs = model.transformer.wte(input_ids)
if video is not None:
input_embs = torch.cat([model.video_ff(video), input_embs], dim=1)
token_type_ids = torch.cat(
[
torch.ones((1, video.size(1))).long().cuda()
* tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-2]),
token_type_ids,
],
dim=1,
)
logits = model(input_embs, token_type_ids=token_type_ids)
if "gpt2" == args.model:
logits = logits[0][0]
logits = logits.cpu().data.numpy()
next_word = np.argsort(logits[-1])[-1]
if next_word == special_tokens_ids[1]:
break
ys.append(next_word)
return ys
# Evaluation routine
def generate_response(model, data, dataset, feature_map, tokenizer, args, ref_data=None):
result_dialogs = []
model.eval()
with torch.no_grad():
iterator = tqdm.tqdm(enumerate(dataset), desc="Generating responses")
for index, instance in iterator:
# logging.info(f"{index}:")
# logging.info("QS: " + instance["predict"])
# prepare input data
start_time = time.time()
if args.beam_search:
raise NotImplementedError("Beam search is not supported!")
                # NOTE: unreachable while the NotImplementedError above is raised;
                # the `i3d` video features would also need to be prepared here.
                hypstr = beam_search(
                    dataset[index]["caption"],
                    dataset[index]["history"],
                    tokenizer,
                    model,
                    args,
                    video=i3d,
                )
hypstr = hypstr[0][0]
else:
hypstr = sample_sequence(
instance,
tokenizer,
model,
args,
feature_map,
)
hypstr = tokenizer.decode(hypstr, skip_special_tokens=False)
# logging.info("HYP: " + hypstr)
# Create an instance dictionary.
instance_result = {
"dialog_id": instance["dialog_id"],
"turn_id": instance["turn_id"],
"model_prediction": hypstr,
"type": instance["type"],
}
result_dialogs.append(instance_result)
# logging.info("ElapsedTime: %f" % (time.time() - start_time))
# logging.info("-----------------------")
return result_dialogs
def read_commandline_options():
parser = ArgumentParser()
parser.add_argument(
"--model", type=str, default="gpt2", help="Model type (gpt or gpt2)"
)
parser.add_argument(
"--model_checkpoint",
type=str,
default="log_without_caption_with_valid/",
help="Path, url or short name of the model",
)
parser.add_argument(
"--model_epoch", type=int, default=-1, help="Epoch to chose for a given folder"
)
parser.add_argument(
"--max_history",
type=int,
default=3,
help="Number of previous utterances to keep in history",
)
parser.add_argument(
"--device",
type=str,
default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)",
)
parser.add_argument(
"--no_sample",
action="store_true",
help="Set to use greedy decoding instead of sampling",
)
parser.add_argument(
"--beam_search",
action="store_true",
help="Set to use beam search instead of sampling",
)
parser.add_argument("--beam_size", type=int, default=5, help="Beam size")
parser.add_argument(
"--max_length",
type=int,
default=100,
help="Maximum length of the output utterances",
)
parser.add_argument(
"--min_length",
type=int,
default=1,
help="Minimum length of the output utterances",
)
parser.add_argument("--penalty", type=float, default=0.3, help="elngth penalty")
parser.add_argument("--seed", type=int, default=42, help="Seed")
    parser.add_argument(
        "--temperature", type=float, default=0.7, help="Sampling softmax temperature"
    )
parser.add_argument(
"--visual_feature_width",
type=int,
default=10,
help="Feature width for each image; 10 - BUTD; 1 - others"
)
parser.add_argument(
"--visual_feature_size",
type=int,
default=2053,
help="Feature size for each image; 2053 - BUTD; 512 - CLIP",
)
parser.add_argument(
"--feature_path", type=str, default="data/", help="Path to features"
)
parser.add_argument(
"--top_k",
type=int,
default=0,
help="Filter top-k tokens before sampling (<=0: no filtering)",
)
parser.add_argument(
"--top_p",
type=float,
default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)",
)
parser.add_argument("--test_set", type=str, default="data/test_set4DSTC8-AVSD.json")
parser.add_argument(
"--lbl_test_set",
type=str,
default="data/lbl_undisclosedonly_test_set4DSTC7-AVSD.json",
)
parser.add_argument(
"--special_tokens_path",
type=str,
required=True,
help="Path tp the special tokens used in training/evaluation",
)
parser.add_argument("--output", type=str, default="result.json")
# args = parser.parse_args()
args, unknown = parser.parse_known_args()
return args, parser, unknown
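# Example invocation (illustrative only; all paths below are placeholders, not
# files shipped with the repo):
#   python generate.py \
#       --model_checkpoint log_without_caption_with_valid/ \
#       --special_tokens_path data/mem_dials_gpt2_special_tokens.json \
#       --test_set data/test_set4DSTC8-AVSD.json \
#       --no_sample \
#       --output result.json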
def generate(args):
for arg in vars(args):
print("{}={}".format(arg, getattr(args, arg)))
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s"
)
logging.info("Loading model params from " + args.model_checkpoint)
tokenizer_class = GPT2Tokenizer if "gpt2" == args.model else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
with open(args.special_tokens_path, "r") as file_id:
special_tokens_dict = json.load(file_id)
tokenizer.add_special_tokens(special_tokens_dict)
model_class = VideoGPT2LMHeadModel if "gpt2" == args.model else OpenAIGPTLMHeadModel
model_config = GPT2Config.from_pretrained(args.model_checkpoint)
    # -1 (the default) loads weights directly from the checkpoint folder instead of a specific epoch.
    if args.model_epoch != -1:
model = model_class.from_pretrained(
os.path.join(args.model_checkpoint, f"checkpoint_mymodel_{args.model_epoch}.pth"),
config=model_config,
# custom_args={"visual_feature_size": args.visual_feature_size}
)
else:
model = model_class.from_pretrained(args.model_checkpoint, config=model_config)
model.to(args.device)
model.eval()
logging.info("Loading test data from " + args.test_set)
test_data = json.load(open(args.test_set, "r"))
test_dataset, feature_map = get_dataset(
tokenizer,
args.test_set,
args.feature_path,
args.visual_feature_width,
)
# generate sentences
logging.info("-----------------------generate--------------------------")
start_time = time.time()
results = generate_response(model, test_data, test_dataset, feature_map, tokenizer, args)
logging.info("----------------")
logging.info("wall time = %f" % (time.time() - start_time))
if args.output:
logging.info("writing results to " + args.output)
with open(args.output, "w") as file_id:
json.dump(results, file_id)
logging.info("done")
# main
if __name__ == "__main__":
args, _, _ = read_commandline_options()
generate(args)
|
comet_memory_dialog-main
|
models/gpt2_mm/generate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from transformers import *
import math
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
def gelu(x):
return (
0.5
* x
* (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
)
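# Note: this is the tanh approximation of GELU; recent PyTorch releases expose the
# same approximation as torch.nn.functional.gelu(x, approximate="tanh").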
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)
)
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_head, self.split_size // self.n_head)
heads = (
set(heads) - self.pruned_heads
        )  # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
index_attn = torch.cat(
[index, index + self.split_size, index + (2 * self.split_size)]
)
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd : ns, :ns]
# w = w * b - 1e18 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
b = torch.gt(b + attention_mask[0], 0).float()
w = w * b - 1e18 * (1 - b)
w = w - 1e18 * (1 - attention_mask[1])
else:
w = w * b - 1e18 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = (
layer_past[0].transpose(-2, -1),
layer_past[1],
) # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack(
(key.transpose(-2, -1), value)
) # transpose to have same shapes for stacking
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
output_attn = self.attn(
self.ln_1(x),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class VideoGPT2Model(GPT2Model):
def __init__(self, config):
super(VideoGPT2Model, self).__init__(config)
self.h = nn.ModuleList(
[Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]
)
def forward(
self,
input_embs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(
past_length,
input_embs.size(-2) + past_length,
dtype=torch.long,
device=input_embs.device,
)
position_ids = position_ids.unsqueeze(0).expand_as(input_embs[:, :, 0])
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask[0] = attention_mask[0].unsqueeze(1).unsqueeze(2)
attention_mask[1] = attention_mask[1].unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask[0] = attention_mask[0].to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
attention_mask[1] = attention_mask[1].to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
# attention_mask = (1.0 - attention_mask) * -1e18
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
input_shape = input_embs.size()[:2]
# input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
# inputs_embeds = self.wte(input_ids)
inputs_embeds = input_embs
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (
hidden_states.view(*output_shape),
)
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = (
input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
)
all_attentions = tuple(
t.view(*attention_output_shape) for t in all_attentions
)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
class VideoGPT2LMHeadModel(GPT2PreTrainedModel):
# def __init__(self, config, **kwargs):
# super(VideoGPT2LMHeadModel, self).__init__(config)
# self.config = config
# self.transformer = VideoGPT2Model(config)
# self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# self.video_ff = nn.Linear(
# kwargs["custom_args"]["visual_feature_size"],
# config.n_embd
# )
# self.video_inverse_ff = nn.Linear(
# config.n_embd, kwargs["custom_args"]["visual_feature_size"]
# )
# self.init_weights()
# self.tie_weights()
def __init__(self, config):
super(VideoGPT2LMHeadModel, self).__init__(config)
self.transformer = VideoGPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# BUTD - 2053.
self.video_ff = nn.Linear(2053, config.n_embd)
self.video_inverse_ff = nn.Linear(config.n_embd, 2053)
# CLIP - 512.
# self.video_ff = nn.Linear(512, config.n_embd)
# self.video_inverse_ff = nn.Linear(config.n_embd, 512)
self.init_weights()
self.tie_weights()
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head, self.transformer.wte)
def forward(
self,
input_embs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
labels=None,
mode="reply",
):
transformer_outputs = self.transformer(
input_embs,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
if mode == "reply":
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[0][..., 1:].contiguous()
# Flatten the tokens
loss_text_fct = CrossEntropyLoss(ignore_index=-1)
loss_text = loss_text_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
)
loss = loss_text
else:
lm_video_regs = self.video_inverse_ff(
hidden_states[:, : labels[1].size(1), :]
)
shift_video_regs = lm_video_regs[..., :-1, :].contiguous()
shift_video_labels = labels[1][..., :-1, :].contiguous()
                loss_video_fct = MSELoss(reduction="mean")  # equivalent to the deprecated reduce/size_average flags
loss_video = loss_video_fct(shift_video_regs, shift_video_labels)
loss = loss_video
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
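# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how callers combine visual
# features with token embeddings before invoking VideoGPT2LMHeadModel, mirroring
# the pattern in generate.py / train.py. Shapes below are assumptions for a
# single BUTD-featurized image with 10 boxes.
#
#   video_feats = torch.rand(1, 10, 2053)          # (batch, boxes, BUTD feature dim)
#   video_embs = model.video_ff(video_feats)       # -> (1, 10, n_embd)
#   token_embs = model.transformer.wte(input_ids)  # -> (1, seq_len, n_embd)
#   input_embs = torch.cat([video_embs, token_embs], dim=1)
#   lm_logits = model(input_embs, token_type_ids=token_type_ids)[0]
# ---------------------------------------------------------------------------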
|
comet_memory_dialog-main
|
models/gpt2_mm/VideoGPT2.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# coding: utf-8
# author: noctli
import json
import pickle
from itertools import chain
import numpy as np
import torch
import torch.utils.data
from torch.utils.data import Dataset
# from train import SPECIAL_TOKENS, MODEL_INPUTS, PADDED_INPUTS
SPECIAL_TOKENS = [
"<bos>",
"<eos>",
"<speaker1>",
"<speaker2>",
"<cap>",
"<video>",
"<pad>",
]
SPECIAL_TOKENS_DICT = {
"bos_token": "<bos>",
"eos_token": "<eos>",
"additional_special_tokens": ["<speaker1>", "<speaker2>", "<video>", "<cap>"],
"pad_token": "<pad>",
}
MODEL_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
PADDED_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
def tokenize(obj, tokenizer):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
    if isinstance(obj, dict):
        return dict((n, tokenize(o, tokenizer)) for n, o in obj.items())
    return list(tokenize(o, tokenizer) for o in obj)
def get_dataset(
tokenizer, data_file, feature_path=None, undisclosed_only=False, n_history=3
):
dialog_data = json.load(open(data_file, "r"))
dialog_list = []
vid_set = set()
for dialog in dialog_data["dialogs"]:
caption = [tokenize(dialog["caption"], tokenizer)] + [
tokenize(dialog["summary"], tokenizer)
]
questions = [tokenize(d["question"], tokenizer) for d in dialog["dialog"]]
answers = [tokenize(d["answer"], tokenizer) for d in dialog["dialog"]]
vid = dialog["image_id"]
vid_set.add(vid)
if undisclosed_only:
it = range(len(questions) - 1, len(questions))
else:
it = range(len(questions))
qalist = []
history = []
if undisclosed_only:
for n in range(len(questions) - 1):
qalist.append(questions[n])
qalist.append(answers[n])
history = qalist[max(-len(qalist), -n_history * 2) :]
for n in it:
if undisclosed_only:
assert dialog["dialog"][n]["answer"] == "__UNDISCLOSED__"
question = questions[n]
answer = answers[n]
history.append(question)
if n_history == 0:
item = {
"vid": vid,
"history": [question],
"answer": answer,
"caption": caption,
}
else:
item = {
"vid": vid,
"history": history,
"answer": answer,
"caption": caption,
}
dialog_list.append(item)
qalist.append(question)
qalist.append(answer)
history = qalist[max(-len(qalist), -n_history * 2) :]
all_features = {}
if feature_path is not None:
fea_types = ["vggish", "i3d_flow", "i3d_rgb"]
dataname = "<FeaType>/<ImageID>.npy"
for ftype in fea_types:
if undisclosed_only:
basename = dataname.replace("<FeaType>", ftype + "_testset")
else:
basename = dataname.replace("<FeaType>", ftype)
features = {}
for vid in vid_set:
filename = basename.replace("<ImageID>", vid)
filepath = feature_path + filename
features[vid] = (filepath, filepath)
all_features[ftype] = features
return dialog_list, all_features
return dialog_list
class AVSDDataSet(Dataset):
def __init__(self, dialogs, tokenizer, features=None, drop_rate=0.5, train=True):
self.dialogs = dialogs
self.features = features
self.tokenizer = tokenizer
self.drop_rate = drop_rate
self.train = train
def __len__(self):
return len(self.dialogs)
def __getitem__(self, index):
dialog = self.dialogs[index]
vid = dialog["vid"]
his = self.dialogs[index]["history"]
cap = self.dialogs[index]["caption"]
ans = self.dialogs[index]["answer"]
if np.random.rand() < self.drop_rate:
instance, _ = build_input_from_segments(
cap,
his,
ans,
self.tokenizer,
video=False,
drop_caption=True,
train=self.train,
)
else:
instance, _ = build_input_from_segments(
cap,
his,
ans,
self.tokenizer,
video=False,
drop_caption=False,
train=self.train,
)
input_ids = torch.Tensor(instance["input_ids"]).long()
token_type_ids = torch.Tensor(instance["token_type_ids"]).long()
lm_labels = torch.Tensor(instance["lm_labels"]).long()
if self.features is not None:
try:
vgg = np.load(self.features[0]["vggish"][vid][0])
i3d_flow = np.load(self.features[0]["i3d_flow"][vid][0])
i3d_rgb = np.load(self.features[0]["i3d_rgb"][vid][0])
except KeyError:
vgg = np.load(self.features[1]["vggish"][vid][0])
i3d_flow = np.load(self.features[1]["i3d_flow"][vid][0])
i3d_rgb = np.load(self.features[1]["i3d_rgb"][vid][0])
sample_i3d_flow = i3d_flow[range(1, i3d_flow.shape[0], 1)]
sample_i3d_rgb = i3d_rgb[range(1, i3d_rgb.shape[0], 1)]
vgg = torch.from_numpy(vgg).float()
i3d_flow = torch.from_numpy(sample_i3d_flow).float()
i3d_rgb = torch.from_numpy(sample_i3d_rgb).float()
min_length = min([i3d_flow.size(0), i3d_rgb.size(0), vgg.size(0)])
i3d = torch.cat(
[i3d_flow[:min_length], i3d_rgb[:min_length], vgg[:min_length]], dim=1
)
return input_ids, token_type_ids, lm_labels, i3d
else:
return input_ids, token_type_ids, lm_labels
def collate_fn(batch, pad_token, features=None):
def padding(seq, pad_token):
max_len = max([i.size(0) for i in seq])
if len(seq[0].size()) == 1:
result = torch.ones((len(seq), max_len)).long() * pad_token
else:
result = torch.ones((len(seq), max_len, seq[0].size(-1))).float()
for i in range(len(seq)):
result[i, : seq[i].size(0)] = seq[i]
return result
input_ids_list, token_type_ids_list, lm_labels_list, i3d_list = [], [], [], []
for i in batch:
input_ids_list.append(i[0])
token_type_ids_list.append(i[1])
lm_labels_list.append(i[2])
if features is not None:
i3d_list.append(i[3])
input_ids = padding(input_ids_list, pad_token)
token_type_ids = padding(token_type_ids_list, pad_token)
lm_labels = padding(lm_labels_list, -1)
input_mask = input_ids != pad_token
if features is not None:
i3d = padding(i3d_list, pad_token)
i3d_mask = torch.sum(i3d != 1, dim=2) != 0
input_mask = torch.cat([i3d_mask, input_mask], dim=1)
i3d_labels = torch.ones((i3d.size(0), i3d.size(1))).long() * -1
video_mask = torch.cat(
[torch.zeros((i3d.size(0), i3d.size(1))), torch.ones(lm_labels.size())], 1
)
reply_mask = torch.zeros(video_mask.size())
lm_labels = torch.cat([i3d_labels, lm_labels], dim=1)
return (
input_ids,
token_type_ids,
lm_labels,
input_mask,
i3d,
video_mask,
reply_mask,
)
else:
return input_ids, token_type_ids, lm_labels, input_mask
def pad_dataset(dataset, padding=0):
"""Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler."""
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
        dataset[name] = [
            x + [padding if name != "lm_labels" else -1] * (max_l - len(x))
            for x in dataset[name]
        ]
return dataset
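# Illustrative example (not part of the original file):
#   dataset = {
#       "input_ids": [[5, 6, 7], [8]],
#       "token_type_ids": [[1, 1, 1], [1]],
#       "lm_labels": [[5, 6, 7], [8]],
#   }
#   pad_dataset(dataset, padding=0)
#   # "input_ids" / "token_type_ids" are padded with 0:  [[5, 6, 7], [8, 0, 0]]
#   # "lm_labels" are padded with -1 (ignored by the loss): [[5, 6, 7], [8, -1, -1]]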
def build_input_from_segments(
caption,
history,
reply,
tokenizer,
with_eos=True,
video=False,
drop_caption=False,
train=True,
):
"""Build a sequence of input from 3 segments: caption(caption+summary) history and last reply"""
bos, eos, speaker1, speaker2, cap = tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-2]
)
if not drop_caption:
instance = {}
sequence = (
[[bos] + list(chain(*caption))]
+ history
+ [reply + ([eos] if with_eos else [])]
)
sequence = [[cap] + sequence[0] + [eos]] + [
[speaker2 if (len(sequence) - i) % 2 else speaker1] + s
for i, s in enumerate(sequence[1:])
]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [cap] * len(sequence[0]) + [
speaker2 if i % 2 else speaker1
for i, s in enumerate(sequence[1:])
for _ in s
]
if video and train:
# instance["lm_labels"] = sequence[0] + ([-1]*sum(len(s) for s in sequence[1:-1])) + sequence[-1]
instance["lm_labels"] = (
sequence[0]
+ ([-1] * sum(len(s) for s in sequence[1:-1]))
+ sequence[-1]
)
else:
instance["lm_labels"] = (
[-1] * sum(len(s) for s in sequence[:-1])
) + sequence[-1]
else:
instance = {}
sequence = history + [reply + ([eos] if with_eos else [])]
sequence = [
[speaker2 if (len(sequence) - i) % 2 else speaker1] + s
for i, s in enumerate(sequence)
]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [
speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s
]
if video:
instance["lm_labels"] = (
[-1] * sum(len(s) for s in sequence[:-1])
) + sequence[-1]
else:
instance["lm_labels"] = (
[-1] * sum(len(s) for s in sequence[:-1])
) + sequence[-1]
return instance, sequence
|
comet_memory_dialog-main
|
models/gpt2_mm/dataset.py
|
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
import json
import logging
import math
import os
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
from pprint import pformat
import torch
from ignite.contrib.handlers import PiecewiseLinear, ProgressBar
from ignite.contrib.handlers.tensorboard_logger import (
OptimizerParamsHandler,
OutputHandler,
TensorboardLogger,
)
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from transformers import *
from VideoGPT2 import *
import pickle as pkl
from dataset_memory import collate_fn, get_dataset, MemoryDialogDataset, padding
MODEL_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
PADDED_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, args):
"""Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation."""
if args.local_rank == -1:
return scalar
scalar_t = (
torch.tensor(scalar, dtype=torch.float, device=args.device)
/ torch.distributed.get_world_size()
)
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def get_data_loaders_new(args, tokenizer):
train_data = get_dataset(
tokenizer,
args.train_path,
args.feature_path,
args.visual_feature_width,
)
# with open("train_data_gpt2.pkl", "rb") as f:
# train_data = pkl.load(f)
# pkl.dump(train_data, f)
valid_data = get_dataset(
tokenizer,
args.valid_path,
args.feature_path,
args.visual_feature_width,
)
# with open("valid_data_gpt2.pkl", "rb") as f:
# valid_data = pkl.load(f)
# pkl.dump(valid_data, f)
train_dataset = MemoryDialogDataset(
train_data[0],
tokenizer,
(train_data[1], valid_data[1]),
drop_rate=0,
train=True,
)
valid_dataset = MemoryDialogDataset(
valid_data[0],
tokenizer,
(valid_data[1], train_data[1]),
drop_rate=0,
train=False,
)
# for ii in range(len(train_dataset)):
# train_dataset[ii]
# batch = [train_dataset[ii] for ii in range(3)]
# features = True
# collate_fn(batch, tokenizer.pad_token_id, features=features)
# NOTE: FIX this later.
# features = None if args.video_agnostic else True
features = True
train_loader = DataLoader(
train_dataset,
batch_size=args.train_batch_size,
num_workers=1,
shuffle=(not args.distributed),
collate_fn=lambda x: collate_fn(x, tokenizer.pad_token_id, features=features),
)
valid_loader = DataLoader(
valid_dataset,
batch_size=args.valid_batch_size,
num_workers=1,
shuffle=False,
collate_fn=lambda x: collate_fn(x, tokenizer.pad_token_id, features=features),
)
return train_loader, valid_loader
def read_commandline_options():
parser = ArgumentParser()
parser.add_argument(
"--train_path",
type=str,
default="data/train_set4DSTC7-AVSD.json",
help="Path of the trainset",
)
parser.add_argument(
"--feature_path", type=str, default="data/", help="Path to features"
)
parser.add_argument(
"--valid_path",
type=str,
default="data/valid_set4DSTC7-AVSD.json",
help="Path of the validset",
)
parser.add_argument(
"--special_tokens_path",
type=str,
required=True,
help="Path to the special tokens for training",
)
parser.add_argument(
"--model_checkpoint",
type=str,
default="gpt2",
help="Path, url or short name of the model",
)
parser.add_argument(
"--max_history",
type=int,
default=3,
help="Number of previous exchanges to keep in history",
)
parser.add_argument(
"--visual_feature_width",
type=int,
default=10,
help="Feature width for each image; 10 - BUTD; 1 - others"
)
parser.add_argument(
"--visual_feature_size",
type=int,
default=2053,
help="Feature size for each image; 2053 - BUTD; 512 - CLIP",
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size for training"
)
parser.add_argument(
"--valid_batch_size", type=int, default=4, help="Batch size for validation"
)
parser.add_argument(
"--drop_rate", type=float, default=0.5, help="drop rate for caption"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=8,
help="Accumulate gradients on several steps",
)
parser.add_argument("--lr", type=float, default=6.25e-5, help="Learning rate")
parser.add_argument(
"--max_norm", type=float, default=1.0, help="Clipping gradient norm"
)
parser.add_argument(
"--n_epochs", type=int, default=8, help="Number of training epochs"
)
parser.add_argument(
"--eval_before_start",
action="store_true",
help="If true start with a first evaluation before training",
)
parser.add_argument(
"--dataloader_dry_run",
action="store_true",
help="Flag to set only dataloader components",
)
parser.add_argument(
"--video_agnostic",
action="store_true",
help="Ignore video features",
)
parser.add_argument(
"--predict_belief_state", action="store_true", help="Predict belief state"
)
parser.add_argument(
"--device",
type=str,
default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)",
)
parser.add_argument(
"--fp16",
type=str,
default="",
help="Set to O0, O1, O2 or O3 for fp16 training (see apex documentation)",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="Local rank for distributed training (-1: not distributed)",
)
parser.add_argument("--log_path", type=str, default="log/", help="Log path")
# args = parser.parse_args()
args, unknown = parser.parse_known_args()
return args, parser, unknown
def train(args):
if not os.path.exists(args.log_path):
os.makedirs(args.log_path)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN
)
logger.warning(
"Running process %d", args.local_rank
) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(args))
# Initialize distributed training if needed
args.distributed = args.local_rank != -1
if args.distributed:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
logger.info(
"Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning"
)
tokenizer_class = GPT2Tokenizer
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
# Read special tokens from the file.
with open(args.special_tokens_path, "r") as file_id:
special_tokens_dict = json.load(file_id)
tokenizer.add_special_tokens(special_tokens_dict)
if not args.dataloader_dry_run:
model_class = VideoGPT2LMHeadModel
model = model_class.from_pretrained(args.model_checkpoint)
# model_config = model_class.config_class.from_pretrained(args.model_checkpoint)
# model = model_class(
# model_config,
# custom_args={"visual_feature_size": args.visual_feature_size},
# )
model.resize_token_embeddings(len(tokenizer))
model.to(args.device)
optimizer = AdamW(model.parameters(), lr=args.lr)
# Prepare model for FP16 and distributed training if needed
# (order is important, distributed should be the last)
if args.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16)
if args.distributed:
model = DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank
)
logger.info("Prepare datasets")
train_loader, val_loader = get_data_loaders_new(args, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
# Process the input_tokens for the batch.
input_embeds = []
for datum in batch[0]:
instance_embeds = []
for datum_input in datum:
if isinstance(datum_input, dict):
datum_output = model.video_ff(
torch.Tensor(datum_input["features"]).to(args.device)
)
else:
datum_output = model.transformer.wte(datum_input.to(args.device))
instance_embeds.append(datum_output)
input_embeds.append(torch.cat(instance_embeds))
input_embeds, _ = padding(input_embeds, tokenizer.pad_token_id)
token_type_ids = batch[1].to(args.device)
lm_labels = batch[2].to(args.device)
input_mask = batch[3].to(args.device)
reply_mask = torch.zeros(
input_mask.size(), dtype=input_mask.dtype, device=input_mask.device
)
reply_loss = model(
input_embeds,
token_type_ids=token_type_ids,
labels=(lm_labels, None),
attention_mask=[reply_mask, input_mask],
mode="reply",
)[0]
loss = reply_loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
if engine.state.iteration % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
# Process the input_tokens for the batch.
input_embeds = []
for datum in batch[0]:
instance_embeds = []
for datum_input in datum:
if isinstance(datum_input, dict):
datum_output = model.video_ff(
torch.Tensor(datum_input["features"]).to(args.device)
)
else:
datum_output = model.transformer.wte(
datum_input.to(args.device)
)
instance_embeds.append(datum_output)
input_embeds.append(torch.cat(instance_embeds))
input_embeds, _ = padding(input_embeds, tokenizer.pad_token_id)
token_type_ids = batch[1].to(args.device)
lm_labels = batch[2].to(args.device)
input_mask = batch[3].to(args.device)
reply_mask = torch.zeros(
input_mask.size(), dtype=input_mask.dtype, device=input_mask.device
)
model_outputs = model(
input_embeds,
token_type_ids=token_type_ids,
attention_mask=[reply_mask, input_mask],
mode="reply",
)[0]
lm_logits = model_outputs # So we can also use GPT2 outputs
lm_logits_flat_shifted = (
lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
)
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return lm_logits_flat_shifted, lm_labels_flat_shifted
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(
Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader)
)
if args.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if args.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(
optimizer, "lr", [(0, args.lr), (args.n_epochs * len(train_loader), 0.0)]
)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {
"nll": Loss(
torch.nn.CrossEntropyLoss(ignore_index=-1),
output_transform=lambda x: (x[0], x[1]),
)
}
metrics.update(
{"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], args)}
)
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and
# save model, configuration and tokenizer before we start to train
if args.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(
Events.COMPLETED,
lambda _: pbar.log_message(
"Validation: %s" % pformat(evaluator.state.metrics)
),
)
tb_logger = TensorboardLogger(log_dir="./tb_logs")
tb_logger.attach(
trainer,
log_handler=OutputHandler(tag="training", metric_names=["loss"]),
event_name=Events.ITERATION_COMPLETED,
)
tb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED,
)
tb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=list(metrics.keys()),
another_engine=trainer,
),
event_name=Events.EPOCH_COMPLETED,
)
checkpoint_handler = ModelCheckpoint(
args.log_path, "checkpoint", save_interval=1, n_saved=args.n_epochs, require_empty=False
)
trainer.add_event_handler(
Events.EPOCH_COMPLETED,
checkpoint_handler,
{"mymodel": getattr(model, "module", model)},
) # "getattr" take care of distributed encapsulation
torch.save(args, args.log_path + "model_training_args.bin")
getattr(model, "module", model).config.to_json_file(
os.path.join(args.log_path, CONFIG_NAME)
)
tokenizer.save_vocabulary(args.log_path)
# Run the training
trainer.run(train_loader, max_epochs=args.n_epochs)
# On the main process: close tensorboard logger and rename the last
# checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if args.local_rank in [-1, 0] and args.n_epochs > 0:
os.rename(
checkpoint_handler._saved[-1][1][-1],
os.path.join(args.log_path, WEIGHTS_NAME),
) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
args, _, _ = read_commandline_options()
train(args)
|
comet_memory_dialog-main
|
models/gpt2_mm/train.py
|
#! /usr/bin/env python
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Create API and MM-DST result JSONS from model result file.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import collections
import copy
import json
import ast
import re
def parse_flattened_result(to_parse):
"""
Parse out the belief state from the raw text.
Return an empty list if the belief state can't be parsed
Input:
- A single <str> of flattened result
e.g. 'User: Show me something else => Belief State : DA:REQUEST ...'
Output:
- Parsed result in a JSON format, where the format is:
[
{
'act': <str> # e.g. 'DA:REQUEST',
'slots': [
<str> slot_name,
<str> slot_value
]
}, ... # End of a frame
] # End of a dialog
"""
dialog_act_regex = re.compile(r"([\w:?.?]*) *\[(.*)\] *\(([^\]]*)\) *\<([^\]]*)\>")
slot_regex = re.compile(r"([A-Za-z0-9_.-:]*) *= *(\[([^\]]*)\]|[^,]*)")
request_regex = re.compile(r"([A-Za-z0-9_.-:]+)")
object_regex = re.compile(r"([A-Za-z0-9]+)")
belief = []
# Parse
to_parse = to_parse.strip()
# to_parse: 'DIALOG_ACT_1 : [ SLOT_NAME = SLOT_VALUE, ... ] ...'
for dialog_act in dialog_act_regex.finditer(to_parse):
d = {
"act": dialog_act.group(1),
"slots": {},
"request_slots": [],
"memories": [],
}
for slot in slot_regex.finditer(dialog_act.group(2)):
# If parsing python list eval it else keep unique string.
slot_name = slot.group(1).strip()
slot_values = slot.group(2).strip()
# If there are nones, replace them with Nones and later remove them.
            if re.match(r"\[.*\]", slot_values):
try:
slot_values = slot_values.replace("none", "None")
parsed_slot_values = ast.literal_eval(slot_values)
d["slots"][slot_name] = [ii for ii in parsed_slot_values if ii]
except:
# If error when parsing the slots add empty string
print(f"Error parsing: {to_parse}")
d["slots"][slot_name] = ""
else:
d["slots"][slot_name] = slot_values
for request_slot in request_regex.finditer(dialog_act.group(3)):
d["request_slots"].append(request_slot.group(1).strip())
for object_id in object_regex.finditer(dialog_act.group(4)):
d["memories"].append(object_id.group(1).strip())
if d != {}:
belief.append(d)
return belief
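# Illustrative usage (not part of the original file); the flattened string below
# is made up but follows the format described in the docstring above:
#   parse_flattened_result(
#       "DA:REQUEST [ time = 2021, location = park ] (participant) < 1035119 >"
#   )
#   # -> [{"act": "DA:REQUEST",
#   #      "slots": {"time": "2021", "location": "park"},
#   #      "request_slots": ["participant"],
#   #      "memories": ["1035119"]}]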
def create_result_jsons(results, test_data):
"""Creates two JSON files from results.
Args:
results: List of generated results from the model.
test_data: Raw JSON test file.
Returns:
response_results: Dict containing response results
dst_results: Dict containing DST results
"""
dst_results = copy.deepcopy(test_data)
response_results = collections.defaultdict(list)
dst_pool = {}
for instance in results:
dialog_id = instance["dialog_id"]
turn_id = instance["turn_id"]
if instance["type"] == "API":
index = (dialog_id, turn_id)
dst_pool[index] = instance
else:
if dialog_id not in response_results:
response_results[dialog_id] = {
"dialog_id": dialog_id,
"predictions": [],
}
response_results[dialog_id]["predictions"].append(
{
"turn_id": turn_id,
"response": instance["model_prediction"],
}
)
num_missing = 0
num_present = 0
for dialog_datum in dst_results["dialogue_data"]:
del dialog_datum["mentioned_memory_ids"]
del dialog_datum["memory_graph_id"]
dialog_id = dialog_datum["dialogue_idx"]
for datum in dialog_datum["dialogue"]:
turn_id = datum["turn_idx"]
index = (dialog_id, turn_id)
if index in dst_pool:
model_pred_datum = dst_pool[index]
model_pred = model_pred_datum["model_prediction"].strip(" ")
parsed_result = parse_flattened_result(model_pred)
datum["transcript_annotated"] = parsed_result
num_present += 1
else:
del datum["transcript_annotated"]
print(f"Missing! -- {index}")
num_missing += 1
print(f"Missing: {num_missing} Present: {num_present}")
return list(response_results.values()), dst_results
def main(args):
with open(args["memory_test_json"], "r") as file_id:
test_data = json.load(file_id)
with open(args["model_output_json"], "r") as file_id:
results = json.load(file_id)
response_results, dst_results = create_result_jsons(results, test_data)
# Save the results.
response_results_path = args["model_output_json"].replace(
".json", "_response_results.json"
)
with open(response_results_path, "w") as file_id:
json.dump(response_results, file_id)
dst_results_path = args["model_output_json"].replace(".json", "_dst_results.json")
with open(dst_results_path, "w") as file_id:
json.dump(dst_results, file_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--memory_test_json",
required=True,
help="JSON file for test data",
)
parser.add_argument(
"--model_output_json", required=True, help="JSON file with model outputs"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_mm/utils/create_result_jsons.py
|
#! /usr/bin/env python
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Extract BUTD features for memories.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import base64
import json
import os
import pickle
import lmdb
import numpy as np
from PIL import Image
import torch
import tqdm
FEATURE_REGISTRY = {}
def register(cls):
FEATURE_REGISTRY[cls.label] = cls
return cls
# Extracts top-down bottom-up image features.
@register
class ImageFeatureReader(object):
label = "butd"
def __init__(self, feature_path, max_bboxes=-1):
"""Reads BUTD image features.
Args:
feature_path: Path to read the image features.
max_bboxes: Maximum number of bounding boxes.
"""
self.reader = lmdb.open(
feature_path,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False,
)
with self.reader.begin(write=False) as file_ptr:
self.image_id_list = pickle.loads(file_ptr.get(b"keys"))
self.num_bboxes = max_bboxes
def __getitem__(self, image_id):
image_id = str(image_id).encode()
assert image_id in self.image_id_list, "Missing image_id!"
with self.reader.begin(write=False) as file_ptr:
item = pickle.loads(file_ptr.get(image_id))
num_boxes = int(item["num_boxes"])
features = np.frombuffer(
base64.b64decode(item["features"]), dtype=np.float32
).reshape(num_boxes, 2048)
boxes = np.frombuffer(
base64.b64decode(item["boxes"]), dtype=np.float32
).reshape(num_boxes, 4)
class_probs = np.frombuffer(
base64.b64decode(item["cls_prob"]), dtype=np.float32
).reshape(num_boxes, 1601)
features_dict = {
"features": features,
"bboxes": boxes,
"class_probs": class_probs,
"num_boxes": num_boxes,
"image_w": int(item["image_w"]),
"image_h": int(item["image_h"]),
}
if self.num_bboxes > 0:
features_dict = self.trim_butd_features(features_dict)
return features_dict
def trim_butd_features(self, features_dict):
"""Trim BUTD features based on class probability.
Args:
feature_dict: BUTD features for images
"""
# Get top class in each bbox and pick ones with highest class probability.
top_class_prob = np.max(features_dict["class_probs"], axis=1)
top_bboxes = np.argsort(-top_class_prob)[: self.num_bboxes]
# Modify the elements.
features_dict["bboxes"] = features_dict["bboxes"][top_bboxes]
features_dict["features"] = features_dict["features"][top_bboxes]
features_dict["num_boxes"] = self.num_bboxes
del features_dict["class_probs"]
return self.augment_butd_features(features_dict)
def augment_butd_features(self, features_dict):
"""Augment BUTD feature with spatial location relative to height x width."""
# Aliases.
image_w = features_dict["image_w"]
image_h = features_dict["image_h"]
location = np.zeros((features_dict["num_boxes"], 5), dtype=np.float32)
location[:, :4] = features_dict["bboxes"]
location[:, 4] = (
(location[:, 3] - location[:, 1])
* (location[:, 2] - location[:, 0])
/ (float(image_w) * float(image_h))
)
location[:, 0] = location[:, 0] / float(image_w)
location[:, 1] = location[:, 1] / float(image_h)
location[:, 2] = location[:, 2] / float(image_w)
location[:, 3] = location[:, 3] / float(image_h)
features = np.concatenate([features_dict["features"], location], axis=-1)
features_dict["features"] = features
return features_dict
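# Note: the 2048-dim BUTD region features plus the 5 spatial values appended above
# yield the 2053-dim vectors that the GPT-2 scripts expect for --visual_feature_size
# with BUTD features.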
# Extracts clip features.
@register
class CLIPFeatureExtractor(object):
"""Extracts visual features using CLIP architecture."""
label = "clip"
def __init__(self, image_folder):
"""Initializes the feature extractor.
Args:
image_folder: Path to the raw COCO images.
"""
self._device = "cuda" if torch.cuda.is_available() else "cpu"
self._model, self._preprocess = clip.load("ViT-B/32", device=self._device)
self._image_folder = image_folder
def __getitem__(self, image_id):
"""Extracts image features for a given image_id.
Args:
image_id: Corresponding MSCOCO image_id
"""
image_path = os.path.join(
self._image_folder, f"COCO_train2014_{image_id:012d}.jpg"
)
image = (
            self._preprocess(Image.open(image_path)).unsqueeze(0).to(self._device)
)
with torch.no_grad():
image_feature = self._model.encode_image(image)
return {
"features": image_feature.cpu().numpy(),
}
# Extracts clip features.
@register
class SWINFeatureExtractor(object):
"""Extracts visual features using SWIN architecture."""
label = "swin"
def __init__(self, image_folder):
"""Initializes the feature extractor.
Args:
image_folder: Path to the raw COCO images.
"""
self._use_gpu = torch.cuda.is_available()
self._model = timm.create_model(
"swin_base_patch4_window7_224",
pretrained=True,
num_classes=0,
)
self._image_folder = image_folder
def _prepare_image(self, image_path):
"""Given image path, load and prepare the image.
Args:
image_path: Path to the image to load
Returns:
image: Loaded image adjusted to the size
"""
image = Image.open(image_path)
image = np.array(image.resize((224, 224)), dtype=np.float32)
if image.ndim != 3:
image = np.stack([image, image, image], axis=2)
image = torch.as_tensor(image).transpose(2, 0)[None]
return image
def __getitem__(self, image_id):
"""Extracts image features for a given image_id.
Args:
image_id: Corresponding MSCOCO image_id
"""
image_path = os.path.join(
self._image_folder, f"COCO_train2014_{image_id:012d}.jpg"
)
image = self._prepare_image(image_path)
with torch.no_grad():
image_feature = self._model(image)
return {
"features": image_feature.cpu().numpy(),
}
def main(args):
memory_graphs = {}
for file_path in args["input_memory_json"]:
# print(f"Reading: {file_path}")
with open(file_path, "r") as file_id:
graph_data = json.load(file_id)
for datum in graph_data:
if datum["memory_graph_id"] in memory_graphs:
print("Multiple memory graph ids exist!")
else:
memory_graphs[datum["memory_graph_id"]] = datum
print(f"# memory dialogs: {len(memory_graphs)}")
memory_dialogs = {}
for file_path in args["input_dialog_json"]:
# print(f"Reading: {file_path}")
with open(file_path, "r") as file_id:
dialog_data = json.load(file_id)
for datum in dialog_data["dialogue_data"]:
dialog_id = datum["dialogue_idx"]
memory_dialogs[dialog_id] = datum
print(f"# dialogs: {len(memory_dialogs)}")
# Load image features and trim if necessary.
if args["feature_type"] == "butd":
feature_extractor = ImageFeatureReader(
args["input_feature_path"], args["max_bboxes"]
)
elif args["feature_type"] == "clip":
feature_extractor = CLIPFeatureExtractor(args["input_image_folder"])
elif args["feature_type"] == "swin":
feature_extractor = SWINFeatureExtractor(args["input_image_folder"])
else:
raise NotImplementedError(f"""Invalid type: {args["feature_type"]}!""")
progress_bar = tqdm.tqdm(memory_dialogs.items(), desc="Getting relevant images")
relevant_image_ids = set()
for dialog_id, datum in progress_bar:
assert datum["memory_graph_id"] in memory_graphs, "Memory graph missing!"
graph = memory_graphs[datum["memory_graph_id"]]
sample_memories = {}
for ii in graph["memories"]:
if ii["memory_id"] in datum["mentioned_memory_ids"]:
sample_memories[ii["memory_id"]] = ii
for mem_id, mem_datum in sample_memories.items():
relevant_image_ids.add((mem_id, mem_datum["media"][0]["media_id"]))
progress_bar = tqdm.tqdm(relevant_image_ids, desc="Extracting features")
for memory_id, image_id in progress_bar:
feature_save_path = os.path.join(
args["feature_save_path"],
f"""mscoco_{args["feature_type"]}_{memory_id}.npy""",
)
np.save(feature_save_path, feature_extractor[image_id])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--input_dialog_json", nargs="+", required=True, help="Input memories JSON"
)
parser.add_argument(
"--input_memory_json", nargs="+", required=True, help="Input memories metadata"
)
parser.add_argument(
"--feature_save_path", required=True, help="Folder to save memory features"
)
parser.add_argument(
"--input_feature_path",
default=None,
help="Path to image features",
)
parser.add_argument(
"--input_image_folder",
default=None,
help="Path to raw input images",
)
parser.add_argument(
"--feature_type",
choices=["butd", "clip", "swin"],
required=True,
help="Type of visual features to extract",
)
parser.add_argument(
"--max_bboxes", default=-1, type=int, help="Maximum bounding boxes to retain"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# Conditional imports.
if parsed_args["feature_type"] == "clip":
import clip
if parsed_args["feature_type"] == "swin":
import timm
main(parsed_args)
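# Example invocation (illustrative only; all paths below are placeholders):
#   python extract_memory_features.py \
#       --input_dialog_json data/mem_dials_train.json \
#       --input_memory_json data/memory_graphs.json \
#       --feature_type butd \
#       --input_feature_path data/butd_features.lmdb \
#       --max_bboxes 10 \
#       --feature_save_path data/memory_features/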
|
comet_memory_dialog-main
|
models/gpt2_mm/utils/extract_memory_features.py
|
#! /usr/bin/env python
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Preprocess the memory dialog dataset.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import os
MM_CONTEXT = "<MM>"
START_API_CALL = "<SOAC>"
END_API_CALL = "<EOAC>"
START_API_RESULT = "<SOAR>"
START_RESPONSE = "<SOR>"
END_SENTENCE = "<EOS>"
PAD_TOKEN = "<PAD>"
SYSTEM = "<SYSTEM>"
USER = "<USER>"
TEMPLATE_API_PREDICT = "{context} {START_API_CALL} "
TEMPLATE_API_TARGET = "{belief_state} {END_API_CALL}"
TEMPLATE_RESPONSE_PREDICT = (
"{context} {START_API_CALL} {belief_state} {END_API_CALL} "
"{START_API_RESULT} {api_result} {START_RESPONSE}"
)
TEMPLATE_RESPONSE_TARGET = "{response} {END_SENTENCE}"
def format_memory_dialog_json(json_path, context_length=2, train=False):
""" """
print(f"Reading: {json_path}")
with open(json_path, "r") as file_id:
data = json.load(file_id)
if train:
additional_special_tokens = set(
[
SYSTEM,
USER,
START_API_CALL,
END_API_CALL,
START_RESPONSE,
START_API_RESULT,
MM_CONTEXT,
]
)
instances = []
for dialog_datum in data["dialogue_data"]:
prev_asst_uttr = None
prev_turn = None
context_history = []
for turn in dialog_datum["dialogue"]:
user_uttr = turn["transcript"].replace("\n", " ").strip()
user_uttr_api_call_type = turn["api_call"]["call_type"]
user_uttr_api_result = turn.get("api_result", {})
user_uttr_parameters = turn["transcript_annotated"][-1]["act_attributes"]
asst_uttr = turn["system_transcript"].replace("\n", " ").strip()
# Format main input context
if prev_asst_uttr:
memory_objects = prev_turn["system_transcript_annotated"][-1][
"act_attributes"
]["memories"]
else:
memory_objects = []
context = format_context(
prev_asst_uttr,
user_uttr,
memory_objects,
)
prev_asst_uttr = asst_uttr
prev_turn = turn
# Concat with previous contexts
context_history.append(context)
context = " ".join(context_history[-context_length:])
# Format belief state
# Skip if the api_call is unknown
if user_uttr_api_call_type == "None":
continue
if (
user_uttr_api_result == {}
or user_uttr_api_result.get("status", "None") == "None"
):
continue
belief_state = []
# ***** Temp fix for null participant *****
if "participant" in user_uttr_parameters["slot_values"]:
user_uttr_parameters["slot_values"]["participant"] = [
p
for p in user_uttr_parameters["slot_values"]["participant"]
if p is not None
]
# ************************************************
# Format for API Call.
str_belief_state = format_api_call(
user_uttr_api_call_type, user_uttr_parameters
)
# Track OOVs
if train:
additional_special_tokens.add(user_uttr_api_call_type)
for slot_name in user_uttr_parameters["slot_values"]:
additional_special_tokens.add(str(slot_name))
# Format for API Result
str_api_result = format_api_result(user_uttr_api_result)
new_instance = {
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn["turn_idx"],
}
# Model two prediction problems.
# A: Context -> API call
api_predict = TEMPLATE_API_PREDICT.format(
context=context,
START_API_CALL=START_API_CALL,
)
api_target = TEMPLATE_API_TARGET.format(
belief_state=str_belief_state,
END_API_CALL=END_API_CALL,
)
instances.append(
{
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn["turn_idx"],
"predict": api_predict,
"target": api_target,
"type": "API",
}
)
# B: Context API call, API result --> Response
response_predict = TEMPLATE_RESPONSE_PREDICT.format(
context=context,
START_API_CALL=START_API_CALL,
belief_state=str_belief_state,
END_API_CALL=END_API_CALL,
START_API_RESULT=START_API_RESULT,
api_result=str_api_result,
START_RESPONSE=START_RESPONSE,
)
response_target = TEMPLATE_RESPONSE_TARGET.format(
response=asst_uttr, END_SENTENCE=END_SENTENCE
)
instances.append(
{
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn["turn_idx"],
"predict": response_predict,
"target": response_target,
"type": "RESPONSE",
}
)
if train:
special_tokens = {"eos_token": END_SENTENCE, "pad_token": PAD_TOKEN}
special_tokens["additional_special_tokens"] = list(additional_special_tokens)
else:
special_tokens = None
return instances, data["split"], special_tokens
def format_context(prev_asst_uttr, user_uttr, memory_objects):
context = ""
if prev_asst_uttr:
context += f"{SYSTEM} {prev_asst_uttr} "
# Add multimodal contexts.
context += represent_memory_objects(memory_objects) + " "
context += f"{USER} {user_uttr}"
return context
def format_api_call(user_uttr_api_call_type, user_uttr_parameters):
str_belief_state_per_frame = (
"{act} [ {slot_values} ] ({request_slots}) < {objects} >".format(
act=user_uttr_api_call_type.strip(),
slot_values=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in user_uttr_parameters["slot_values"].items()
]
),
request_slots=", ".join(user_uttr_parameters["request_slots"]),
objects=", ".join([str(o) for o in user_uttr_parameters["memories"]]),
)
)
return str_belief_state_per_frame
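# Illustrative example (not part of the original file); the call type and values
# below are made up:
#   format_api_call(
#       "API_CALL_TYPE:SEARCH",
#       {"slot_values": {"location": "park"}, "request_slots": [], "memories": [1035119]},
#   )
#   # -> "API_CALL_TYPE:SEARCH [ location = park ] () < 1035119 >"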
def format_api_result(user_uttr_api_result):
simple_retrieved_info = {}
if user_uttr_api_result["results"]["retrieved_info"] != []:
for memory_id, info in user_uttr_api_result["results"][
"retrieved_info"
].items():
# memory_id: '[Memory ID: 1035119]'
simple_memory_id = memory_id.split("[Memory ID: ")[-1][:-1]
simple_retrieved_info[simple_memory_id] = {}
for slot, value in info.items():
if slot == "location":
simple_retrieved_info[simple_memory_id][slot] = value["place"]
else:
simple_retrieved_info[simple_memory_id][slot] = value
str_api_result = (
"{api_status} [ {retrieved_info} ] < {retrieved_memories} >".format(
api_status=user_uttr_api_result["status"],
retrieved_info=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in simple_retrieved_info.items()
]
).replace("'", ""),
retrieved_memories=", ".join(
[str(o) for o in user_uttr_api_result["results"]["retrieved_memories"]]
),
)
)
return str_api_result
def represent_memory_objects(object_ids):
# Stringify visual objects (JSON)
str_objects = ", ".join([f"{oo}<MM_BREAK>" for oo in object_ids])
return f"{MM_CONTEXT} {str_objects}"
def main(args):
instances, split, special_tokens = format_memory_dialog_json(
args["train_json_path"], train=True
)
save_file_path = os.path.join(args["save_folder"], "mem_dials_gpt2_train.json")
with open(save_file_path, "w") as file_id:
json.dump(instances, file_id)
save_file_path = os.path.join(
args["save_folder"], "mem_dials_gpt2_special_tokens.json"
)
with open(save_file_path, "w") as file_id:
json.dump(special_tokens, file_id)
for file_path in args["unseen_json_path"]:
instances, split, _ = format_memory_dialog_json(file_path)
save_file_path = os.path.join(
args["save_folder"], f"mem_dials_gpt2_{split}.json"
)
with open(save_file_path, "w") as file_id:
json.dump(instances, file_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--train_json_path",
required=True,
help="Path to the train dataset",
)
parser.add_argument(
"--unseen_json_path",
default=[],
required=False,
nargs="+",
help="Path to other unseen datsets (val|devtest|test)",
)
parser.add_argument(
"--predict_belief_state",
action="store_true",
help="Include belief state in the prediction",
)
parser.add_argument(
"--save_folder", required=True, help="Path to save the processed files"
)
try:
parsed_args = vars(parser.parse_args())
    except IOError as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_mm/utils/preprocess_memory_dataset.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
"""
Script for converting the main SIMMC datasets (.JSON format)
into the line-by-line stringified format (and back).
The reformatted data is used as input for the GPT-2 based
DST model baseline.
"""
import json
import re
import os
# DSTC style dataset fieldnames
FIELDNAME_DIALOG = "dialogue"
FIELDNAME_USER_UTTR = "transcript"
FIELDNAME_ASST_UTTR = "system_transcript"
FIELDNAME_API_CALL = "api_call"
FIELDNAME_API_RESULT = "api_result"
FIELDNAME_USER_STATE = "transcript_annotated"
FIELDNAME_SYSTEM_STATE = "system_transcript_annotated"
# Templates for GPT-2 formatting
START_OF_MULTIMODAL_CONTEXTS = "<SOM>"
END_OF_MULTIMODAL_CONTEXTS = "<EOM>"
START_OF_API_CALL = "=> <SOAC>:"
END_OF_API_CALL = "<EOAC>"
START_OF_API_RESULT = "<SOAR>"
END_OF_API_RESULT = "<EOAR>"
START_OF_RESPONSE = "<SOR>"
END_OF_SENTENCE = "<EOS>"
SYSTEM = "<SYSTEM>"
USER = "<USER>"
TEMPLATE_PREDICT_API = "{context} {START_OF_API_CALL} "
TEMPLATE_TARGET_API = "{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL}"
TEMPLATE_PREDICT_RESPONSE = (
"{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL} "
"{api_result} {END_OF_API_RESULT} "
)
TEMPLATE_TARGET_RESPONSE = (
"{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL} "
"{api_result} {END_OF_API_RESULT} "
"{response} {END_OF_SENTENCE}"
)
TEMPLATE_PREDICT = "{context} {START_OF_API_CALL} "
TEMPLATE_TARGET = (
"{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL} "
"{api_result} {END_OF_API_RESULT} "
"{response} {END_OF_SENTENCE}"
)
# No belief state predictions and target.
TEMPLATE_PREDICT_NOBELIEF = "{context} {START_OF_RESPONSE} "
TEMPLATE_TARGET_NOBELIEF = "{context} {START_OF_RESPONSE} {response} {END_OF_SENTENCE}"
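# Illustrative sketch (hypothetical utterances and slot values) of how the templates above
# compose, using the special-token constants defined earlier in this file:
#   API predict: '<SYSTEM> : Here it is. <SOM> 1035119 <EOM> <USER> : Show me the beach photo => <SOAC>: '
#   API target:  '<context> => <SOAC>: API_CALL_TYPE.SEARCH [ location = Alki Beach ] () < > <EOAC>'
#   Response target appends '<api result> <EOAR> <assistant response> <EOS>' to the API target.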
def convert_json_to_flattened(
input_path_json,
output_path_predict,
output_path_target,
len_context=2,
use_multimodal_contexts=True,
use_belief_states=True,
input_path_special_tokens="",
output_path_special_tokens="",
):
"""
Input: JSON representation of the dialogs
Output: line-by-line stringified representation of each turn
"""
with open(input_path_json, "r") as f_in:
data = json.load(f_in)["dialogue_data"]
# Predictions and targets for:
# (a) API call
# (b) Response Generation
# Dialog id and turn id for each instance.
predicts = []
targets = []
dialog_turn_info = []
if input_path_special_tokens != "":
with open(input_path_special_tokens, "r") as f_in:
special_tokens = json.load(f_in)
else:
special_tokens = {"eos_token": END_OF_SENTENCE}
additional_special_tokens = [SYSTEM, USER]
if use_belief_states:
additional_special_tokens.append(END_OF_API_CALL)
additional_special_tokens.append(END_OF_API_RESULT)
else:
additional_special_tokens.append(START_OF_RESPONSE)
if use_multimodal_contexts:
additional_special_tokens.extend(
[START_OF_MULTIMODAL_CONTEXTS, END_OF_MULTIMODAL_CONTEXTS]
)
special_tokens["additional_special_tokens"] = additional_special_tokens
if output_path_special_tokens != "":
# If a new output path for special tokens is given,
# we track new OOVs
oov = set()
for _, dialog in enumerate(data):
prev_asst_uttr = None
prev_turn = None
lst_context = []
for turn in dialog[FIELDNAME_DIALOG]:
user_uttr = turn[FIELDNAME_USER_UTTR].replace("\n", " ").strip()
user_uttr_api_call_type = turn[FIELDNAME_API_CALL]["call_type"]
user_uttr_api_result = turn.get(FIELDNAME_API_RESULT, {})
user_uttr_parameters = turn[FIELDNAME_USER_STATE][-1]["act_attributes"]
asst_uttr = turn[FIELDNAME_ASST_UTTR].replace("\n", " ").strip()
# Format main input context
if prev_asst_uttr and use_multimodal_contexts:
memory_objects = prev_turn[FIELDNAME_SYSTEM_STATE][-1][
"act_attributes"
]["memories"]
else:
memory_objects = []
context = format_context(
prev_asst_uttr, user_uttr, memory_objects, use_multimodal_contexts
)
prev_asst_uttr = asst_uttr
prev_turn = turn
# Add multimodal contexts -- user shouldn't have access to ground-truth
"""
if use_multimodal_contexts:
memory_objects = turn[FIELDNAME_API_CALL]['act_attributes']['memories']
context += ' ' + represent_memory_objects(memory_objects)
"""
# Concat with previous contexts
lst_context.append(context)
context = " ".join(lst_context[-len_context:])
# Format belief state
if use_belief_states:
# Skip if the api_call is unknown
if user_uttr_api_call_type == "None":
continue
if (
user_uttr_api_result == {}
or user_uttr_api_result.get("status", "None") == "None"
):
continue
belief_state = []
# for bs_per_frame in user_uttr_api_call_type:
# ***** Temp fix for null participant *****
if "participant" in user_uttr_parameters["slot_values"]:
user_uttr_parameters["slot_values"]["participant"] = [
p
for p in user_uttr_parameters["slot_values"]["participant"]
if p is not None
]
# ************************************************
# Format for API Call
str_belief_state = format_api_call(
user_uttr_api_call_type, user_uttr_parameters
)
# Track OOVs
if output_path_special_tokens != "":
oov.add(user_uttr_api_call_type)
for slot_name in user_uttr_parameters["slot_values"]:
oov.add(str(slot_name))
# slot_name, slot_value = kv[0].strip(), kv[1].strip()
# oov.add(slot_name)
# oov.add(slot_value)
# Format for API Result
str_api_result = format_api_result(user_uttr_api_result)
# A. Format the predicts and targets for API call.
predict = TEMPLATE_PREDICT_API.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
)
predicts.append(predict)
target = TEMPLATE_TARGET_API.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_belief_state,
END_OF_API_CALL=END_OF_API_CALL,
)
targets.append(target)
dialog_turn_info.append(
str((dialog["dialogue_idx"], turn["turn_idx"], "api_call"))
)
# B. Format the predicts and targets for response.
predict = TEMPLATE_PREDICT_RESPONSE.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_belief_state,
END_OF_API_CALL=END_OF_API_CALL,
api_result=str_api_result,
END_OF_API_RESULT=END_OF_API_RESULT,
)
predicts.append(predict)
target = TEMPLATE_TARGET_RESPONSE.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_belief_state,
END_OF_API_CALL=END_OF_API_CALL,
api_result=str_api_result,
END_OF_API_RESULT=END_OF_API_RESULT,
response=asst_uttr,
END_OF_SENTENCE=END_OF_SENTENCE,
)
targets.append(target)
dialog_turn_info.append(
str((dialog["dialogue_idx"], turn["turn_idx"], "response"))
)
else:
# Format the main input
predict = TEMPLATE_PREDICT_NOBELIEF.format(
context=context, START_OF_RESPONSE=START_OF_RESPONSE
)
predicts.append(predict)
# Format the main output
target = TEMPLATE_TARGET_NOBELIEF.format(
context=context,
response=asst_uttr,
END_OF_SENTENCE=END_OF_SENTENCE,
START_OF_RESPONSE=START_OF_RESPONSE,
)
targets.append(target)
# Create a directory if it does not exist
directory = os.path.dirname(output_path_predict)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
directory = os.path.dirname(output_path_target)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
# Output into text files
with open(output_path_predict, "w") as f_predict:
X = "\n".join(predicts)
f_predict.write(X)
with open(output_path_target, "w") as f_target:
Y = "\n".join(targets)
f_target.write(Y)
output_path_dialog_info = output_path_target.replace(".txt", "_dialog_turn.txt")
with open(output_path_dialog_info, "w") as f_target:
Y = "\n".join(dialog_turn_info)
f_target.write(Y)
if output_path_special_tokens != "":
# Create a directory if it does not exist
directory = os.path.dirname(output_path_special_tokens)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
with open(output_path_special_tokens, "w") as f_special_tokens:
# Add oov's (acts and slot names, etc.) to special tokens as well
special_tokens["additional_special_tokens"].extend(list(oov))
json.dump(special_tokens, f_special_tokens)
def format_context(prev_asst_uttr, user_uttr, memory_objects, use_multimodal_contexts):
context = ""
if prev_asst_uttr:
context += f"{SYSTEM} : {prev_asst_uttr} "
if use_multimodal_contexts:
# Add multimodal contexts
context += represent_memory_objects(memory_objects) + " "
context += f"{USER} : {user_uttr}"
return context
def format_api_call(user_uttr_api_call_type, user_uttr_parameters):
str_belief_state_per_frame = (
"{act} [ {slot_values} ] ({request_slots}) < {objects} >".format(
act=user_uttr_api_call_type.strip(),
slot_values=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in user_uttr_parameters["slot_values"].items()
]
),
request_slots=", ".join(user_uttr_parameters["request_slots"]),
objects=", ".join([str(o) for o in user_uttr_parameters["memories"]]),
)
)
return str_belief_state_per_frame
def format_api_result(user_uttr_api_result):
simple_retrieved_info = {}
if user_uttr_api_result["results"]["retrieved_info"] != []:
for memory_id, info in user_uttr_api_result["results"][
"retrieved_info"
].items():
# memory_id: '[Memory ID: 1035119]'
simple_memory_id = memory_id.split("[Memory ID: ")[-1][:-1]
simple_retrieved_info[simple_memory_id] = {}
for slot, value in info.items():
if slot == "location":
simple_retrieved_info[simple_memory_id][slot] = value["place"]
else:
simple_retrieved_info[simple_memory_id][slot] = value
str_api_result = (
"{api_status} [ {retrieved_info} ] < {retrieved_memories} >".format(
api_status=user_uttr_api_result["status"],
retrieved_info=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in simple_retrieved_info.items()
]
).replace("'", ""),
retrieved_memories=", ".join(
[str(o) for o in user_uttr_api_result["results"]["retrieved_memories"]]
),
)
)
return str_api_result
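def _example_format_api_result():
    # Illustrative sketch only (hypothetical status, slots, and memory ids): shows how the
    # nested API result is flattened by format_api_result above.
    example_api_result = {
        "status": "AVAILABLE",
        "results": {
            "retrieved_info": {
                "[Memory ID: 1035119]": {
                    "time": "2021",
                    "location": {"place": "Alki Beach"},
                },
            },
            "retrieved_memories": [1035119],
        },
    }
    # Returns roughly: 'AVAILABLE [ 1035119 = {time: 2021, location: Alki Beach} ] < 1035119 >'
    return format_api_result(example_api_result)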
def represent_memory_objects(object_ids):
# Stringify visual objects (JSON)
"""
target_attributes = ['pos', 'color', 'type', 'class_name', 'decor_style']
list_str_objects = []
for obj_name, obj in memory_objects.items():
s = obj_name + ' :'
for target_attribute in target_attributes:
if target_attribute in obj:
target_value = obj.get(target_attribute)
if target_value == '' or target_value == []:
pass
else:
s += f' {target_attribute} {str(target_value)}'
list_str_objects.append(s)
str_objects = ' '.join(list_str_objects)
"""
str_objects = ", ".join([str(o) for o in object_ids])
return f"{START_OF_MULTIMODAL_CONTEXTS} {str_objects} {END_OF_MULTIMODAL_CONTEXTS}"
def parse_flattened_results_from_file(path):
results = []
with open(path, "r") as f_in:
for line in f_in:
parsed = parse_flattened_result(line)
results.append(parsed)
return results
def parse_flattened_result(to_parse):
"""
Parse out the belief state from the raw text.
Return an empty list if the belief state can't be parsed
Input:
- A single <str> of flattened result
      e.g. '... => <SOAC>: API_CALL_TYPE.SEARCH [ location = Alki Beach ] () < > <EOAC>'
    Output:
    - Parsed result in a JSON format, where the format is:
        [
            {
                'act': <str>  # e.g. 'API_CALL_TYPE.SEARCH',
                'slots': [
                    [<str> slot_name, <str> slot_value],
                    ...
                ],
                'request_slots': [<str> slot_name, ...],
                'memories': [<str> memory_id, ...]
            }, ...  # End of a frame
] # End of a dialog
"""
# dialog_act_regex = re.compile(r'([\w:?.?]*) *\[([^\]]*)\] *\(([^\]]*)\) *\<([^\]]*)\>')
dialog_act_regex = re.compile(r"([\w:?.?]*) *\[(.*)\] *\(([^\]]*)\) *\<([^\]]*)\>")
    slot_regex = re.compile(r"([A-Za-z0-9_.:-]*) *= *(\[([^\]]*)\]|[^,]*)")
# TODO: More elegant way to match in a non-greedy way. Needs testing.
# slot_regex = re.compile(r"([A-Za-z0-9_.-:]*) *= *(\[(.*?)\]|[^,]*)")
    request_regex = re.compile(r"([A-Za-z0-9_.:-]+)")
object_regex = re.compile(r"([A-Za-z0-9]+)")
belief = []
# Parse
splits = to_parse.strip().split(START_OF_API_CALL)
if len(splits) == 2:
to_parse = splits[1].strip()
splits = to_parse.split(END_OF_API_CALL)
if len(splits) == 2:
# to_parse: 'DIALOG_ACT_1 : [ SLOT_NAME = SLOT_VALUE, ... ] ...'
to_parse = splits[0].strip()
for dialog_act in dialog_act_regex.finditer(to_parse):
d = {
"act": dialog_act.group(1),
"slots": [],
"request_slots": [],
"memories": [],
}
for slot in slot_regex.finditer(dialog_act.group(2)):
d["slots"].append([slot.group(1).strip(), slot.group(2).strip()])
for request_slot in request_regex.finditer(dialog_act.group(3)):
d["request_slots"].append(request_slot.group(1).strip())
for object_id in object_regex.finditer(dialog_act.group(4)):
d["memories"].append(object_id.group(1).strip())
if d != {}:
belief.append(d)
return belief
def test_example(to_parse):
"""Tests parser on an example string.
Args:
to_parse: String to parse.
"""
print(to_parse)
result = parse_flattened_result(to_parse)
for slot, value in result[0]["slots"]:
print(f"{slot} = {value}")
print("-" * 50)
if __name__ == "__main__":
test_examples = [
" => <SOAC>: API_CALL_TYPE.SEARCH [ location = Alki Beach ] () < > <EOAC>",
" => <SOAC>: API_CALL_TYPE.GET_INFO [ ] (time) < 1022778 > <EOAC>",
" => <SOAC>: API_CALL_TYPE.SEARCH [ activity = ['cooking sausages', 'another activity'], time = 3 ] () < > <EOAC>",
" => <SOAC>: API_CALL_TYPE.SEARCH [ location = Bear Mountain, participant = ['Shane', 'Satwik'], activity = ['eating', 'typing'], time = 2021 ] () < > <EOAC>",
]
for example in test_examples:
test_example(example)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/utils/convert.py
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
Adapted from:
https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_language_modeling.py
"""
import argparse
import glob
import json
import logging
import os
import random
import re
import shutil
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
class TextDataset(Dataset):
def __init__(
self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512
):
assert os.path.isfile(file_path)
block_size = block_size - (
tokenizer.model_max_length - tokenizer.model_max_length_single_sentence
)
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(
0, len(tokenized_text) - block_size + 1, block_size
): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(
tokenized_text[i : i + block_size]
)
)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item], dtype=torch.long)
class LineByLineTextDataset(Dataset):
def __init__(
self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512
):
print(file_path)
assert os.path.isfile(file_path)
# Here, we do not cache the features, operating under the assumption
# that we will soon use fast multithreaded tokenizers from the
# `tokenizers` repo everywhere =)
logger.info("Creating features from dataset file at %s", file_path)
with open(file_path, encoding="utf-8") as f:
lines = [
line
for line in f.read().splitlines()
if (len(line) > 0 and not line.isspace())
]
self.examples = tokenizer.batch_encode_plus(
lines, add_special_tokens=True, max_length=block_size
)["input_ids"]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return torch.tensor(self.examples[i], dtype=torch.long)
def load_and_cache_examples(args, tokenizer, evaluate=False):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
dataset = LineByLineTextDataset(
tokenizer, args, file_path=file_path, block_size=args.block_size
)
else:
dataset = TextDataset(
tokenizer, args, file_path=file_path, block_size=args.block_size
)
    # Unknown issues have been reported around not being able to handle incomplete batches (e.g. w/ older CUDA 9.2)
    # Below is a workaround in case you encounter this issue.
    # Alternatively, --no_cuda could avoid this issue too.
    # Comment out the following if you do not encounter this issue or if you are not using any GPU.
n = len(dataset) % args.per_gpu_train_batch_size
if n != 0:
print("Truncating from %d examples" % len(dataset.examples))
dataset.examples = dataset.examples[:-n]
print("Truncating to %d examples" % len(dataset.examples))
return dataset
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(
args, checkpoint_prefix="checkpoint", use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(
os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix))
)
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append(
(int(regex_match.groups()[0]), path)
)
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
if len(checkpoints_sorted) <= args.save_total_limit:
return
number_of_checkpoints_to_delete = max(
0, len(checkpoints_sorted) - args.save_total_limit
)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(
"Deleting older checkpoint [{}] due to args.save_total_limit".format(
checkpoint
)
)
shutil.rmtree(checkpoint)
def mask_tokens(
inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."""
if tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, args.mlm_probability)
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.tolist()
]
probability_matrix.masked_fill_(
torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0
)
if tokenizer._pad_token is not None:
padding_mask = labels.eq(tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
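def _example_mask_tokens():
    # Illustrative sketch only (assumes a masked-LM tokenizer such as "bert-base-uncased"
    # can be loaded): positions that are not masked get label -100 so the MLM loss ignores
    # them, while most of the selected positions are replaced by [MASK] in the inputs.
    example_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    example_args = argparse.Namespace(mlm_probability=0.15)
    example_batch = torch.tensor(
        [example_tokenizer.encode("a short example sentence", add_special_tokens=True)]
    )
    return mask_tokens(example_batch, example_tokenizer, example_args)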
def train(
args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer
) -> Tuple[int, float]:
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(
examples, batch_first=True, padding_value=tokenizer.pad_token_id
)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset)
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=collate,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
model = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model.resize_token_embeddings(len(tokenizer))
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if (
args.model_name_or_path
and os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(
torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))
)
scheduler.load_state_dict(
torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if args.model_name_or_path and os.path.exists(args.model_name_or_path):
try:
            # set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (
len(train_dataloader) // args.gradient_accumulation_steps
)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(
" Continuing training from checkpoint, will skip to saved global_step"
)
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(
" Will skip the first %d steps in the first epoch",
steps_trained_in_current_epoch,
)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
)
set_seed(args) # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs, labels = (
mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = (
model(inputs, masked_lm_labels=labels)
if args.mlm
else model(inputs, labels=labels)
)
loss = outputs[
0
] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar(
"eval_{}".format(key), value, global_step
)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
checkpoint_prefix = "checkpoint"
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, "{}-{}".format(checkpoint_prefix, global_step)
)
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
_rotate_checkpoints(args, checkpoint_prefix)
torch.save(
optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")
)
torch.save(
scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")
)
logger.info(
"Saving optimizer and scheduler states to %s", output_dir
)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(
args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix=""
) -> Dict:
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
if args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir, exist_ok=True)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(
examples, batch_first=True, padding_value=tokenizer.pad_token_id
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size,
collate_fn=collate,
)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
inputs, labels = (
mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
with torch.no_grad():
outputs = (
model(inputs, masked_lm_labels=labels)
if args.mlm
else model(inputs, labels=labels)
)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {"perplexity": perplexity}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--train_data_file",
default=None,
type=str,
required=True,
help="The input training data file (a text file).",
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--model_type",
type=str,
required=True,
help="The model architecture to be trained or fine-tuned.",
)
# Other parameters
parser.add_argument(
"--eval_data_file",
default=None,
type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--line_by_line",
action="store_true",
help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
)
parser.add_argument(
"--should_continue",
action="store_true",
help="Whether to continue from latest checkpoint in output_dir",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
)
parser.add_argument(
"--mlm",
action="store_true",
help="Train with masked-language modeling loss instead of language modeling.",
)
parser.add_argument(
"--mlm_probability",
type=float,
default=0.15,
help="Ratio of tokens to mask for masked language modeling loss",
)
parser.add_argument(
"--config_name",
default=None,
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.",
)
parser.add_argument(
"--add_special_tokens",
default=None,
type=str,
help="Optional file containing a JSON dictionary of special tokens that should be added to the tokenizer.",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument(
"--block_size",
default=-1,
type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=1.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=500, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--save_total_limit",
type=int,
default=None,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
args.model_type in ["bert", "roberta", "distilbert", "camembert"]
and not args.mlm
):
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if args.eval_data_file is None and args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if args.should_continue:
sorted_checkpoints = _sorted_checkpoints(args)
if len(sorted_checkpoints) == 0:
raise ValueError(
"Used --should_continue but no checkpoint was found in --output_dir."
)
else:
args.model_name_or_path = sorted_checkpoints[-1]
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
and not args.should_continue
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(
args.model_name_or_path, cache_dir=args.cache_dir
)
else:
# When we release a pip version exposing CONFIG_MAPPING,
# we can do `config = CONFIG_MAPPING[args.model_type]()`.
raise ValueError(
"You are instantiating a new config instance from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --config_name"
)
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, cache_dir=args.cache_dir
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, cache_dir=args.cache_dir, local_files_only=True
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
if args.add_special_tokens:
if not os.path.exists(args.add_special_tokens):
raise ValueError(
"Additional special tokens file {args.add_special_tokens} not found}"
)
with open(args.add_special_tokens, "rb") as handle:
special_tokens_dict = json.load(handle)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
logger.info(f"Added {num_added_toks} tokens")
logger.info(f"All special tokens: {tokenizer.all_special_tokens}")
if args.block_size <= 0:
args.block_size = tokenizer.model_max_length
# Our input block size will be the max possible for the model
else:
args.block_size = min(args.block_size, tokenizer.model_max_length)
if args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config)
# ensure model aligns with any addition of special tokens
# (unclear if this step is needed for a new model)
if args.add_special_tokens:
model.resize_token_embeddings(len(tokenizer))
model.to(args.device)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = AutoModelWithLMHead.from_pretrained(args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = [
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
]
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
)
model = AutoModelWithLMHead.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = {k + "_{}".format(global_step): v for k, v in result.items()}
results.update(result)
return results
if __name__ == "__main__":
main()
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/run_language_modeling.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Scripts for converting the main SIMMC datasets (.JSON format)
into the line-by-line stringified format (and back).
The reformatted data is used as input for the GPT-2 based
DST model baseline.
"""
from gpt2_dst.utils.convert import convert_json_to_flattened
import argparse
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path_json", help="input path to the original dialog data"
)
parser.add_argument("--output_path_predict", help="output path for model input")
parser.add_argument("--output_path_target", help="output path for full target")
parser.add_argument(
"--input_path_special_tokens",
help="input path for special tokens. blank if not provided",
default="",
)
parser.add_argument(
"--output_path_special_tokens",
help="output path for special tokens. blank if not saving",
default="",
)
parser.add_argument(
"--len_context",
help="# of turns to include as dialog context",
type=int,
default=2,
)
parser.add_argument(
"--use_multimodal_contexts",
help="determine whether to use the multimodal contexts each turn",
type=int,
default=1,
)
parser.add_argument(
"--no_belief_states",
dest="use_belief_states",
action="store_false",
default=True,
help="determine whether to use belief state for each turn",
)
args = parser.parse_args()
input_path_json = args.input_path_json
output_path_predict = args.output_path_predict
output_path_target = args.output_path_target
input_path_special_tokens = args.input_path_special_tokens
output_path_special_tokens = args.output_path_special_tokens
len_context = args.len_context
use_multimodal_contexts = bool(args.use_multimodal_contexts)
# DEBUG:
print("Belief states: {}".format(args.use_belief_states))
# Convert the data into GPT-2 friendly format
convert_json_to_flattened(
input_path_json,
output_path_predict,
output_path_target,
input_path_special_tokens=input_path_special_tokens,
output_path_special_tokens=output_path_special_tokens,
len_context=len_context,
use_multimodal_contexts=use_multimodal_contexts,
use_belief_states=args.use_belief_states,
)
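    # Illustrative invocation sketch (hypothetical paths; assumes the gpt2_dst package is
    # on PYTHONPATH, e.g. when run from models/gpt2_text/):
    #   python gpt2_dst/scripts/preprocess_input.py \
    #       --input_path_json <path to mem_dials_*.json> \
    #       --output_path_predict <path to *_predict.txt> \
    #       --output_path_target <path to *_target.txt> \
    #       --output_path_special_tokens <path to special_tokens.json> \
    #       --len_context 2 --use_multimodal_contexts 1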
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/preprocess_input.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
"""
Scripts for evaluating the GPT-2 DST model predictions.
First, we parse the line-by-line stringified format into responses
and compute BLEU score.
"""
import argparse
import json
from gpt2_dst.utils.convert import parse_flattened_results_from_file
from utils.evaluate_dst import evaluate_from_flat_list
import nltk
import numpy as np
def normalize_sentence(sentence):
"""Normalize the sentences and tokenize."""
return nltk.tokenize.word_tokenize(sentence.lower())
def parse_response_from_file(input_path):
"""Parses the response from a flattened file.
Args:
input_path: Path to read the responses from.
"""
lines = []
with open(input_path, "r") as file_id:
for ii in file_id.readlines():
split_line = ii.split("<SOR>", 1)
lines.append(
(split_line[0].strip("\n"), split_line[1].strip("\n").strip("<EOS>"))
)
return lines
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path_target", help="path for target, line-separated format (.txt)"
)
parser.add_argument(
"--input_path_predicted",
help="path for model prediction output, line-separated format (.txt)",
)
parser.add_argument(
"--output_path_report", help="path for saving evaluation summary (.json)"
)
args = parser.parse_args()
input_path_target = args.input_path_target
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Convert the data from the GPT-2 friendly format to JSON
list_target = parse_response_from_file(input_path_target)
list_predicted = parse_response_from_file(input_path_predicted)
# Compute BLEU scores.
bleu_scores = []
# Smoothing function.
chencherry = nltk.translate.bleu_score.SmoothingFunction()
for response, gt_response in zip(list_predicted, list_target):
assert response[0] == gt_response[0], "Input contexts do not match!"
bleu_score = nltk.translate.bleu_score.sentence_bleu(
[normalize_sentence(gt_response[1])],
normalize_sentence(response[1]),
smoothing_function=chencherry.method7,
)
bleu_scores.append(bleu_score)
print(
"BLEU score: {} +- {}".format(
np.mean(bleu_scores), np.std(bleu_scores) / np.sqrt(len(bleu_scores))
)
)
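    # Illustrative invocation sketch (hypothetical paths):
    #   python gpt2_dst/scripts/evaluate_response.py \
    #       --input_path_target <path to *_target.txt> \
    #       --input_path_predicted <path to model predictions .txt> \
    #       --output_path_report <path to report .json>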
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/evaluate_response.py
|
#! /usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
"""
Gets the best model given all the checkpoints.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import re
def main(args):
for folder_name in args["model_checkpoint_folder"]:
listing = [ii for ii in os.listdir(folder_name) if "checkpoint-" in ii]
valid_metrics = {}
for checkpoint_name in listing:
checkpoint_folder = os.path.join(folder_name, checkpoint_name)
eval_path = os.path.join(checkpoint_folder, "eval_results.txt")
epoch_search = re.search(r"checkpoint-(\d*)", checkpoint_name)
with open(eval_path, "r") as file_id:
result = [ii.strip("\n") for ii in file_id.readlines()][0]
perplexity_search = re.search(r"([0-9\.]+)", result)
# NOTE: Does not handle error conditions.
if perplexity_search is None or epoch_search is None:
print(f"Missing epoch: {checkpoint_name}")
continue
perplexity = float(perplexity_search.group(1))
epoch = int(epoch_search.group(1))
valid_metrics[epoch] = perplexity
best_epoch, _ = sorted(valid_metrics.items(), key=lambda x: x[1])[0]
best_folder = os.path.join(folder_name, f"checkpoint-{best_epoch}")
print(best_folder)
print("." * 50)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--model_checkpoint_folder",
nargs="+",
required=True,
help="List of model checkpoint folders",
)
try:
parsed_args = vars(parser.parse_args())
    except IOError as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/get_best_model.py
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
Adapted from
https://github.com/huggingface/transformers/blob/master/examples/text-generation/run_generation.py
"""
import argparse
import logging
import os
import numpy as np
import torch
from transformers import (
CTRLLMHeadModel,
CTRLTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
TransfoXLLMHeadModel,
TransfoXLTokenizer,
XLMTokenizer,
XLMWithLMHeadModel,
XLNetLMHeadModel,
XLNetTokenizer,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
MODEL_CLASSES = {
"gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
"ctrl": (CTRLLMHeadModel, CTRLTokenizer),
"openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"xlnet": (XLNetLMHeadModel, XLNetTokenizer),
"transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer),
"xlm": (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
#
# Functions to prepare models' input
#
def prepare_ctrl_input(args, _, tokenizer, prompt_text):
if args.temperature > 0.7:
logger.info(
"CTRL typically works better with lower temperatures (and lower top_k)."
)
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=True)
if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):
logger.info(
"WARNING! You are not starting your generation from a control code so you won't get good results"
)
return prompt_text
def prepare_xlm_input(args, model, tokenizer, prompt_text):
# kwargs = {"language": None, "mask_token_id": None}
# Set the language
use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb
if hasattr(model.config, "lang2id") and use_lang_emb:
available_languages = model.config.lang2id.keys()
if args.xlm_language in available_languages:
language = args.xlm_language
else:
language = None
while language not in available_languages:
language = input(
"Using XLM. Select language in "
+ str(list(available_languages))
+ " >>> "
)
model.config.lang_id = model.config.lang2id[language]
# kwargs["language"] = tokenizer.lang2id[language]
# TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers
# XLM masked-language modeling (MLM) models need masked token
# is_xlm_mlm = "mlm" in args.model_name_or_path
# if is_xlm_mlm:
# kwargs["mask_token_id"] = tokenizer.mask_token_id
return prompt_text
def prepare_xlnet_input(args, _, tokenizer, prompt_text):
prompt_text = (
args.padding_text if args.padding_text else PADDING_TEXT
) + prompt_text
return prompt_text
def prepare_transfoxl_input(args, _, tokenizer, prompt_text):
prompt_text = (
args.padding_text if args.padding_text else PADDING_TEXT
) + prompt_text
return prompt_text
PREPROCESSING_FUNCTIONS = {
"ctrl": prepare_ctrl_input,
"xlm": prepare_xlm_input,
"xlnet": prepare_xlnet_input,
"transfo-xl": prepare_transfoxl_input,
}
def adjust_length_to_model(length, max_sequence_length):
if length < 0 and max_sequence_length > 0:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
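# A minimal sketch (added commentary, not part of the original script) of how
# adjust_length_to_model behaves; the numbers are illustrative:
#
#   adjust_length_to_model(-1, max_sequence_length=1024)    # -> 1024
#   adjust_length_to_model(2048, max_sequence_length=1024)  # -> 1024 (capped)
#   adjust_length_to_model(50, max_sequence_length=1024)    # -> 50
#   adjust_length_to_model(-1, max_sequence_length=0)       # -> MAX_LENGTH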
def load_model(model_type, model_name_or_path, device, length=100):
try:
model_class, tokenizer_class = MODEL_CLASSES[model_type]
except KeyError:
        raise KeyError(
            "the model {} you specified is not supported. You are welcome to add it and open a PR :)".format(
                model_type
            )
        )
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
model = model_class.from_pretrained(model_name_or_path)
model.to(device)
length = adjust_length_to_model(
length, max_sequence_length=model.config.max_position_embeddings
)
return model, tokenizer, length
def generate_sequences(
model,
tokenizer,
prompt,
device="cpu",
length=100,
temperature=1.0,
k=0,
p=0.9,
repetition_penalty=1.0,
num_return_sequences=1,
stop_token="<EOS>",
verbose=True,
):
output_sequences, encoded_prompt = generate_sequence_tokens(
model,
tokenizer,
prompt,
device,
length,
temperature,
k,
p,
repetition_penalty,
num_return_sequences,
)
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
if verbose:
print(
"=== GENERATED SEQUENCE {sequence_idx} ===".format(
sequence_idx=generated_sequence_idx + 1,
)
)
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
        # Remove all text after the stop token (keep the full text if the stop token is absent)
        stop_index = text.find(stop_token) if stop_token else -1
        text = text[:stop_index] if stop_index >= 0 else text
# Add the prompt at the beginning of the sequence. Remove the
# excess text that was used for pre-processing
generated_text = (
prompt
+ text[
len(
tokenizer.decode(
encoded_prompt[0], clean_up_tokenization_spaces=True
)
) :
]
)
# generated_text = text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
generated_sequences.append(generated_text)
if verbose:
print(prompt)
print("-")
print(generated_text)
return generated_sequences
def generate_sequence_tokens(
model,
tokenizer,
prompt_text,
device="cpu",
length=100,
temperature=1.0,
k=0,
p=0.9,
repetition_penalty=1.0,
num_return_sequences=1,
):
# Assumes model_type not in PREPROCESSING_FUNCTIONS
    # Strip any leading/trailing newlines if provided
prompt_text = prompt_text.strip("\n")
    # Encode prompt
encoded_prompt = tokenizer.encode(
prompt_text, add_special_tokens=True, return_tensors="pt"
)
encoded_prompt = encoded_prompt.to(device)
output_sequences = model.generate(
input_ids=encoded_prompt,
max_length=length + len(encoded_prompt[0]),
temperature=temperature,
top_k=k,
top_p=p,
repetition_penalty=repetition_penalty,
do_sample=True,
num_return_sequences=num_return_sequences,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
return output_sequences, encoded_prompt
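# Hedged usage sketch (added, not in the original script): load_model and
# generate_sequences are typically combined as below. The checkpoint name
# "gpt2" and the prompt are illustrative placeholders.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model, tokenizer, length = load_model("gpt2", "gpt2", device, length=50)
#   outputs = generate_sequences(
#       model, tokenizer, "The assistant replied:", device=device,
#       length=length, k=0, p=0.9, num_return_sequences=2,
#   )
#   # `outputs` is a list of two strings, each beginning with the prompt.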
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument(
"--prompts_from_file",
type=str,
default=None,
help="""
read prompts from a file and generate, overrides any prompt given on the
command line""",
)
parser.add_argument("--length", type=int, default=20)
parser.add_argument(
"--stop_token",
type=str,
default=None,
help="Token at which text generation is stopped",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty",
type=float,
default=1.0,
help="primarily useful for CTRL model; in that case, use 1.2",
)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument(
"--padding_text",
type=str,
default="",
help="Padding text for Transfo-XL and XLNet.",
)
parser.add_argument(
"--xlm_language",
type=str,
default="",
help="Optional language when used with the XLM model.",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--num_return_sequences",
type=int,
default=1,
help="The number of samples to generate.",
)
parser.add_argument(
"--path_output",
type=str,
default=None,
help="Path to output predictions in a line separated text file.",
)
args = parser.parse_args()
args.device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
set_seed(args)
if args.prompts_from_file and not os.path.exists(args.prompts_from_file):
raise Exception(f"prompt file '{args.prompts_from_file}' not found")
# Initialize the model and tokenizer
args.model_type = args.model_type.lower()
# Load model
model, tokenizer, args.length = load_model(
args.model_type, args.model_name_or_path, args.device, args.length
)
logger.info(args)
results = []
prompts = []
if args.prompts_from_file:
with open(args.prompts_from_file) as handle:
prompts = handle.readlines()
while True:
if not prompts:
prompts = [args.prompt if args.prompt else input("Model prompt >>> ")]
if not args.prompt and (
len(prompts) == 0
or prompts[0].strip() == ""
or prompts[0].lower() == "quit"
):
break # break while True loop
n_prompts = len(prompts)
for i, prompt_text in enumerate(prompts):
generated_sequences = generate_sequences(
model,
tokenizer,
prompt_text,
args.device,
args.length,
args.temperature,
args.k,
args.p,
args.repetition_penalty,
args.num_return_sequences,
args.stop_token,
)
results.append(generated_sequences)
prompts = []
if args.prompt or args.prompts_from_file:
break # break while True loop
if args.path_output is not None:
# Create a directory if it does not exist
directory = os.path.dirname(args.path_output)
        if directory:
            os.makedirs(directory, exist_ok=True)
# Format results into a line-separated string file
str_results = "\n".join(
[" || ".join(generated_sequences) for generated_sequences in results]
)
# Save to a file
with open(args.path_output, "w") as f_out:
f_out.write(str_results)
return results
if __name__ == "__main__":
main()
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/run_generation.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Scripts for evaluating the GPT-2 DST model predictions.
First, we parse the line-by-line stringified format into responses
and compute BLEU score.
"""
import argparse
import ast
import copy
import json
import re
import numpy as np
import tqdm
from gpt2_dst.utils.convert import parse_flattened_result
def convert_slots_to_dict(api_call_json):
"""Converts the slots from list of lists to a dict.
Args:
api_call_json: JSON containing the parsed API call
"""
for frame_ind, frame in enumerate(api_call_json):
slot_dict = {}
for slot_name, slot_value in frame["slots"]:
if re.match("\[.*\]", slot_value):
try:
slot_dict[slot_name] = ast.literal_eval(slot_value)
except:
# If error when parsing the slots add empty string
print(f"Error parsing: {slot_value} -> {frame}")
slot_dict[slot_name] = ""
else:
slot_dict[slot_name] = slot_value
frame["slots"] = slot_dict
return api_call_json
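# Illustrative example (hypothetical slot values, added commentary):
# convert_slots_to_dict turns
#   [{"act": "...", "slots": [["time", "2021"], ["participant", "['Alice']"]]}]
# into
#   [{"act": "...", "slots": {"time": "2021", "participant": ["Alice"]}}]
# i.e. bracketed slot strings are literal_eval'ed into lists and all other
# values are kept as plain strings.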
def parse_results_from_file(input_path, turn_info, original_data):
"""Parse targets from a flattened file to create response, dst evaluation files.
Args:
input_path: Path to read the responses from.
turn_info: List of dialog, turn info.
original_data: Original JSON target.
Returns:
dst_json: JSON file with DST results
responses_json: JSON file with responses
"""
# Collate all lines to ensure they start with either <USER> or <SYSTEM>.
with open(input_path, "r") as file_id:
lines = [ii.strip() for ii in file_id.readlines()]
fixed_lines = []
current_line = ""
for line in lines:
if line[:6] == "<USER>" or line[:8] == "<SYSTEM>":
fixed_lines.append(line)
else:
fixed_lines[-1] += line
print(f"Collating: {len(lines)} -> {len(fixed_lines)}")
lines = fixed_lines
# Identify API call string and response in each line.
assert len(lines) == len(turn_info), "#lines and #turn_info do not match!"
responses_json = {}
dst_pool = {}
for line_ind, line in enumerate(lines):
dialog_id, turn_id, prediction_type = turn_info[line_ind]
if prediction_type == "api_call":
api_call_json = parse_flattened_result(line.split("<EOAC>")[0] + "<EOAC>")
# Convert slots from list of list to dicts.
api_call_json = convert_slots_to_dict(api_call_json)
dst_index = (dialog_id, turn_id)
assert dst_index not in dst_pool, "Result already exists!"
dst_pool[dst_index] = api_call_json
# Check if memories are integers, else skip.
for frame_info in api_call_json:
memories = []
for ii in frame_info["memories"]:
                    try:
                        int(ii)
                        memories.append(ii)
                    except (TypeError, ValueError):
                        pass
frame_info["memories"] = memories
elif prediction_type == "response":
response_str = line.split("<EOAR>")[-1].strip()
if dialog_id not in responses_json:
responses_json[dialog_id] = {
"dialog_id": dialog_id,
"predictions": [],
}
responses_json[dialog_id]["predictions"].append(
{
"turn_id": turn_id,
"response": response_str,
}
)
else:
raise ValueError(f"Invalid prediction_type: {prediction_type}!")
responses_json = list(responses_json.values())
num_missing = 0
num_present = 0
dst_json = copy.deepcopy(original_data)
for dialog_datum in dst_json["dialogue_data"]:
del dialog_datum["mentioned_memory_ids"]
del dialog_datum["memory_graph_id"]
dialog_id = dialog_datum["dialogue_idx"]
for datum in dialog_datum["dialogue"]:
del datum["transcript_annotated"]
turn_id = datum["turn_idx"]
index = (dialog_id, turn_id)
if index in dst_pool:
datum["transcript_annotated"] = dst_pool[index]
num_present += 1
else:
print(f"Missing! -- {index}")
num_missing += 1
print(f"Missing: {num_missing} Present: {num_present}")
return dst_json, responses_json
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_target_json", required=True, help="Path to target JSON file"
)
parser.add_argument(
"--input_dialog_ids",
required=True,
help="Path for dialog, turn ids for input (.txt)",
)
parser.add_argument(
"--input_path_predicted",
required=True,
help="path for model prediction output, line-separated format (.txt)",
)
parser.add_argument(
"--output_path_report",
required=True,
help="Path to save evaluation summary (dst and response) (.json)",
)
args = parser.parse_args()
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Read the input target JSON file.
with open(args.input_target_json, "r") as file_id:
original_data = json.load(file_id)
# Read the dialog and turn ids.
with open(args.input_dialog_ids, "r") as file_id:
turn_info = [ast.literal_eval(ii.strip("\n")) for ii in file_id.readlines()]
# Convert the data from the GPT-2 friendly format to JSON formats.
dst_json, responses_json = parse_results_from_file(
input_path_predicted, turn_info, original_data
)
# Saving both the DST and response JSON.
dst_json_path = args.output_path_report.replace(".json", "_dst_results.json")
print(f"Saving DST results: {dst_json_path}")
with open(dst_json_path, "w") as file_id:
json.dump(dst_json, file_id)
responses_json_path = args.output_path_report.replace(
".json", "_response_results.json"
)
print(f"Saving responses: {responses_json_path}")
with open(responses_json_path, "w") as file_id:
json.dump(responses_json, file_id)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/reformat_dst_response_outputs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#!/usr/bin/env python3
"""
Scripts for evaluating the GPT-2 DST model predictions.
First, we parse the line-by-line stringified format into
the structured DST output.
We then run the main DST Evaluation script to get results.
"""
import argparse
import json
from gpt2_dst.utils.convert import parse_flattened_results_from_file
from utils.evaluate_dst import evaluate_from_flat_list
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path_target", help="path for target, line-separated format (.txt)"
)
parser.add_argument(
"--input_path_predicted",
help="path for model prediction output, line-separated format (.txt)",
)
parser.add_argument(
"--output_path_report", help="path for saving evaluation summary (.json)"
)
args = parser.parse_args()
input_path_target = args.input_path_target
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Convert the data from the GPT-2 friendly format to JSON
list_target = parse_flattened_results_from_file(input_path_target)
list_predicted = parse_flattened_results_from_file(input_path_predicted)
# Evaluate
report = evaluate_from_flat_list(list_target, list_predicted)
# Save report
with open(output_path_report, "w") as f_out:
json.dump(report, f_out)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/evaluate.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Script evaluates response generation using GT responses.
Expected JSON format:
[
    {
        "dialog_id": <dialog_id>,
        "predictions": [
            {
                "turn_id": <turn_id>,
                "response": <str; model output>,
            },
            ...
        ]
    },
    ...
]
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import nltk
import numpy as np
import tqdm
def normalize_sentence(sentence):
"""Normalize the sentences and tokenize."""
return nltk.tokenize.word_tokenize(sentence.lower())
def evaluate_response_generation(
gt_responses,
model_responses,
single_round_eval=False,
record_instance_results=None,
compute_bert_score=False,
):
"""Evaluates response generation using the raw data and model predictions.
Args:
gt_responses: Ground truth responses.
model_responses: Generated responses.
single_round_eval: Evaluate only for the last turn.
record_instance_results: Save path for instance level metrics.
"""
gt_responses_pool = {ii["dialogue_idx"]: ii for ii in gt_responses["dialogue_data"]}
bleu_scores = []
# Smoothing function.
chencherry = nltk.translate.bleu_score.SmoothingFunction()
# Lazy initialization for bert score.
if compute_bert_score:
import bert_score
bert_scorer = bert_score.BERTScorer(lang="en")
bert_scores = []
num_evaluations = 0
for model_datum in tqdm.tqdm(model_responses, desc="Evaluating"):
dialog_id = model_datum["dialog_id"]
num_gt_rounds = len(gt_responses_pool[dialog_id]["dialogue"])
for round_datum in model_datum["predictions"]:
round_id = round_datum["turn_id"]
# Skip if single_round_eval and this is not the last round.
if single_round_eval and round_id != num_gt_rounds - 1:
continue
response = round_datum["response"]
gt_datum = gt_responses_pool[dialog_id]["dialogue"][round_id]
gt_response = gt_datum["system_transcript"]
try:
gt_response_clean = normalize_sentence(gt_response)
response_clean = normalize_sentence(response)
bleu_score = nltk.translate.bleu_score.sentence_bleu(
[gt_response_clean],
response_clean,
smoothing_function=chencherry.method7,
)
bleu_scores.append(bleu_score)
if compute_bert_score:
_, _, bert_f1 = bert_scorer.score(
[" ".join(response_clean)], [" ".join(gt_response_clean)]
)
bert_scores.append(bert_f1.item())
            except Exception:
                print(f"Model: {response} -> GT: {gt_response}")
                continue
# Add the result to datum and save it back.
if record_instance_results:
round_datum["bleu"] = bleu_score
round_datum["response_len"] = len(normalize_sentence(gt_response))
if compute_bert_score:
round_datum["bert_score"] = bert_f1
print("#Instances evaluated BLEU: {}".format(len(bleu_scores)))
if record_instance_results:
print(f"Saving per instance results: {record_instance_results}")
with open(record_instance_results, "w") as file_id:
json.dump(model_responses, file_id)
bleu_str_mean = np.mean(bleu_scores)
bleu_str_err = np.std(bleu_scores) / np.sqrt(len(bleu_scores))
if compute_bert_score:
bert_score_mean = np.mean(bert_scores)
bert_score_err = np.std(bert_scores) / np.sqrt(len(bert_scores))
else:
bert_score_mean, bert_score_err = None, None
return bleu_str_mean, bleu_str_err, bert_score_mean, bert_score_err
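# A minimal sketch (added commentary) of the per-instance BLEU computation above,
# using the module-level nltk import; the sentences are made up for illustration:
#
#   chencherry = nltk.translate.bleu_score.SmoothingFunction()
#   ref = normalize_sentence("Here are the photos from your trip.")
#   hyp = normalize_sentence("Here are the photos from your hiking trip.")
#   score = nltk.translate.bleu_score.sentence_bleu(
#       [ref], hyp, smoothing_function=chencherry.method7
#   )  # float in [0, 1]; method7 smoothing avoids zero scores on short outputs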
def main(args):
print("Reading: {}".format(args["data_json_path"]))
with open(args["data_json_path"], "r") as file_id:
gt_responses = json.load(file_id)
print("Reading: {}".format(args["model_response_path"]))
with open(args["model_response_path"], "r") as file_id:
model_responses = json.load(file_id)
if args["record_instance_results"]:
instance_results_path = args["model_response_path"].replace(
".json", "_results.json"
)
else:
instance_results_path = None
bleu_score, bleu_std_err, bert_score, bert_score_err = evaluate_response_generation(
gt_responses,
model_responses,
args["single_round_evaluation"],
instance_results_path,
args["compute_bert_score"],
)
print(f"BLEU Score: {bleu_score:.4f} +- {bleu_std_err}")
if args["compute_bert_score"]:
print(f"BERT Score: {bert_score:.4f} +- {bert_score_err}")
report = {
"bleu_score": bleu_score,
"bleu_std_err": bleu_std_err,
"bert_score": bert_score,
"bert_score_err": bert_score_err,
}
return report
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Response Generation Evaluation")
parser.add_argument(
"--data_json_path",
default="data/mem_dials_devtest.json",
help="Data with gold responses",
)
parser.add_argument(
"--model_response_path", default=None, help="Responses generated by the model"
)
parser.add_argument(
"--single_round_evaluation",
dest="single_round_evaluation",
action="store_true",
default=False,
help="Single round evaluation for hidden split",
)
parser.add_argument(
"--record_instance_results",
dest="record_instance_results",
action="store_true",
default=False,
help="Records per instance results and save it back",
)
parser.add_argument(
"--compute_bert_score",
dest="compute_bert_score",
action="store_true",
default=False,
help="Compute BERT score along with BLEU-4",
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_text/utils/response_evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#!/usr/bin/env python3
"""
Util functions for evaluating the DST model predictions.
The script includes a main function which takes
the original JSON data file and the predicted model output file
(in the same format), and outputs the report.
"""
import argparse
import json
import copy
import numpy as np
def reformat_turn_intents(turn_intents, ground_truth_act=None):
new_intents = []
for intent in turn_intents:
frame_intent = copy.deepcopy(intent)
if "act_attributes" in frame_intent:
frame_intent.update(frame_intent["act_attributes"])
del frame_intent["act_attributes"]
# Process ground truth examples.
if "slot_values" in frame_intent:
            # Tuples are immutable, so we use a list of two.
frame_intent["slots"] = [
[key, value] for key, value in frame_intent["slot_values"].items()
]
# FIX: Temporarily remove "None" from participants.
for index, (slot, values) in enumerate(frame_intent["slots"]):
if slot == "participant":
frame_intent["slots"][index][1] = [
ii for ii in values if ii is not None
]
del frame_intent["slot_values"]
# Process model predictions.
else:
frame_intent["slots"] = [
[key, value] for key, value in frame_intent["slots"].items()
]
# Removes repeated slots and sorts them for correct comparison for both
# ground truth and model predictions.
for index, (slot, values) in enumerate(frame_intent["slots"]):
if type(values) is list:
frame_intent["slots"][index][1] = sorted(list(set(values)))
else:
frame_intent["slots"][index][1] = [values]
# If new act is provided, re-assign.
if ground_truth_act:
frame_intent["act"] = ground_truth_act
        # Convert memories from string to integer.
if frame_intent["memories"] and ground_truth_act is None:
frame_intent["memories"] = [
int(ii) for ii in intent["memories"] if ii.isnumeric()
]
new_intents.append(frame_intent)
return new_intents
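# Illustrative sketch (hypothetical values, added commentary): for a ground-truth
# intent such as
#   {"act": "ASK:GET", "act_attributes": {"slot_values": {"time": "2021"},
#    "request_slots": [], "memories": [3]}}
# reformat_turn_intents flattens act_attributes into the frame, rewrites
# slot_values as [["time", ["2021"]]], and, when ground_truth_act is given,
# overrides "act" with that API call type so the frame can be compared against
# model predictions.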
def evaluate_from_json(d_true, d_pred):
"""
<list>d_true and <list>d_pred are in the following format:
(Equivalent to "dialogue_data" field in the input data JSON file)
[
{
"dialogue": [
{
"belief_state": [
[
{
'act': <str>,
'slots': [
[
SLOT_NAME, SLOT_VALUE
], ...
]
},
[End of a frame]
...
],
]
}
[End of a turn]
...
],
}
[End of a dialogue]
...
]
"""
d_true_flattened = []
d_pred_flattened = []
for i in range(len(d_true)):
# Iterate through each dialog
dialog_true = d_true[i]["dialogue"]
dialog_pred = d_pred[i]["dialogue"]
dialogue_idx = d_true[i]["dialogue_idx"]
for j in range(len(dialog_true)):
# Iterate through each turn
turn_true = dialog_true[j]["belief_state"]
turn_pred = dialog_pred[j]["belief_state"]
turn_true["turn_idx"] = j
turn_true["dialogue_idx"] = dialogue_idx
d_true_flattened.append(turn_true)
d_pred_flattened.append(turn_pred)
return evaluate_from_flat_list(d_true_flattened, d_pred_flattened)
def evaluate_from_json_conservative(d_true, d_pred, lowercase=False):
"""
<list>d_true and <list>d_pred are in the following format:
(Equivalent to "dialogue_data" field in the input data JSON file)
[
{
"dialogue": [
{
"belief_state": [
[
{
'act': <str>,
'slots': [
[
SLOT_NAME, SLOT_VALUE
], ...
]
},
[End of a frame]
...
],
]
}
[End of a turn]
...
],
}
[End of a dialogue]
...
]
"""
d_true_flattened = []
d_pred_flattened = []
num_present = 0
num_absent = 0
dst_pool = {ii["dialogue_idx"]: ii for ii in d_pred}
for gt_datum in d_true:
# Iterate through each dialog
dialog_true = gt_datum["dialogue"]
dialogue_idx = gt_datum["dialogue_idx"]
if dialogue_idx not in dst_pool:
print(f"Missing: {dialogue_idx}")
num_absent += len(gt_datum["dialogue"])
continue
# num_present += len(gt_datum["dialogue"])
dialog_pred = dst_pool[dialogue_idx]["dialogue"]
for turn_id in range(len(dialog_true)):
# Iterate through each turn
if "transcript_annotated" not in dialog_pred[turn_id]:
print(f"Missing: {dialogue_idx} {turn_id}")
num_absent += 1
continue
num_present += 1
turn_true = dialog_true[turn_id]["transcript_annotated"]
turn_pred = dialog_pred[turn_id]["transcript_annotated"]
# API calls are formatted as acts.
reformatted_act = dialog_true[turn_id]["api_call"]["call_type"]
turn_true = reformat_turn_intents(turn_true, reformatted_act)
turn_pred = reformat_turn_intents(turn_pred)
d_true_flattened.append(turn_true)
d_pred_flattened.append(turn_pred)
# print(len(d_true_flattened))
# print(len(d_pred_flattened))
print(f"# present: {num_present} # absent: {num_absent}")
return evaluate_from_flat_list(
d_true_flattened, d_pred_flattened, lowercase=lowercase
)
def evaluate_from_flat_list(d_true, d_pred, lowercase=False):
"""
<list>d_true and <list>d_pred are in the following format:
(Each element represents a single turn, with (multiple) frames)
[
[
{
'act': <str>,
'slots': [
[
SLOT_NAME, SLOT_VALUE
], ...
]
},
[End of a frame]
...
],
[End of a turn]
...
]
"""
c = initialize_count_dict()
# Count # corrects & # wrongs
for i in range(len(d_true)):
true_turn = d_true[i]
pred_turn = d_pred[i]
turn_evaluation = evaluate_turn(true_turn, pred_turn, lowercase=lowercase)
c = add_dicts(c, turn_evaluation)
# Calculate metrics
joint_accuracy = c["n_correct_beliefs"] / c["n_frames"]
act_rec, act_prec, act_f1 = rec_prec_f1(
n_correct=c["n_correct_acts"], n_true=c["n_true_acts"], n_pred=c["n_pred_acts"]
)
slot_rec, slot_prec, slot_f1 = rec_prec_f1(
n_correct=c["n_correct_slots"],
n_true=c["n_true_slots"],
n_pred=c["n_pred_slots"],
)
request_slot_rec, request_slot_prec, request_slot_f1 = rec_prec_f1(
n_correct=c["n_correct_request_slots"],
n_true=c["n_true_request_slots"],
n_pred=c["n_pred_request_slots"],
)
object_rec, object_prec, object_f1 = rec_prec_f1(
n_correct=c["n_correct_objects"],
n_true=c["n_true_objects"],
n_pred=c["n_pred_objects"],
)
# Calculate std err
act_f1_stderr = d_f1(c["n_true_acts"], c["n_pred_acts"], c["n_correct_acts"])
slot_f1_stderr = d_f1(c["n_true_slots"], c["n_pred_slots"], c["n_correct_slots"])
request_slot_f1_stderr = d_f1(
c["n_true_request_slots"],
c["n_pred_request_slots"],
c["n_correct_request_slots"],
)
object_f1_stderr = d_f1(
c["n_true_objects"], c["n_pred_objects"], c["n_correct_objects"]
)
return {
"joint_accuracy": joint_accuracy,
"act_rec": act_rec,
"act_prec": act_prec,
"act_f1": act_f1,
"act_f1_stderr": act_f1_stderr,
"slot_rec": slot_rec,
"slot_prec": slot_prec,
"slot_f1": slot_f1,
"slot_f1_stderr": slot_f1_stderr,
"request_slot_rec": request_slot_rec,
"request_slot_prec": request_slot_prec,
"request_slot_f1": request_slot_f1,
"request_slot_f1_stderr": request_slot_f1_stderr,
"object_rec": object_rec,
"object_prec": object_prec,
"object_f1": object_f1,
"object_f1_stderr": object_f1_stderr,
}
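# Hedged example (hypothetical values, added commentary) of the flat-list format
# consumed above, where each turn is a list of frames:
#
#   true_turn = [{"act": "INFORM:GET", "slots": [["time", ["2021"]]],
#                 "request_slots": [], "memories": [1]}]
#   pred_turn = [{"act": "INFORM:GET", "slots": [["time", ["2021"]]],
#                 "request_slots": [], "memories": [1, 2]}]
#   report = evaluate_from_flat_list([true_turn], [pred_turn])
#   # report["act_f1"] == 1.0, while report["object_f1"] < 1.0 because of the
#   # extra predicted memory.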
def evaluate_turn(true_turn, pred_turn, lowercase=False):
count_dict = initialize_count_dict()
# Must preserve order in which frames appear.
for frame_idx in range(len(true_turn)):
# For each frame
true_frame = true_turn[frame_idx]
if frame_idx >= len(pred_turn):
pred_frame = {}
else:
pred_frame = pred_turn[frame_idx]
count_dict = add_dicts(
count_dict,
evaluate_frame(true_frame, pred_frame, strict=False, lowercase=lowercase),
)
return count_dict
def evaluate_frame(true_frame, pred_frame, strict=True, lowercase=False):
"""
If strict=True,
For each dialog_act (frame), set(slot values) must match.
If dialog_act is incorrect, its set(slot values) is considered wrong.
"""
count_dict = initialize_count_dict()
count_dict["n_frames"] += 1
    # Compare Dialog Acts
true_act = true_frame["act"] if "act" in true_frame else None
pred_act = pred_frame["act"] if "act" in pred_frame else None
if not lowercase:
b_correct_act = true_act == pred_act
else:
        # Lowercase evaluation (cast to str so a missing act does not crash).
        b_correct_act = str(true_act).lower() == str(pred_act).lower()
count_dict["n_correct_acts"] += b_correct_act
count_dict["n_true_acts"] += "act" in true_frame
count_dict["n_pred_acts"] += "act" in pred_frame
# Compare Slots
if not lowercase:
true_frame_slot_values = {f"{k}={v}" for k, v in true_frame.get("slots", [])}
pred_frame_slot_values = {f"{k}={v}" for k, v in pred_frame.get("slots", [])}
else:
true_frame_slot_values = {
f"{k}={v}".lower() for k, v in true_frame.get("slots", [])
}
pred_frame_slot_values = {
f"{k}={v}".lower() for k, v in pred_frame.get("slots", [])
}
count_dict["n_true_slots"] += len(true_frame_slot_values)
count_dict["n_pred_slots"] += len(pred_frame_slot_values)
if strict and not b_correct_act:
pass
else:
count_dict["n_correct_slots"] += len(
true_frame_slot_values.intersection(pred_frame_slot_values)
)
# if len(true_frame_slot_values.intersection(pred_frame_slot_values)) != len(pred_frame_slot_values):
# print(true_frame_slot_values)
# print(pred_frame_slot_values)
# print(len(true_frame_slot_values.intersection(pred_frame_slot_values)) == len(pred_frame_slot_values))
# print('--')
# Compare Request slots
if not lowercase:
true_frame_request_slot_values = {
rs for rs in true_frame.get("request_slots", [])
}
pred_frame_request_slot_values = {
rs for rs in pred_frame.get("request_slots", [])
}
else:
true_frame_request_slot_values = {
rs.lower() for rs in true_frame.get("request_slots", [])
}
pred_frame_request_slot_values = {
rs.lower() for rs in pred_frame.get("request_slots", [])
}
count_dict["n_true_request_slots"] += len(true_frame_request_slot_values)
count_dict["n_pred_request_slots"] += len(pred_frame_request_slot_values)
if strict and not b_correct_act:
pass
else:
count_dict["n_correct_request_slots"] += len(
true_frame_request_slot_values.intersection(pred_frame_request_slot_values)
)
# Compare Objects
true_frame_object_values = {
object_id for object_id in true_frame.get("memories", [])
}
pred_frame_object_values = {
object_id for object_id in pred_frame.get("memories", [])
}
count_dict["n_true_objects"] += len(true_frame_object_values)
count_dict["n_pred_objects"] += len(pred_frame_object_values)
if strict and not b_correct_act:
pass
else:
count_dict["n_correct_objects"] += len(
true_frame_object_values.intersection(pred_frame_object_values)
)
# Joint
count_dict["n_correct_beliefs"] += (
b_correct_act
and true_frame_slot_values == pred_frame_slot_values
and true_frame_request_slot_values == pred_frame_request_slot_values
and true_frame_object_values == pred_frame_object_values
)
return count_dict
def add_dicts(d1, d2):
return {k: d1[k] + d2[k] for k in d1}
def rec_prec_f1(n_correct, n_true, n_pred):
rec = n_correct / n_true if n_true != 0 else 0
prec = n_correct / n_pred if n_pred != 0 else 0
f1 = 2 * prec * rec / (prec + rec) if (prec + rec) != 0 else 0
return rec, prec, f1
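# Worked example (illustrative numbers, added commentary): with 8 correct
# predictions out of 10 ground-truth items and 16 predicted items,
#   rec  = 8 / 10 = 0.8
#   prec = 8 / 16 = 0.5
#   f1   = 2 * 0.5 * 0.8 / (0.5 + 0.8) ≈ 0.615
# i.e. rec_prec_f1(n_correct=8, n_true=10, n_pred=16) ≈ (0.8, 0.5, 0.615).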
def d_f1(n_true, n_pred, n_correct):
# 1/r + 1/p = 2/F1
# dr / r^2 + dp / p^2 = 2dF1 /F1^2
# dF1 = 1/2 F1^2 (dr/r^2 + dp/p^2)
dr = b_stderr(n_true, n_correct)
dp = b_stderr(n_pred, n_correct)
r = n_correct / n_true if n_true else 0
p = n_correct / n_pred if n_pred else 0
f1 = 2 * p * r / (p + r) if p + r != 0 else 0
d_f1 = 0.5 * f1**2 * (dr / r**2 + dp / p**2) if p * r != 0 else 0
return d_f1
def b_stderr(n_total, n_pos):
return np.std(b_arr(n_total, n_pos)) / np.sqrt(n_total)
def b_arr(n_total, n_pos):
out = np.zeros(int(n_total))
out[: int(n_pos)] = 1.0
return out
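# Note (added commentary): b_stderr treats n_correct successes out of n_total
# trials as a Bernoulli sample, so np.std(b_arr(...)) / sqrt(n_total) is the
# usual standard error of the success rate, e.g.
#   b_stderr(100, 25)  # ~= sqrt(0.25 * 0.75) / 10 ≈ 0.043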
def initialize_count_dict():
c = {
"n_frames": 0.0,
"n_true_acts": 0.0,
"n_pred_acts": 0.0,
"n_correct_acts": 0.0,
"n_true_slots": 0.0,
"n_pred_slots": 0.0,
"n_correct_slots": 0.0,
"n_true_request_slots": 0.0,
"n_pred_request_slots": 0.0,
"n_correct_request_slots": 0.0,
"n_true_objects": 0.0,
"n_pred_objects": 0.0,
"n_correct_objects": 0.0,
"n_correct_beliefs": 0.0,
}
return copy.deepcopy(c)
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument("--input_path_target", help="path for target (.json)")
parser.add_argument(
"--input_path_predicted", help="path for model prediction output (.json)"
)
parser.add_argument(
"--output_path_report", help="path for saving evaluation summary (.json)"
)
parser.add_argument(
"--lowercase",
action="store_true",
default=False,
help="Evaluate a lowercase model",
)
args = parser.parse_args()
input_path_target = args.input_path_target
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Read the JSON file input
# json_predicted must have the same structure as the original input JSON
# e.g. {'dialogue_data': [ ... ]}
json_target = json.load(open(input_path_target, "r"))
json_predicted = json.load(open(input_path_predicted, "r"))
# Evaluate
report = evaluate_from_json_conservative(
json_target["dialogue_data"],
json_predicted["dialogue_data"],
lowercase=args.lowercase,
)
# report = evaluate_from_json(json_target['dialogue_data'], json_predicted['dialogue_data'])
print(report)
# Save report
with open(output_path_report, "w") as f_out:
json.dump(report, f_out)
|
comet_memory_dialog-main
|
models/gpt2_text/utils/evaluate_dst.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
"""
Description: merges the synthetically generated dialogs (.json, .p)
and the comma-separated Appen annotations (.csv)
to output the merged dialogs in both .json and .p formats
"""
import os
import json
import csv
import random
import pickle
from utils import load_data_pickle
if __name__ == "__main__":
# Parameters for generation
path_tuples = [
# Pilot 1: 50 dialogs
# [
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials.p',
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv',
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials_merged.json',
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials_merged.p',
# ],
# Pilot 2: 450 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials_merged.p",
],
# Batch 1: 2000 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials_merged.p",
],
# Batch 2: 500 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials_merged.p",
],
# Batch 3: 2000 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials_merged.p",
],
# Batch 4: 6000 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials_merged.p",
],
]
for path_tuple in path_tuples:
path_in_synth = path_tuple[0]
path_in_appen = path_tuple[1]
path_out_json = path_tuple[2]
path_out_pickle = path_tuple[3]
# Load original synth
original_dialogs = load_data_pickle(path_in_synth)
mm_dialogs = []
# Load paraphrased
fieldname_to_turn_idx = {
"turn0_paraphrase": 0,
"turn1_paraphrase": 1,
"turn2_paraphrase": 2,
"turn3_paraphrase": 3,
"turn4_paraphrase": 4,
"turn5_paraphrase": 5,
"turn6_paraphrase": 6,
"turn7_paraphrase": 7,
"turn8_paraphrase": 8,
"turn9_paraphrase": 9,
"turn10_paraphrase": 10,
"turn11_paraphrase": 11,
"turn12_paraphrase": 12,
"turn13_paraphrase": 13,
"turn14_paraphrase": 14,
"turn15_paraphrase": 15,
"turn16_paraphrase": 16,
"turn17_paraphrase": 17,
"turn18_paraphrase": 18,
"turn19_paraphrase": 19,
"turn20_paraphrase": 20,
"turn21_paraphrase": 21,
"turn22_paraphrase": 22,
"turn23_paraphrase": 23,
}
COL_DIALOG_ID = 88
turn_idx_to_col = {}
dialog_id_to_utter = {}
with open(path_in_appen, "r", encoding="mac_roman") as f:
reader = csv.reader(f, delimiter=",", quotechar='"')
for i, line in enumerate(reader):
if i == 0:
for col_id, fieldname in enumerate(line):
if fieldname in fieldname_to_turn_idx:
turn_idx = fieldname_to_turn_idx[fieldname]
turn_idx_to_col[turn_idx] = col_id
else:
dialog_id = int(line[COL_DIALOG_ID])
dialog_id_to_utter[dialog_id] = []
for turn_idx in range(len(turn_idx_to_col)):
if turn_idx in turn_idx_to_col:
utter = line[turn_idx_to_col[turn_idx]]
utter = utter.strip()
if utter != "":
dialog_id_to_utter[dialog_id].append(utter)
else:
if turn_idx < 16:
print(
"Check dialog id %d, turn %d"
% (dialog_id, turn_idx)
)
# Merge
for i, mm_d in enumerate(original_dialogs):
d = mm_d.dialog
dialog_id = d.idx
if dialog_id not in dialog_id_to_utter:
print("Dialog %d is missing." % dialog_id)
continue
mm_dialogs.append(mm_d)
n_rounds = int(len(dialog_id_to_utter[dialog_id]) / 2)
# TODO: discarding the utterances with missing paraphrases for now
# Causes: residuals & incompletes from annotations, etc.
mm_dialogs[-1].dialog.user_turns = mm_dialogs[-1].dialog.user_turns[
:n_rounds
]
mm_dialogs[-1].dialog.asst_turns = mm_dialogs[-1].dialog.asst_turns[
:n_rounds
]
for j in range(n_rounds):
try:
user_turn = d.user_turns[j]
asst_turn = d.asst_turns[j]
user_turn_idx = j * 2
asst_turn_idx = j * 2 + 1
user_paraphrase = dialog_id_to_utter[dialog_id][user_turn_idx]
asst_paraphrase = dialog_id_to_utter[dialog_id][asst_turn_idx]
mm_dialogs[-1].dialog.user_turns[j].frames[
-1
].uttr = user_paraphrase
mm_dialogs[-1].dialog.asst_turns[j].frames[
-1
].uttr = asst_paraphrase
                except Exception:
print("Missing rounds %d from dialog %d" % (j, dialog_id))
print(len(dialog_id_to_utter[dialog_id]))
print(len(d.user_turns))
# Output
print("Outputting JSON file at %s..." % path_out_json)
json.dump(
{"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs]},
open(path_out_json, "w"),
indent=4,
)
pickle.dump(mm_dialogs, open(path_out_pickle, "wb"))
|
comet_memory_dialog-main
|
dialog_simulator/merge_synth_and_appen.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
import numpy as np
from typing import List, Tuple
from SimulatorBase import SimulatorBase
from constants import GoalType, DialogAct, GoalMemoryRefType
from Data import MemoryDialog, Goal, GoalParameter, Frame, ActAttributes, APIResponse
from MemoryServiceAPI import MemoryServiceAPI
from utils import (
str_slot_values,
str_request_slots,
str_memories,
get_template,
get_slot_values_simple_from_json,
)
random.seed(0)
class UserSimulator(SimulatorBase):
def __init__(self, *args, **kwargs):
super(UserSimulator, self).__init__(*args, **kwargs)
self.memory_service_api = None
class ModelBasedUserSimulator(UserSimulator):
def __init__(self, *args, **kwargs):
super(ModelBasedUserSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
# Need to define this behavior e.g. as a config, a model, etc.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class RuleBasedUserSimulator(UserSimulator):
def __init__(self, *args, **kwargs):
super(RuleBasedUserSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
# Need to define this behavior e.g. as a config, a model, etc.
pass
class HybridUserSimulator(UserSimulator):
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
# If a Goal is servable by the model based simulator,
# generate with a model based simulator first.
# Otherwise resort to the predefined rules.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class PilotUserSimulator(UserSimulator):
"""
Includes the simplest implementation of a UserSimulator.
Use this class as a guide for implementing more complex
simulators.
"""
def __init__(self, *args, **kwargs):
super(PilotUserSimulator, self).__init__(*args, **kwargs)
# Simple interaction deterministic mapping
self._goal_to_handler = {
GoalType.UNKNOWN: self.UserGoalHandler(),
GoalType.SEARCH: self.UserSearchGoalHandler(),
GoalType.REFINE_SEARCH: self.UserRefineSearchGoalHandler(),
GoalType.GET_RELATED: self.UserGetRelatedGoalHandler(),
GoalType.GET_INFO: self.UserGetInfoGoalHandler(),
# GoalType.GET_AggregatedINFO: self.UserGetAggregatedInfoGoalHandler(),
GoalType.SHARE: self.UserShareGoalHandler(),
}
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
return True
def execute_turn(self, goal: Goal, memory_dialog: MemoryDialog) -> Frame:
handler = self._goal_to_handler[goal.goal_type]
return handler.execute_turn(goal, memory_dialog, self.memory_service_api)
def generate_uttr(self, frame: Frame, goal: Goal) -> Frame:
handler = self._goal_to_handler[goal.goal_type]
uttr = handler.generate_uttr(frame, goal, self.memory_service_api)
frame.uttr = uttr
return frame
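    # Added commentary: each GoalType is served by a dedicated nested handler
    # class below. execute_turn() looks up the handler for the current goal and
    # delegates both frame construction and utterance templating to it, e.g. a
    # GoalType.SEARCH goal is handled by UserSearchGoalHandler.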
class UserGoalHandler:
def __init__(self, *args, **kwargs):
self.available_user_main_acts = [
DialogAct.UNKNOWN,
]
self.available_user_disambiguation_acts = [DialogAct.INFORM_DISAMBIGUATE]
self._uttr_template_disambiguate_memories = {
DialogAct.INFORM_DISAMBIGUATE: ["I mean these ones: {memories}"],
}
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
if len(memory_dialog.dialog.asst_turns) > 0:
last_asst_turn = memory_dialog.dialog.asst_turns[-1]
else:
last_asst_turn = None
if last_asst_turn is None or (
not last_asst_turn.is_disambiguation_request()
):
# 1. User does a main act according to the Goal
if True:
# 1. (1) Main Act
# Get a random dialog act label
user_dialog_act = random.choice(self.available_user_main_acts)
# Randomly fill the act_attributes
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
act_attributes = ActAttributes(
slot_values=self.get_slot_values(goal_parameter),
slot_values_resolved=self.get_slot_values_resolved(
goal_parameter
),
request_slots=self.get_request_slots(goal_parameter),
memories=self.get_memories(
goal.goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Answer follow-up questions
# TODO
pass
else:
# 2. Answer disambiguation request
user_dialog_act, list_act_attributes = self.disambiguate_last_turn(
memory_dialog
)
            # Return a Frame with the generated dialog act and attributes
# TODO: handle multiple goal parameters & multiple acts
return Frame("", user_dialog_act, list_act_attributes[0])
def get_slot_values(self, goal_parameter: GoalParameter):
return get_slot_values_simple_from_json(goal_parameter.filter)
def get_slot_values_resolved(self, goal_parameter: GoalParameter):
# return {k: str(v) for k, v in goal_parameter.filter.items()}
return goal_parameter.filter
def get_request_slots(self, goal_parameter: GoalParameter):
return goal_parameter.request_slots
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> List:
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=0,
n_max_memories=2,
)
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
if frame.dialog_act in set([DialogAct.INFORM_DISAMBIGUATE]):
template = get_template(
self._uttr_template_disambiguate_memories, frame
)
return template.format(
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
)
)
else:
return self.generate_uttr_main(frame, goal, memory_service_api)
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
template = get_template(self._uttr_template, frame)
uttr = template.format(
search_filter=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
def disambiguate_last_turn(self, memory_dialog: MemoryDialog):
# TODO: Make it more robust
user_dialog_act = random.choice(self.available_user_disambiguation_acts)
assert len(memory_dialog.dialog.user_turns) > 0
# **** TODO **** : handle multiple goal parameters & multiple acts
            # **** TODO ****: pick the right frame instead of choosing the last frame
list_act_attributes = [
memory_dialog.dialog.user_turns[-1].frames[-1].act_attributes
]
return user_dialog_act, list_act_attributes
class UserSearchGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.REQUEST_GET,
]
self._uttr_template = {
DialogAct.REQUEST_GET: [
"Show me photos.",
"I am looking for some photos.",
],
}
self._uttr_template_s = {
DialogAct.REQUEST_GET: [
"Show me photos with {search_filter}.",
"I am looking for some photos with {search_filter}.",
],
}
def get_request_slots(self, goal_parameter: GoalParameter):
return []
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return []
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
search_filter = frame.act_attributes.slot_values
if search_filter == {}:
template = get_template(self._uttr_template, frame)
else:
template = get_template(self._uttr_template_s, frame)
uttr = template.format(search_filter=str_slot_values(search_filter))
return uttr
class UserRefineSearchGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.INFORM_REFINE,
]
self._uttr_template = {
DialogAct.INFORM_REFINE: [
"Do I have any other photos?",
"Are there any other photos?",
],
}
self._uttr_template_s = {
DialogAct.INFORM_REFINE: [
"I would like to refine/change my search to include {search_filter}.",
"Refine/change my search to include {search_filter}.",
"Do I have any other photos that also include {search_filter}?",
],
}
def get_slot_values(self, goal_parameter: GoalParameter):
# TODO: Need to account for invalid refine, e.g. looking for wooden area rugs
return get_slot_values_simple_from_json(goal_parameter.filter)
def get_request_slots(self, goal_parameter: GoalParameter):
return []
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return []
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
search_filter = frame.act_attributes.slot_values
if len(search_filter) > 0:
template = get_template(self._uttr_template_s, frame)
elif len(search_filter) == 0:
template = get_template(self._uttr_template, frame)
else:
print("This should not happen")
uttr = template.format(
search_filter=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
class UserGetRelatedGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.INFORM_GET,
]
self._uttr_template_o = {
DialogAct.INFORM_GET: [
"I would like to see something similar/related to {memories}.",
"Is there anything related to {memories}.",
"Is there any other photo/video related to {memories}.",
"Do I have any other photos/videos similar/related to {memories}?",
"Could you show me any other photos/videos like {memories}?",
"Show me other photos/videos like {memories}.",
]
}
self._uttr_template_or = {
DialogAct.INFORM_GET: [
"I would like to see something related to {memories} with the similar/same {request_slots}.",
"Is there anything related to {memories} with the similar/same {request_slots}.",
"Is there any other photo/video related to {memories} with the similar/same {request_slots}.",
"Do I have any other photo/video like {memories} with the similar/same {request_slots}?",
"Could you show me any other photo/video related to {memories} with the similar/same {request_slots}?",
"Show me other photos/videos like {memories} with the similar/same {request_slots}?",
]
}
self._uttr_template_os = {
DialogAct.INFORM_GET: [
"I would like to see something related to {memories}, and/but with {search_filter}.",
"Is there anything related to {memories}, and/but with {search_filter}.",
"Is there any other photo/video related to {memories}, and/but with {search_filter}.",
"Do I have any other photo/video like {memories} , and/but with {search_filter}?",
"Could you show me any other photo/video related to {memories}, and/but with {search_filter}?",
"Show me other photos/videos like {memories}, and/but with {search_filter}.",
]
}
self._uttr_template_ors = {
DialogAct.INFORM_GET: [
"I would like to see something related "
"to {memories} on {request_slots}, but with {search_filter}.",
"Is there anything related "
"to {memories} on {request_slots}, but with {search_filter}.",
"Show me something like "
"{memories} on paremters: {request_slots}, but with {search_filter}.",
"Do I have any photos/videos like "
"{memories} on paremters: {request_slots}, but with {search_filter}?",
]
}
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
search_filter = frame.act_attributes.slot_values
request_slots = frame.act_attributes.request_slots
memories = frame.act_attributes.memories
if len(request_slots) > 0 and len(search_filter) > 0:
template = get_template(self._uttr_template_ors, frame)
elif len(request_slots) > 0 and len(search_filter) == 0:
template = get_template(self._uttr_template_or, frame)
elif len(request_slots) == 0 and len(search_filter) > 0:
template = get_template(self._uttr_template_os, frame)
elif len(request_slots) == 0 and len(search_filter) == 0:
template = get_template(self._uttr_template_o, frame)
else:
print("This should not happen")
uttr = template.format(
search_filter=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=1,
n_max_memories=1,
)
class UserGetInfoGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.ASK_GET,
]
self._uttr_template = {
DialogAct.ASK_GET: [
"Can I get {request_slots} of {memories}?",
"Do you know {request_slots} of {memories}?",
"(Who/where/when/what/...) {request_slots} of {memories}?",
],
}
def get_slot_values(self, goal_parameter: GoalParameter):
return {}
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
n_max_memories = 2 if random.random() > 0.9 else 1
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=1,
n_max_memories=n_max_memories,
)
class UserCompareGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.REQUEST_COMPARE,
]
self._uttr_template_o = {
DialogAct.REQUEST_COMPARE: [
"How do they compare: {memories}?",
]
}
self._uttr_template_or = {
DialogAct.REQUEST_COMPARE: [
"How do they compare on {request_slots}: {memories}?"
]
}
def get_slot_values(self, goal_parameter: GoalParameter):
return {}
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=2,
n_max_memories=2,
)
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
request_slots = frame.act_attributes.request_slots
memories = frame.act_attributes.memories
if len(request_slots) > 0:
template = get_template(self._uttr_template_or, frame)
else:
template = get_template(self._uttr_template_o, frame)
uttr = template.format(
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
class UserShareGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.REQUEST_SHARE,
]
self._uttr_template = {
DialogAct.REQUEST_SHARE: [
"Please share: {memories}.",
"Could you please share: {memories}?",
"I like these: {memories} - could you please share them.",
"Love these photos: {memories} - please share them.",
]
}
def get_request_slots(self, goal_parameter: GoalParameter):
return []
def get_slot_values(self, goal_parameter: GoalParameter):
return {}
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
# Need to pick from previous turns
n_max_memories = 2 if random.random() > 0.7 else 1
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=1,
n_max_memories=n_max_memories,
)
def get_memories(
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
n_min_memories=0,
n_max_memories=2,
) -> List:
# TODO: implement
n_memories = random.randint(n_min_memories, n_max_memories)
candidate_memories = []
# (1) Determine where to choose the memory from
if goal_parameter.reference_type == GoalMemoryRefType.PREV_TURN:
# Candidate memories are from the immediate previous turn
# TODO: add a more robust report-abort if candidates are empty
# ** TODO ** : pick the right frame instead of just the last one
candidate_memories.extend(
memory_dialog.dialog.asst_turns[-1].frames[-1].act_attributes.memories
)
elif goal_parameter.reference_type == GoalMemoryRefType.DIALOG:
# Candidate memories are anywhere from the previous dialog
# TODO: add a more robust report-abort if candidates are empty
# ** TODO ** : pick the right frame instead of just the last one
for turn in memory_dialog.dialog.asst_turns + memory_dialog.dialog.user_turns:
candidate_memories.extend(turn.frames[-1].act_attributes.memories)
elif goal_parameter.reference_type == GoalMemoryRefType.GRAPH:
# Candidate memories are anywhere from the scene
candidate_memories = memory_dialog.memory_graph.get_memories()
else:
print("Object reference not specified")
pass
# (2) Weighted sampling: favor the ones that are talked the most
memory_id_to_memory_dedup = {}
memory_id_to_count = {}
for memory in candidate_memories:
memory_id = memory.data["memory_id"]
# Count
memory_id_to_count[memory_id] = memory_id_to_count.get(memory_id, 0.0) + 1
# Dedup for each memory_id
if memory_id not in memory_id_to_memory_dedup:
memory_id_to_memory_dedup[memory_id] = memory
else:
pass
candidate_memories_dedup = []
candidate_memories_p = []
sum_counts = sum([c for c in memory_id_to_count.values()])
sum_counts = 1.0 if sum_counts == 0 else sum_counts
for memory_id in memory_id_to_count:
candidate_memories_dedup.append(memory_id_to_memory_dedup[memory_id])
candidate_memories_p.append(memory_id_to_count[memory_id] / sum_counts)
return np.random.choice(
candidate_memories_dedup, p=candidate_memories_p, size=n_memories, replace=False
)
return candidate_memories
"""
# e.g. COMPARE / GET_RELATED / GET_INFO should be used only
# among memories with the same type
if goal_type in \
set([GoalType.COMPARE, GoalType.GET_RELATED, GoalType.GET_INFO]):
memory_types = []
for candidate_memory in candidate_memories:
prefab_path = candidate_memory['prefab_path']
obj_metadata = memory_service_api.lookup(prefab_path)
memory_types.append(obj_metadata['type'])
target_memory_type = random.choice(memory_types)
candidate_memories = [
o for o in candidate_memories \
if memory_service_api.lookup(o['prefab_path'])['type'] == target_memory_type
]
"""
|
comet_memory_dialog-main
|
dialog_simulator/UserSimulator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from constants import API_CALL_TYPE, TurnSpeaker, DialogAct
from Data import Turn, Frame, ActAttributes, MemoryDialog, APIResponse, APIRequest
from typing import Dict, Tuple
import sys
sys.path.append("/Users/shanemoon/workspace/memory_dialog/models/")
from gpt2_dst.scripts.run_generation import generate_sequences
from gpt2_dst.utils.convert import (
format_context,
format_api_call,
format_api_result,
parse_flattened_result,
TEMPLATE_PREDICT,
TEMPLATE_PREDICT_RESPONSE,
START_OF_API_CALL,
END_OF_API_CALL,
END_OF_API_RESULT,
END_OF_SENTENCE,
)
from utils import resolve_sv_entities
class MemoryDialogModelBase:
def __init__(self, *args, **kwargs):
self.displayed_memories = []
def predict_api_call(self, query: str, memory_dialog: MemoryDialog) -> Dict:
        return {
            "call_type": API_CALL_TYPE.UNDEFINED,
            # Included so that construct_api_request() can always read predicted["dialog_act"].
            "dialog_act": DialogAct.UNKNOWN,
            "slot_values": {},
            "request_slots": [],
            "memories": [],  # <list> of <Memory> objects
        }
def construct_api_request(
self, query: str, memory_dialog: MemoryDialog
) -> Tuple[Turn, APIRequest]:
# Predict / extract call_type and parameters from query
predicted = self.predict_api_call(query, memory_dialog)
# Cast user query into a Turn instance
query_frame = Frame(
uttr=query,
dialog_act=predicted["dialog_act"],
act_attributes=ActAttributes(
slot_values=predicted["slot_values"],
request_slots=predicted["request_slots"],
# <list> of <Memory> objects
memories=predicted["memories"],
),
)
# For now, we assume one frame per turn
user_turn = Turn(frames=[query_frame], speaker=TurnSpeaker.USER, goal=None)
        # Generate an API request from the predicted values
str_call_type = predicted["call_type"]
try:
call_type = eval(str_call_type)
except Exception:
call_type = API_CALL_TYPE.UNDEFINED
api_parameters = {
"slot_values": predicted["slot_values"],
"request_slots": predicted["request_slots"],
"memories": predicted["memories"], # <list> of <Memory> objects
"n_max_results": 2,
}
# Call API
api_request = APIRequest(
call_type=call_type, parameters=api_parameters, memory_dialog=memory_dialog
)
return user_turn, api_request
def update_display(self, api_response: APIResponse):
if api_response.status is not None:
retrieved_memories = (
api_response.to_dict().get("results", {}).get("retrieved_memories", [])
)
self.displayed_memories = retrieved_memories
def predict_assistant_response(
self,
query: str,
api_call: APIRequest,
api_response: APIResponse,
memory_dialog: MemoryDialog,
) -> Dict:
return {
"uttr": "",
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": [],
}
def construct_assistant_response(
self,
query: str,
api_call: APIRequest,
api_response: APIResponse,
memory_dialog: MemoryDialog,
) -> Turn:
predicted = self.predict_assistant_response(
query, api_call, api_response, memory_dialog
)
response_frame = Frame(
uttr=predicted["uttr"],
dialog_act=predicted["dialog_act"],
act_attributes=ActAttributes(
slot_values=predicted["slot_values"],
slot_values_resolved={},
request_slots=predicted["request_slots"],
memories=predicted["memories"],
),
)
# For now, we assume one frame per turn
assistant_turn = Turn(
frames=[response_frame], speaker=TurnSpeaker.ASSISTANT, goal=None
)
return assistant_turn
class PilotMemoryDialogModel(MemoryDialogModelBase):
def __init__(self, *args, **kwargs):
super(PilotMemoryDialogModel, self).__init__(*args, **kwargs)
self.model = kwargs.pop("model")
self.tokenizer = kwargs.pop("tokenizer")
self.length = kwargs.pop("length")
self.parameter_ontology = kwargs.pop("parameter_ontology")
self.prev_asst_uttr = None
self.lst_context = []
self.turn_id = 0
def predict_api_call(self, query: str, memory_dialog: MemoryDialog) -> Dict:
# Form the prompt
to_predict = self.form_prompt_for_api_call(
self.lst_context, self.prev_asst_uttr, query
)
# Generate the sequence
generated = generate_sequences(
self.model, self.tokenizer, to_predict, verbose=False
)[0]
# Extract the api_call
parsed_api_call, _ = self.parse_assistant_response(generated)
call_type = parsed_api_call.get("act", None)
slot_values = {k: v for k, v in parsed_api_call.get("slots", [])}
request_slots = parsed_api_call.get("request_slots", [])
memory_ids = parsed_api_call.get("memories", [])
memories = memory_dialog.memory_graph.get_memories_by_ids(memory_ids)
# Entity Resolution for locations, etc.
slot_values = resolve_sv_entities(slot_values, self.parameter_ontology)
# Form an API call
return {
"call_type": call_type,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": slot_values,
"request_slots": request_slots,
"memories": memories, # <list> of <Memory> objects
}
def predict_assistant_response(
self,
query: str,
api_call: APIRequest,
api_response: APIResponse,
memory_dialog: MemoryDialog,
) -> Dict:
# Form the prompt
to_predict = self.form_prompt_for_response(
self.lst_context, self.prev_asst_uttr, query, api_call, api_response
)
# Generate the sequence
generated = generate_sequences(
self.model, self.tokenizer, to_predict, verbose=False
)[0]
_, response_text = self.parse_assistant_response(generated)
self.prev_asst_uttr = response_text
if api_response.results is not None:
memories = api_response.results.get("retrieved_memories", [])
else:
memories = []
return {
"uttr": response_text,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": memories, # <list> of <Memory> objects
}
def form_prompt_for_api_call(
self, lst_context, prev_asst_uttr, user_uttr, len_context=2
):
# Format main input context
context = format_context(
prev_asst_uttr,
user_uttr,
self.displayed_memories,
use_multimodal_contexts=True,
)
# Concat with previous contexts
lst_context.append(context)
context = " ".join(lst_context[-len_context:])
# Format the main input
predict = TEMPLATE_PREDICT.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
)
print("============== Prompt Sequence ==============")
print(predict)
print("=============================================")
return predict
def form_prompt_for_response(
self,
lst_context,
prev_asst_uttr,
user_uttr,
api_call,
api_response,
len_context=2,
):
# Format main input context
# Context should already have been formatted
context = " ".join(lst_context[-len_context:])
# Format API call
json_api_call = api_call.to_dict(simple=True)
str_api_call = format_api_call(
json_api_call["call_type"], json_api_call["parameters"]
)
# Format API result
json_api_response = api_response.to_dict()
str_api_result = format_api_result(json_api_response)
# Format the main input
predict = TEMPLATE_PREDICT_RESPONSE.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_api_call,
END_OF_API_CALL=END_OF_API_CALL,
api_result=str_api_result,
END_OF_API_RESULT=END_OF_API_RESULT,
)
print("============== Prompt Sequence ==============")
print(predict)
print("=============================================")
return predict
def parse_assistant_response(self, generated):
print("============== Generated Sequence ==============")
print(generated)
print("================================================")
parsed = parse_flattened_result(generated)
if parsed == []:
parsed_api_call = {}
else:
# For now, we only consider one api_call per turn
parsed_api_call = parsed[-1]
if parsed_api_call == {}:
response_text = "I could not understand. Could you repeat please?"
if END_OF_API_RESULT in generated:
response_text = generated.split(END_OF_API_RESULT)[-1]
response_text = response_text.replace(END_OF_SENTENCE, "")
else:
response_text = "(No system response)"
return parsed_api_call, response_text
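

# Sketch only (hypothetical helper, not wired into the class above): construct_api_request()
# turns the predicted call-type string (e.g. "API_CALL_TYPE.SEARCH") into the enum via eval();
# under the same string-format assumption, an eval-free lookup could look like this:
def _resolve_call_type(str_call_type):
    try:
        _, member_name = str_call_type.split(".", 1)
        return API_CALL_TYPE[member_name]
    except (AttributeError, ValueError, KeyError):
        return API_CALL_TYPE.UNDEFINED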
|
comet_memory_dialog-main
|
dialog_simulator/MemoryDialogModel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import json, random, traceback, os
from typing import List, Tuple
from constants import TurnSpeaker, DialogAct, API_STATUS
from Data import Dialog, MemoryDialog, MemoryGraph, Turn, Goal
from UserSimulator import PilotUserSimulator
from AssistantSimulator import PilotAssistantSimulator
from GoalGenerator import RuleBasedGoalGenerator
from MemoryServiceAPI import MemoryServiceAPI
from utils import build_parameter_ontology
random.seed(0)
class MemoryDialogSimulator:
def __init__(self, *args, **kwargs):
# Initialize user simulator, assistant simulator, memory_graphs etc.
self.domain = kwargs.pop("domain")
self._memory_service_api = kwargs.pop("memory_service_api", MemoryServiceAPI())
self._user_simulator = kwargs.pop("user_simulator", PilotUserSimulator())
self._assistant_simulator = kwargs.pop(
"assistant_simulator", PilotAssistantSimulator()
)
self._goal_generator = kwargs.pop(
"goal_generator", RuleBasedGoalGenerator(domain=self.domain)
)
self._memory_graph_bank = kwargs.pop("memory_graph_bank", {})
self._user_simulator.register_memory_service_api(self._memory_service_api)
self._assistant_simulator.register_memory_service_api(self._memory_service_api)
def set_user_simulator(self, user_simulator):
self._user_simulator = user_simulator
def set_assistant_simulator(self, assistant_simulator):
self._assistant_simulator = assistant_simulator
def set_goal_generator(self, goal_generator):
self._goal_generator = goal_generator
def set_memory_service_api(self, memory_service_api):
self._memory_service_api = memory_service_api
def sample_goals(self, memory_graph, goal_config) -> List[Goal]:
return self._goal_generator.sample_goals(
memory_graph=memory_graph, goal_config=goal_config
)
def sample_memory_graph(self) -> MemoryGraph:
if self._memory_graph_bank == {}:
# Empty memory graph
return MemoryGraph()
# Randomly sample a memory
# TODO: allow for more organized way of sampling memories
memory_graph_id = random.choice(list(self._memory_graph_bank.keys()))
memory_graph = self._memory_graph_bank[memory_graph_id]
return MemoryGraph(data=memory_graph)
def batch_generate_dialog_flows(
self,
n_dialogs: int,
n_max_turns: int,
start_dialog_idx: int,
goal_config: dict = {},
) -> List[MemoryGraph]:
# Batch generate multiple dialogs using the same simulators
memory_dialogs = []
for i in range(n_dialogs):
# Continue until generation is successful
generation_success = False
while not generation_success:
try:
# Sample a memory graph (user)
memory_graph = self.sample_memory_graph()
# Create an empty memory dialog
memory_dialog = MemoryDialog(memory_graph=memory_graph)
# Generate Goal Config
goal_config["parameter_ontology"] = build_parameter_ontology(
memory_dialog.memory_graph,
self._memory_service_api.metadata,
self.domain,
)
# Sample goals for this dialog
goals = self.sample_goals(
memory_graph=memory_dialog.memory_graph, goal_config=goal_config
)
# Generate dialog flow
memory_dialog = self.generate_dialog_flow(
goals, memory_dialog, n_max_turns
)
memory_dialog.dialog.idx = start_dialog_idx + i
# If everything is successful, append to memory_dialogs
generation_success = True
memory_dialogs.append(memory_dialog)
                except Exception:  # avoid swallowing KeyboardInterrupt in the retry loop
# TODO: Make a more robust abort strategy
print("** Error in generating dialog. Ignoring this one. **")
traceback.print_exc()
print()
return memory_dialogs
def generate_dialog_flow(
self,
goals: List[Goal],
memory_dialog: MemoryDialog,
n_max_turns: int,
initialize=True,
) -> MemoryDialog:
if initialize:
# Initialize memory_dialog
memory_dialog.initialize()
# Iterate and generate a dialog turn by turn
i = 0
while not goals == [] and i < n_max_turns:
# Pick a goal
current_goal = goals.pop(0)
goal_met = False
print("Goal:", current_goal)
while not goal_met and i < n_max_turns:
# Generate a turn
memory_dialog = self.generate_turn(current_goal, memory_dialog)
# End of a turn: update dialog & goals
i += 1
goal_met = memory_dialog.is_goal_met(current_goal)
is_valid_dialog = self.validate_dialog(memory_dialog)
if not is_valid_dialog:
# If something is not right about this dialog, abort.
# TODO: abort gracefully
assert False
return memory_dialog
def generate_turn(self, goal: Goal, memory_dialog: MemoryDialog) -> MemoryDialog:
# TODO: extend it for multiple frames per turn
# (1) Generate a User turn, given a target goal and a memory_dialog
# Generate dialog act and slots
user_frame = self._user_simulator.execute_turn(goal, memory_dialog)
# Template based utterance generation
user_frame = self._user_simulator.generate_uttr(user_frame, goal)
# Instantiate a user turn, and update the memory_dialog
user_turn = Turn([user_frame], TurnSpeaker.USER, goal)
memory_dialog.dialog.add_user_turn(user_turn)
print("U:", user_turn)
        # (2) Generate an Assistant turn, given a target goal and a memory_dialog
# Generate dialog act and slots
asst_frame, api_request, api_result = self._assistant_simulator.execute_turn(
goal, memory_dialog
)
# Template based utterance generation
asst_frame = self._assistant_simulator.generate_uttr(asst_frame, goal)
        # Instantiate an assistant turn, and update the memory_dialog
asst_turn = Turn([asst_frame], TurnSpeaker.ASSISTANT, goal)
memory_dialog.dialog.add_asst_turn(asst_turn)
print("A:", asst_turn)
# Add goals and api_calls
memory_dialog.dialog.add_goal(goal)
memory_dialog.dialog.add_api_call(api_request)
memory_dialog.dialog.add_api_result(api_result)
return memory_dialog
def validate_dialog(self, memory_dialog: MemoryDialog) -> bool:
# Check for any undesirable traits of a dialog
n_turns = len(memory_dialog.dialog.asst_turns)
# (1) Multiple sharing of the same memory
set_shared_memory_ids = set()
for user_turn in memory_dialog.dialog.user_turns:
# TODO: Handle multiple frames per turn
dialog_act = user_turn.frames[-1].dialog_act
if dialog_act == DialogAct.REQUEST_SHARE:
memories_to_share = user_turn.frames[-1].act_attributes.memories
for m in memories_to_share:
memory_id = m.data["memory_id"]
if memory_id in set_shared_memory_ids:
# If this memory_id is already shared, abort
return False
set_shared_memory_ids.add(memory_id)
# (2) Too frequent search fails
n_search_fails = 0
for api_result in memory_dialog.dialog.api_results:
status = api_result.status
if status == API_STATUS.SEARCH_NOT_FOUND:
n_search_fails += 1
if (n_turns <= 4 and n_search_fails >= 2) or (
n_turns > 4 and n_search_fails >= 3
):
return False
# Otherwise, this dialog is good.
return True
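

# Illustrative sketch (hypothetical helper, not called above): validate_dialog() rejects a
# dialog when searches fail too often -- two or more failed searches in a short dialog
# (<= 4 assistant turns), three or more in a longer one. The same rule as a pure function:
def _too_many_search_fails(n_turns, n_search_fails):
    limit = 2 if n_turns <= 4 else 3
    return n_search_fails >= limit
# e.g. _too_many_search_fails(4, 2) -> True, while _too_many_search_fails(8, 2) -> False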
|
comet_memory_dialog-main
|
dialog_simulator/MemoryDialogSimulator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
import json
from MemoryDialogModel import PilotMemoryDialogModel
from Data import MemoryGraph, MemoryDialog, Turn
from MemoryServiceAPI import MemoryServiceAPI
import sys
sys.path.append("/Users/shanemoon/workspace/memory_dialog/models/")
from gpt2_dst.scripts.run_generation import load_model
class InteractiveDialogHandler:
def __init__(self, *args, **kwargs):
self.model = kwargs.pop("model", None)
self.memory_graph = kwargs.pop("memory_graph", None)
self.api = kwargs.pop("api", None)
# Start an empty dialog data
self.memory_dialog = MemoryDialog(memory_graph=self.memory_graph)
self.memory_dialog.initialize()
def execute_turn(self, user_query: str) -> Turn:
"""
Given user_query, construct an API call,
get the API response, and return an Assistant Turn.
"""
# Construct the API request
try:
user_turn, api_request = self.model.construct_api_request(
user_query, self.memory_dialog
)
print("============== API Request ==============")
print(api_request)
print("=========================================\n")
# Call API to get responses back
api_response = self.api.call_api(api_request)
print("============== API Response ==============")
print(api_response)
print("==========================================\n")
# Update the display based on the API results
self.model.update_display(api_response)
# Generate an Assistant response based on the API response
assistant_turn = self.model.construct_assistant_response(
user_query, api_request, api_response, self.memory_dialog
)
print("============== Assistant Response ==============")
print(assistant_turn)
print("================================================\n")
# Update the memory_dialog with the new user and assistant turns
self.memory_dialog.dialog.add_user_turn(user_turn)
self.memory_dialog.dialog.add_asst_turn(assistant_turn)
# Update the model
self.model.prev_asst_uttr = assistant_turn.frames[-1].uttr
self.model.turn_id += 1
return assistant_turn
        except Exception:
return None
def run_loop_command_prompt(self):
while True:
print()
user_query = input(">> Enter your query (or type quit): ")
if user_query == "quit":
break
response = self.execute_turn(user_query=user_query)
if __name__ == "__main__":
# Define paths
# path_memory_graph_list = '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/mscoco_memory_graphs_1k.json'
path_memory_graph_list = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/mscoco_memory_graphs_mini.json"
path_model = (
"/Users/shanemoon/workspace/memory_dialog/models/gpt2_dst/save/model_v2"
)
path_parameter_ontology = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/all_parameter_ontology.json"
# Hyperparameters for the demo
random_memory_graph = False
# Load parameters
memory_graph_list = json.load(open(path_memory_graph_list, "r"))
memory_graph_bank = {}
for memory_graph in memory_graph_list:
memory_graph_id = memory_graph["memory_graph_id"]
for i in range(len(memory_graph["memories"])):
memory_graph["memories"][i]["memory_graph_id"] = memory_graph_id
memory_graph_bank[memory_graph_id] = memory_graph
parameter_ontology = json.load(open(path_parameter_ontology, "r"))
# Select a Memory Graph
if random_memory_graph:
memory_graph = MemoryGraph(
data=memory_graph_bank[random.choice(list(memory_graph_bank.keys()))]
)
else:
memory_graph_id = "RbXAfFDz8r72"
memory_graph = MemoryGraph(data=memory_graph_bank[memory_graph_id])
# Load the model parameters
gpt2_model, tokenizer, length = load_model(
model_type="gpt2", model_name_or_path=path_model, device="cpu", length=150
)
    # Instantiate the dialog handler
model = PilotMemoryDialogModel(
model=gpt2_model,
tokenizer=tokenizer,
length=length,
parameter_ontology=parameter_ontology,
)
api = MemoryServiceAPI()
dialog_handler = InteractiveDialogHandler(
model=model, memory_graph=memory_graph, api=api
)
# Run loop
dialog_handler.run_loop_command_prompt()
|
comet_memory_dialog-main
|
dialog_simulator/InteractiveDialogHandler.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
"""
Merges multiple batches of SIMMC 2.0 files into one,
and also outputs train, dev, devtest, and test sets.
"""
import os
import json
import csv
import random
import pickle
import numpy as np
from utils import load_data_pickle
if __name__ == "__main__":
random.seed(0)
np.random.seed(0)
# Paths for merge
path_in_pickle = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_merged.p"
path_out_tsv = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/user_utterances.tsv"
mm_dialogs = []
mm_dialogs.extend(load_data_pickle(path_in_pickle))
# Output
print("Total: %d dialogs" % len(mm_dialogs))
with open(path_out_tsv, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter="\t", quotechar="'")
writer.writerow(["dialog_id", "turn_id", "user_utterance"])
for i, mm_dialog in enumerate(mm_dialogs):
user_turns = mm_dialog.dialog.user_turns
dialog_id = mm_dialog.dialog.idx
for j, user_turn in enumerate(user_turns):
user_uttr = user_turn.frames[-1].uttr
if user_uttr not in set(["N/A", "NA"]):
row = [dialog_id, j, user_uttr]
writer.writerow(row)
|
comet_memory_dialog-main
|
dialog_simulator/get_user_utterances.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from enum import Enum
class GoalType(Enum):
UNKNOWN = "unknown"
SEARCH = "search"
REFINE_SEARCH = "refine_search"
GET_RELATED = "get_related"
GET_INFO = "get_info"
GET_AGGREGATED_INFO = "get_aggregated_info"
SHARE = "share"
CHITCHAT = "chitchat"
class DialogAct(Enum):
UNKNOWN = "unknown"
INFORM_GET = "INFORM:GET"
INFORM_REFINE = "INFORM:REFINE"
INFORM_PREFER = "INFORM:PREFER"
INFORM_DISPREFER = "INFORM:DISPREFER"
INFORM_SHARE = "INFORM:SHARE"
INFORM_DISAMBIGUATE = "INFORM:DISAMBIGUATE"
INFORM_CHITCHAT = "INFORM:CHITCHAT"
REQUEST_GET = "REQUEST:GET"
REQUEST_REFINE = "REQUEST:REFINE"
REQUEST_PREFER = "REQUEST:PREFER"
REQUEST_DISPREFER = "REQUEST:DISPREFER"
REQUEST_SHARE = "REQUEST:SHARE"
REQUEST_DISAMBIGUATE = "REQUEST:DISAMBIGUATE"
CONFIRM_GET = "CONFIRM:GET"
CONFIRM_REFINE = "CONFIRM:REFINE"
CONFIRM_PREFER = "CONFIRM:PREFER"
CONFIRM_DISPREFER = "CONFIRM:DISPREFER"
CONFIRM_SHARE = "CONFIRM:SHARE"
CONFIRM_DISAMBIGUATE = "CONFIRM:DISAMBIGUATE"
PROMPT_GET = "PROMPT:GET"
PROMPT_REFINE = "PROMPT:REFINE"
PROMPT_PREFER = "PROMPT:PREFER"
PROMPT_DISPREFER = "PROMPT:DISPREFER"
PROMPT_SHARE = "PROMPT:SHARE"
PROMPT_DISAMBIGUATE = "PROMPT:DISAMBIGUATE"
ASK_GET = "ASK:GET"
ASK_REFINE = "ASK:REFINE"
ASK_PREFER = "ASK:PREFER"
ASK_DISPREFER = "ASK:DISPREFER"
ASK_SHARE = "ASK:SHARE"
ASK_DISAMBIGUATE = "ASK:DISAMBIGUATE"
class GoalMemoryRefType(Enum):
PREV_TURN = "PREV_TURN"
DIALOG = "DIALOG"
GRAPH = "GRAPH"
NOT_SPECIFIED = "Not Specified"
class ObjectRefType(Enum):
R1 = "R1" # Unique object in the scene
R2 = "R2" # Object in the dialog history, same view point
R3 = "R3" # Object in the dialog history, previous view point
NOT_SPECIFIED = "Not Specified"
class API_STATUS(Enum):
    SEARCH_FOUND = "Search Found"
    SEARCH_NOT_FOUND = "Search Not Found"
INFO_FOUND = "Info Found"
INFO_NOT_FOUND = "Info Not Found"
SHARED = "Shared"
class API_CALL_TYPE(Enum):
SEARCH = "Search"
REFINE_SEARCH = "Refine Search"
GET_INFO = "Get Info"
SHARE = "Share"
GET_RELATED = "Get Related"
UNDEFINED = "Undefined"
class TurnSpeaker(Enum):
USER = "User"
ASSISTANT = "Assistant"
numeric_slots = {"time"}
non_visual_slots = {
"location",
"time",
}
visual_slots = {"participant", "activity"}
all_slots = {"time", "location", "participant", "activity"}
|
comet_memory_dialog-main
|
dialog_simulator/constants.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from constants import API_CALL_TYPE, TurnSpeaker, DialogAct
from Data import Turn, Frame, ActAttributes, MemoryDialog, APIResponse, APIRequest
from typing import Dict, Tuple
from MemoryDialogModel import MemoryDialogModelBase
class DummyMemoryDialogModel(MemoryDialogModelBase):
def __init__(self, *args, **kwargs):
super(DummyMemoryDialogModel, self).__init__(*args, **kwargs)
def predict_api_call(self, query: str) -> Dict:
return {
"call_type": API_CALL_TYPE.SEARCH,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": [],
}
def predict_assistant_response(
self, query: str, api_response: APIResponse, memory_dialog: MemoryDialog
):
response_str = (
"User asked:"
+ query
+ ". Dialog history: "
+ str(memory_dialog)
+ ". API response:"
+ str(api_response)
)
return {
"uttr": response_str,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": api_response.results.get("retrieved_memories"),
}
|
comet_memory_dialog-main
|
dialog_simulator/DummyMemoryDialogModel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
"""
Merges multiple batches of SIMMC 2.0 files into one,
and also outputs train, dev, devtest, and test sets.
"""
import os
import json
import csv
import random
import pickle
import numpy as np
from utils import load_data_pickle
if __name__ == "__main__":
random.seed(0)
np.random.seed(0)
# Paths for merge
paths_to_merge = [
#'/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials_merged.p',
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials_merged.p",
]
path_out_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_merged.json"
path_out_pickle = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_merged.p"
mm_dialogs = []
for path_in_pickle in paths_to_merge:
# Load original synth
mm_dialogs.extend(load_data_pickle(path_in_pickle))
# Output
print("Total: %d dialogs" % len(mm_dialogs))
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs],
"split": "all",
"year": 2021,
"domain": "memory",
},
open(path_out_json, "w"),
indent=4,
)
pickle.dump(mm_dialogs, open(path_out_pickle, "wb"))
# Split
r_train = 0.85
r_dev = 0.10
r_devtest = 0.04
r_test = 0.01
r_mini = 0.001
path_out_train_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_train.json"
path_out_dev_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_dev.json"
path_out_devtest_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_devtest.json"
path_out_test_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_test.json"
path_out_mini_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_mini.json"
n_dialogs = len(mm_dialogs)
indices = np.arange(n_dialogs)
np.random.shuffle(indices)
n_train = int(n_dialogs * r_train)
n_dev = int(n_dialogs * r_dev)
n_devtest = int(n_dialogs * r_devtest)
n_test = int(n_dialogs * r_test)
n_mini = int(n_dialogs * r_mini)
train_indices = indices[:n_train]
dev_indices = indices[n_train : n_train + n_dev]
devtest_indices = indices[n_train + n_dev : n_train + n_dev + n_devtest]
test_indices = indices[n_train + n_dev + n_devtest :]
mini_indices = test_indices[:n_mini]
mm_dialogs_train = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in train_indices]
mm_dialogs_dev = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in dev_indices]
mm_dialogs_devtest = [
mm_d for i, mm_d in enumerate(mm_dialogs) if i in devtest_indices
]
mm_dialogs_test = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in test_indices]
mm_dialogs_mini = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in mini_indices]
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_train],
"split": "train",
"year": 2021,
"domain": "memory",
},
open(path_out_train_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_dev],
"split": "dev",
"year": 2021,
"domain": "memory",
},
open(path_out_dev_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_devtest],
"split": "devtest",
"year": 2021,
"domain": "memory",
},
open(path_out_devtest_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_test],
"split": "test",
"year": 2021,
"domain": "memory",
},
open(path_out_test_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_mini],
"split": "mini",
"year": 2021,
"domain": "memory",
},
open(path_out_mini_json, "w"),
indent=4,
)
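
    # Worked example of the split arithmetic above (hypothetical count of 5,000 dialogs):
    #   n_train   = int(5000 * 0.85)  = 4250
    #   n_dev     = int(5000 * 0.10)  = 500
    #   n_devtest = int(5000 * 0.04)  = 200
    #   test      = the remaining 50 dialogs (all indices beyond train + dev + devtest)
    #   n_mini    = int(5000 * 0.001) = 5, taken from the start of the test indices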
|
comet_memory_dialog-main
|
dialog_simulator/merge_data_json.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from constants import visual_slots, all_slots
import random
random.seed(0)
def build_parameter_ontology(memory_graph, metadata, domain=None, ontology=None):
if ontology is None:
ontology = {
"visual": {},
"non_visual": {},
"all": {},
}
for memory in memory_graph.get_memories():
for slot, value in memory.data.items():
if slot not in all_slots:
continue
slot_category = "visual" if slot in visual_slots else "non_visual"
if slot not in ontology["all"]:
ontology["all"][slot] = []
ontology[slot_category][slot] = []
if value not in ontology["all"][slot]:
ontology["all"][slot].append(value)
ontology[slot_category][slot].append(value)
return ontology
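

# Illustrative usage sketch (hypothetical stub data, not used by the simulator): shows the
# shape of the ontology returned by build_parameter_ontology().
def _example_parameter_ontology():
    class _StubMemory:
        def __init__(self, data):
            self.data = data

    class _StubGraph:
        def __init__(self, memories):
            self._memories = memories

        def get_memories(self):
            return self._memories

    graph = _StubGraph(
        [_StubMemory({"time": "2021-05-01 13:00:00", "activity": [{"activity_name": "skiing"}]})]
    )
    # Returns, e.g.:
    # {"visual": {"activity": [[{"activity_name": "skiing"}]]},
    #  "non_visual": {"time": ["2021-05-01 13:00:00"]},
    #  "all": {"time": ["2021-05-01 13:00:00"], "activity": [[{"activity_name": "skiing"}]]}}
    return build_parameter_ontology(graph, metadata={})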
def batch_build_parameter_ontology(memory_graph_bank):
ontology = {
"visual": {},
"non_visual": {},
"all": {},
}
for i, memory_graph in enumerate(memory_graph_bank.values()):
if i % 100 == 0:
print("Processing memory graph %d" % i)
ontology = build_parameter_ontology(
memory_graph=memory_graph, metadata={}, ontology=ontology
)
return ontology
def str_memory(memory, memory_service_api=None, verbose=True):
"""
memory: <Memory> object
"""
memory_index = str(memory.data["memory_id"])
memory_activity = str(
", ".join([a["activity_name"] for a in memory.data["activity"]])
)
time = str(memory.data["time"])[:-3] + " (" + memory.data["time_part"] + ")"
location = memory.data["location"]["geo_tag"].get("place", "")
if verbose:
template = (
"[Memory ID: {memory_index} ({memory_activity}), {time}, @ {location}]"
)
else:
template = "[Memory ID: {memory_index}]"
return template.format(
memory_index=memory_index,
memory_activity=memory_activity,
time=time,
location=location,
)
def str_slot_values(slot_values):
return "{ " + ", ".join([f"{k}: {v}" for k, v in slot_values.items()]) + " }"
def str_request_slots(request_slots):
return "{ " + ", ".join([s for s in request_slots]) + " }"
def str_memories(memories, memory_service_api=None, verbose=True):
# memories: <list> of <Memory> objects
return (
"{ "
+ str([str_memory(o, memory_service_api, verbose) for o in memories])
+ " }"
)
def int_memory_ids(memories):
return [int(m.data["memory_id"]) for m in memories]
def get_template(template_map, nlu_label):
return random.choice(template_map.get(nlu_label.dialog_act))
def load_data_pickle(path_pickle):
import pickle
return pickle.load(open(path_pickle, "rb"))
def weighted_choice(population, weights):
return random.choices(population=population, weights=weights, k=1)[0]
def get_slot_values_simple_from_json(
slot_values,
location_target="place",
participant_target="name",
activity_target="activity_name",
):
    if slot_values is None:
return {}
out = {}
for slot, value in slot_values.items():
if slot == "location":
out[slot] = get_location_simple_from_json(value, target=location_target)
elif slot == "participant":
out[slot] = get_participant_simple_from_json(
value, target=participant_target
)
elif slot == "activity":
out[slot] = get_activity_simple_from_json(value, target=activity_target)
else:
out[slot] = str(value)
return out
def get_location_simple_from_json(location_json, target="place"):
"""
JSON format:
"location":{
"gps":{
"lat":40.00,
"lon":100.00
},
"geo_tag":{
"place":"Summit at Snoqualmie",
"city":"Seattle",
"state":"Washington",
"country":"USA"
        }
    }
    """
if target in location_json["geo_tag"]:
return location_json["geo_tag"][target]
return location_json["geo_tag"].get("city")
def get_participant_simple_from_json(participant_json, target="name"):
"""
JSON format:
"participant":[
{
"name":"John",
"memory_graph_id":1
},
{
"name":"Mary",
"memory_graph_id":2
}
],
"""
return [p[target] for p in participant_json]
def get_activity_simple_from_json(activity_json, target="activity_name"):
"""
JSON format:
"activity":[
{
"activity_name":"skiing"
}
]
"""
return [a[target] for a in activity_json]
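

# Illustrative usage sketch (hypothetical slot values): get_slot_values_simple_from_json()
# flattens the structured slot values documented above into display-friendly primitives.
def _example_simple_slot_values():
    slot_values = {
        "location": {
            "gps": {"lat": 40.00, "lon": 100.00},
            "geo_tag": {"place": "Summit at Snoqualmie", "city": "Seattle"},
        },
        "participant": [{"name": "John"}, {"name": "Mary"}],
        "activity": [{"activity_name": "skiing"}],
        "time": "2021-03-01 10:00:00",
    }
    # -> {"location": "Summit at Snoqualmie", "participant": ["John", "Mary"],
    #     "activity": ["skiing"], "time": "2021-03-01 10:00:00"}
    return get_slot_values_simple_from_json(slot_values)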
def get_edit_distance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2 + 1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(
1 + min((distances[i1], distances[i1 + 1], distances_[-1]))
)
distances = distances_
return distances[-1]
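

# Worked example (standard Levenshtein case, illustrative only):
# get_edit_distance("kitten", "sitting") == 3 (two substitutions plus one insertion),
# and get_edit_distance("abc", "abc") == 0.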
def resolve_sv_entities(slot_values: dict, parameter_ontology: dict) -> dict:
if "location" in slot_values:
str_location = slot_values["location"]
resolved_location_obj = resolve_location(
str_location, parameter_ontology["all"]["location"], True
)
slot_values["location"] = resolved_location_obj
if "participant" in slot_values:
str_participant = slot_values["participant"]
resolved_participant_obj = resolve_participant(
str_participant, parameter_ontology["all"]["participant"], True
)
slot_values["participant"] = resolved_participant_obj
if "activity" in slot_values:
str_activity = slot_values["activity"]
resolved_activity_obj = resolve_activity(
str_activity, parameter_ontology["all"]["activity"], True
)
slot_values["activity"] = resolved_activity_obj
return slot_values
def resolve_location(str_location: str, location_ontology: list, fuzzy: bool) -> dict:
print("Resolving location: %s" % str_location)
# Strict match
for target_location_obj in location_ontology:
if str_location.lower() == target_location_obj["geo_tag"]["place"].lower():
return target_location_obj
# If strict match doesn't work & fuzzy == True:
if fuzzy:
print("Trying fuzzy match for location %s" % str_location)
for target_location_obj in location_ontology:
edit_distance = get_edit_distance(
str_location.lower(), target_location_obj["geo_tag"]["place"].lower()
)
if edit_distance < 7:
print("Fuzzy match found for location %s" % str_location)
return target_location_obj
print("Match not found for location %s" % str_location)
return {}
def resolve_list_entities(
str_entity: str, entity_ontology: list, fuzzy: bool, target_key: str
) -> dict:
"""
(input) str_entities: [
'element_1', ...
e.g. 'skiing', 'snowboarding'
]
(target) list_entities: [
{
'target_key': <str>,
e.g. 'activity_name': 'skiing'
}
]
"""
# First, try converting the str to a list
try:
set_entity = set(name.lower() for name in eval(str_entity))
# Strict match
for target_entity_obj in entity_ontology:
target_entity = set(
str(p.get(target_key, "")).lower() for p in target_entity_obj
)
if set_entity == target_entity:
return target_entity_obj
# Fuzzy match 1
if fuzzy and len(set_entity) > 1:
print("Trying fuzzy match for entity %s" % str_entity)
            match_threshold = max(1, int(len(set_entity) / 2) - 1)
for target_entity_obj in entity_ontology:
target_entity = set(
str(p.get(target_key, "")).lower() for p in target_entity_obj
)
                if len(set_entity.intersection(target_entity)) >= match_threshold:
print("Fuzzy match found for %s" % str_entity)
return target_entity_obj
    except Exception:
print("Can't convert to list.")
# Fuzzy match 2
if fuzzy:
print("Trying fuzzy match for entity %s" % str_entity)
for target_entity_obj in entity_ontology:
edit_distance = get_edit_distance(
str_entity.lower().replace("'", ""),
str(
[str(p.get(target_key, "")).lower() for p in target_entity_obj]
).replace("'", ""),
)
if edit_distance < 9:
print("Fuzzy match found for %s" % str_entity)
return target_entity_obj
print("Match not found for %s" % str_entity)
return {}
def resolve_participant(
str_participant: str, participant_ontology: list, fuzzy: bool
) -> dict:
print("Resolving participant: %s" % str_participant)
return resolve_list_entities(
str_entity=str_participant,
entity_ontology=participant_ontology,
fuzzy=fuzzy,
target_key="name",
)
def resolve_activity(str_activity: str, activity_ontology: list, fuzzy: bool) -> dict:
print("Resolving activity: %s" % str_activity)
return resolve_list_entities(
str_entity=str_activity,
entity_ontology=activity_ontology,
fuzzy=fuzzy,
target_key="activity_name",
)
if __name__ == "__main__":
# Test resolve entities
import json
path_parameter_ontology = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/all_parameter_ontology.json"
parameter_ontology = json.load(open(path_parameter_ontology, "r"))
list_slot_values = [
# Strict match
{
"location": "Seattle Downtown",
"participant": "['Carl', 'Bryan', 'Emily']",
"activity": "['cooking sausages']",
},
# Fuzzy match by set intersection
{
"location": "seattle downtow",
"participant": "['Carl', 'Shane']",
"activity": "['cooking sausages', 'peeling potatoes']",
},
# Fuzzy match with incomplete list formats
{
"location": "Bay Area",
"participant": "Carl Bryan Emily",
"activity": "[cooking sausages",
},
]
for slot_values in list_slot_values:
print("------------------------------------")
print(resolve_sv_entities(slot_values, parameter_ontology))
|
comet_memory_dialog-main
|
dialog_simulator/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
from typing import List, Tuple
from SimulatorBase import SimulatorBase
from constants import GoalType, DialogAct, API_STATUS, API_CALL_TYPE
from Data import (
MemoryDialog,
Goal,
Frame,
ActAttributes,
APIRequest,
APIResponse,
GoalParameter,
)
from MemoryServiceAPI import MemoryServiceAPI
from utils import str_slot_values, str_request_slots, str_memories, get_template
random.seed(0)
class AssistantSimulator(SimulatorBase):
def __init__(self, *args, **kwargs):
super(AssistantSimulator, self).__init__(*args, **kwargs)
self.memory_service_api = None
class ModelBasedAssistantSimulator(AssistantSimulator):
def __init__(self, *args, **kwargs):
super(ModelBasedAssistantSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self, goal: Goal, memory_dialog: MemoryDialog
) -> Tuple[Frame, APIRequest, APIResponse]:
# Need to define this behavior e.g. as a config, a model, etc.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class RuleBasedAssistantSimulator(AssistantSimulator):
def __init__(self, *args, **kwargs):
super(RuleBasedAssistantSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self, goal: Goal, memory_dialog: MemoryDialog
) -> Tuple[Frame, APIRequest, APIResponse]:
# Need to define this behavior e.g. as a config, a model, etc.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class PilotAssistantSimulator(AssistantSimulator):
"""
    Includes the simplest implementation of an AssistantSimulator.
Use this class as a guide for implementing more complex
simulators.
"""
def __init__(self, *args, **kwargs):
super(PilotAssistantSimulator, self).__init__(*args, **kwargs)
# Simple interaction deterministic mapping
self._goal_to_handler = {
GoalType.UNKNOWN: self.AssistantGoalHandler(),
GoalType.SEARCH: self.AssistantSearchGoalHandler(),
GoalType.REFINE_SEARCH: self.AssistantRefineSearchGoalHandler(),
GoalType.GET_RELATED: self.AssistantGetRelatedGoalHandler(),
GoalType.GET_INFO: self.AssistantGetInfoGoalHandler(),
# GoalType.GET_AGGREGATED_INFO: self.AssistantGetAggregatedInfoGoalHandler(),
GoalType.SHARE: self.AssistantShareGoalHandler(),
}
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
return True
def execute_turn(
self, goal: Goal, memory_dialog: MemoryDialog
) -> Tuple[Frame, APIRequest, APIResponse]:
handler = self._goal_to_handler[goal.goal_type]
return handler.execute_turn(goal, memory_dialog, self.memory_service_api)
def generate_uttr(self, frame: Frame, goal: Goal) -> Frame:
handler = self._goal_to_handler[goal.goal_type]
uttr = handler.generate_uttr(frame, goal, self.memory_service_api)
frame.uttr = uttr
return frame
class AssistantGoalHandler:
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
return (
Frame("", DialogAct.UNKNOWN, ActAttributes()),
APIRequest(),
APIResponse(),
)
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
template = get_template(self._uttr_template, frame)
verbose_memory = True if random.random() < 0.35 else False
uttr = template.format(
slot_values=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories,
memory_service_api,
verbose=verbose_memory,
),
)
return uttr
class AssistantSearchGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_acts = [
DialogAct.INFORM_GET,
# DialogAct.PROMPT_REFINE
]
self.user_search_acts = set(
[DialogAct.REQUEST_GET, DialogAct.INFORM_REFINE, DialogAct.INFORM_GET]
)
self.asst_search_acts = set(
[
DialogAct.INFORM_GET,
]
)
self._uttr_template = {
DialogAct.INFORM_GET: [
"Here is what I found: {memories}.",
"Check out these photos: (summarize) {memories}.",
"How is what I found: {memories}. They match some of the criteria: {slot_values}.",
"I found these photos: {memories}.",
"Here is what I found: {memories}. [[ Please comment on the retrieved photos. ]]",
"Here is what I found: {memories}. [[ Briefly summarize what is visible in the photos. ]]",
]
}
self._uttr_template_no_results = {
DialogAct.INFORM_GET: [
"Sorry, I could not find any photo/video for {slot_values}.",
"Sorry, I could not find any photo/video.",
"I could not find any photo that matches the criteria {slot_values}.",
]
}
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
memories = frame.act_attributes.memories
if len(memories) > 0:
template = get_template(self._uttr_template, frame)
else:
template = get_template(self._uttr_template_no_results, frame)
verbose_memory = True if random.random() < 0.35 else False
uttr = template.format(
slot_values=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories,
memory_service_api,
verbose=verbose_memory,
),
)
return uttr
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
# Check the routing logic here
if last_user_turn.has_dialog_acts(self.user_search_acts):
# 1. User requests SEARCH with parameters
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_acts)
api_response = APIResponse()
api_request = APIRequest()
if asst_dialog_act in self.asst_search_acts:
# 1. (1) Return Search results
# Randomly fill the act_attributes
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
# ** TODO **: grab the correct frame, instead of the last frame
requested_act_attributes = last_user_turn.frames[
-1
].act_attributes
api_parameters = {
"slot_values": requested_act_attributes.slot_values_resolved
}
if goal_parameter.request_slots != []:
api_parameters[
"request_slots"
] = goal_parameter.request_slots
call_type = None
if goal.goal_type in set([GoalType.REFINE_SEARCH]):
call_type = API_CALL_TYPE.REFINE_SEARCH
else:
call_type = API_CALL_TYPE.SEARCH
api_request = APIRequest(
call_type=call_type,
parameters=api_parameters,
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.SEARCH_FOUND:
act_attributes = ActAttributes(
slot_values=requested_act_attributes.slot_values,
slot_values_resolved=requested_act_attributes.slot_values_resolved,
request_slots=[],
memories=api_response.results.get(
"retrieved_memories", []
),
)
elif api_response.status == API_STATUS.SEARCH_NOT_FOUND:
# TODO: we can put a special logic here
act_attributes = ActAttributes(
slot_values=requested_act_attributes.slot_values,
slot_values_resolved=requested_act_attributes.slot_values_resolved,
request_slots=[],
memories=api_response.results.get(
"retrieved_memories", []
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Follow-up questions
# 1. (3) Check disambiguation request
# TODO
pass
else:
# 2. Handle disambiguation info
# TODO
pass
# Return an Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
class AssistantSearchGoalHandler(AssistantSearchGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class AssistantRefineSearchGoalHandler(AssistantSearchGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
# Execute
return super().execute_turn(goal, memory_dialog, memory_service_api)
class AssistantGetRelatedGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_acts = [
DialogAct.INFORM_GET,
]
self._uttr_template = {
DialogAct.INFORM_GET: [
"Here are some of the related photos I found: {memories}.",
"Here are the related memories: {memories}.",
"Here are the related memories I found: {memories}.",
"Here are the related memories: {memories}. They match some of the criteria: {request_slots}.",
"Here are the related memories: {memories}. [[ Please comment on the retrieved photos ]].",
"Here are the related memories: {memories}. [[ Please summarize what is visible in the photos briefly ]].",
]
}
self._uttr_template_no_request_slots = {
DialogAct.INFORM_GET: ["Here are the related memories: {memories}."]
}
self._uttr_template_no_results = {
DialogAct.INFORM_GET: [
"I could not find any related memory that matches the criteria.",
"Sorry, I could not find any related memory. Anything else I can help?",
]
}
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
memories = frame.act_attributes.memories
request_slots = frame.act_attributes.request_slots
if len(memories) > 0:
if len(request_slots) > 0:
template = get_template(self._uttr_template, frame)
else:
template = get_template(self._uttr_template_no_request_slots, frame)
else:
template = get_template(self._uttr_template_no_results, frame)
verbose_memory = True if random.random() < 0.35 else False
uttr = template.format(
slot_values=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories,
memory_service_api,
verbose=verbose_memory,
),
)
return uttr
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
if True:
# 1. User requests GET SIMILAR with parameters
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_acts)
api_response = APIResponse()
api_request = APIRequest()
if True:
# 1. (1) Return GET_RELATED results
# Randomly fill the act_attributes
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
api_request = APIRequest(
call_type=API_CALL_TYPE.GET_RELATED,
parameters={
##### TODO: fix it so it grabs the right frame (instead of the last frame)
"memories": last_user_turn.frames[
-1
].act_attributes.memories,
"request_slots": last_user_turn.frames[
-1
].act_attributes.request_slots,
"slot_values": goal_parameter.filter, ## TODO
},
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.SEARCH_FOUND:
act_attributes = ActAttributes(
slot_values=api_response.results.get(
"retrieved_info", {}
),
request_slots=api_response.results.get(
"request_slots", []
),
memories=api_response.results.get(
"retrieved_memories", []
),
)
elif api_response.status == API_STATUS.SEARCH_NOT_FOUND:
# TODO: we can put a special logic here
act_attributes = ActAttributes(
slot_values=api_response.results.get(
"retrieved_info", {}
),
request_slots=api_response.results.get(
"request_slots", []
),
memories=api_response.results.get(
"retrieved_memories", []
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Follow-up questions
# 1. (3) Check disambiguation request
# TODO
pass
else:
# 2. Handle disambiguation info
# TODO
pass
# Return an Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
class AssistantGetInfoGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_main_acts = [
DialogAct.INFORM_GET,
]
self.available_asst_disambiguation_acts = [
DialogAct.REQUEST_DISAMBIGUATE,
]
self._uttr_template = {
DialogAct.INFORM_GET: [
"Here is the info on {request_slots}: {slot_values}",
"I found the info on {request_slots}: {slot_values}",
"Here is the info I found: {slot_values}",
],
DialogAct.REQUEST_DISAMBIGUATE: [
"Which photo or video do you mean?",
"Could you clarify which photo or video you are referring to?",
],
}
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
api_request = APIRequest()
if not last_user_turn.is_disambiguation_response():
# 1. User requests GET INFO with parameters
api_response = APIResponse()
# Request for disambiguation at a random rate
n_mentioned_memories = len(memory_dialog.dialog.mentioned_memory_ids)
if n_mentioned_memories > 1:
skip_disambiguation = random.random() > 0.4
else:
# Only one or less memory was mentioned
skip_disambiguation = True
if skip_disambiguation:
(
asst_dialog_act,
list_act_attributes,
api_request,
api_response,
) = self.main_act(goal, memory_dialog, memory_service_api)
else:
# 1. (2) Raise disambiguation request
# TODO
asst_dialog_act = random.choice(
self.available_asst_disambiguation_acts
)
list_act_attributes = [ActAttributes()]
api_response = APIResponse()
else:
# 2. Handle disambiguation info
(
asst_dialog_act,
list_act_attributes,
api_request,
api_response,
) = self.main_act(goal, memory_dialog, memory_service_api)
# Return an Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
def main_act(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
last_user_turn = memory_dialog.dialog.user_turns[-1]
# 1. (1) Return info results
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_main_acts)
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
api_request = APIRequest(
call_type=API_CALL_TYPE.GET_INFO,
parameters={
##### TODO: fix it so it grabs the right frame (instead of the last frame)
"memories": last_user_turn.frames[-1].act_attributes.memories,
"request_slots": last_user_turn.frames[
-1
].act_attributes.request_slots,
},
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.INFO_FOUND:
act_attributes = ActAttributes(
slot_values=api_response.results.get("retrieved_info", {}),
request_slots=api_response.results.get("request_slots", []),
memories=api_response.results.get("retrieved_memories", []),
)
elif api_response.status == API_STATUS.INFO_NOT_FOUND:
# TODO
pass
list_act_attributes.append(act_attributes)
return asst_dialog_act, list_act_attributes, api_request, api_response
class AssistantShareGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_acts = [
DialogAct.CONFIRM_SHARE,
]
self._uttr_template = {
DialogAct.CONFIRM_SHARE: [
"Confirmed. I will share {memories}.",
"Confirmed. I will share them.",
],
}
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
if True:
# 1. User requests SHARE with parameters
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_acts)
api_response = APIResponse()
api_request = APIRequest()
if True:
# 1. (1) Return info results
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
api_request = APIRequest(
call_type=API_CALL_TYPE.SHARE,
parameters={
## TODO: fix so it grabs the right frame
"memories": last_user_turn.frames[
-1
].act_attributes.memories,
},
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.SHARED:
act_attributes = ActAttributes(
slot_values={},
request_slots=[],
memories=api_response.results.get(
"retrieved_memories", []
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Raise disambiguation request
# TODO
pass
else:
# 2. Handle disambiguation info
# TODO
pass
# Return an Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
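

# Dispatch sketch (illustrative; `goal`, `memory_dialog`, and `api` are hypothetical objects):
# PilotAssistantSimulator routes each goal to a dedicated nested handler, so supporting a new
# goal type only requires registering another handler in _goal_to_handler, e.g.:
#
#   sim = PilotAssistantSimulator()
#   handler = sim._goal_to_handler[GoalType.SEARCH]  # -> AssistantSearchGoalHandler instance
#   frame, api_request, api_response = handler.execute_turn(goal, memory_dialog, api)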
|
comet_memory_dialog-main
|
dialog_simulator/AssistantSimulator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import os
import copy
import json
import csv
import random
import pickle
from MemoryDialogSimulator import MemoryDialogSimulator
from UserSimulator import PilotUserSimulator
from AssistantSimulator import PilotAssistantSimulator
from GoalGenerator import RuleBasedGoalGenerator
from MemoryServiceAPI import MemoryServiceAPI
from utils import str_memory
if __name__ == "__main__":
# Parameters for generation
domain = "memory"
random.seed(0)
n_dialogs = 6000
n_max_turns = 8 # 5, 8, 10
goal_config = {
"n_min_goals": 3, # 4
"n_max_goals": 6, # 6
}
start_dialog_idx = 5500
# path_memory_graph_list = '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/memory_may21_v1_100graphs.json'
path_memory_graph_list = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/mscoco_memory_graphs_1k.json"
path_out_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.json"
path_out_csv = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.tsv"
path_out_pickle = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.p"
# Make sure we are not overwriting
debug = False
if not debug:
assert not os.path.exists(path_out_json)
assert not os.path.exists(path_out_csv)
assert not os.path.exists(path_out_pickle)
# Load parameters
memory_graph_list = json.load(open(path_memory_graph_list, "r"))
memory_graph_bank = {}
for memory_graph in memory_graph_list:
memory_graph_id = memory_graph["memory_graph_id"]
for i in range(len(memory_graph["memories"])):
memory_graph["memories"][i]["memory_graph_id"] = memory_graph_id
memory_graph_bank[memory_graph_id] = memory_graph
# Initialize the multimodal simulator
sim = MemoryDialogSimulator(
user_simulator=PilotUserSimulator(),
assistant_simulator=PilotAssistantSimulator(),
goal_generator=RuleBasedGoalGenerator(domain=domain),
memory_service_api=MemoryServiceAPI(metadata={}),
memory_graph_bank=memory_graph_bank,
domain=domain,
)
# Generate dialogs
memory_dialogs = sim.batch_generate_dialog_flows(
n_dialogs=n_dialogs,
n_max_turns=n_max_turns,
start_dialog_idx=start_dialog_idx,
goal_config=goal_config,
)
# Output dialogs
# a. Pickle output
pickle.dump(memory_dialogs, open(path_out_pickle, "wb"))
# b. JSON output
json.dump(
{"dialogue_data": [m_d.to_dict() for m_d in memory_dialogs]},
open(path_out_json, "w"),
indent=4,
)
# c. print output
for i, m_d in enumerate(memory_dialogs[:20]):
d = m_d.dialog
str_dialog = ""
print(f"----- Dialog {d.idx} ----- ")
for j in range(len(d.user_turns)):
user_turn = d.user_turns[j]
asst_turn = d.asst_turns[j]
for user_frame in user_turn.frames:
str_dialog += "U: " + user_frame.uttr + "\n"
# str_dialog += 'U: ' + str(user_frame.nlu.act_attributes.slot_values.values()) + '\n'
for asst_frame in asst_turn.frames:
str_dialog += "A: " + asst_frame.uttr + "\n"
# str_dialog += 'A: ' + str(asst_frame.nlu.act_attributes.slot_values.values()) + '\n'
print(str_dialog)
# d. TSV output for annotation
url_blank = "https://simmc2.s3-us-west-1.amazonaws.com/white.png"
with open(path_out_csv, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter="\t", quotechar="'")
writer.writerow(
[
"dialog_id",
"dialog",
"img_0_url",
"img_1_url",
"img_2_url",
"img_3_url",
"img_4_url",
"img_5_url",
"img_6_url",
"img_7_url",
"img_8_url",
"img_9_url",
"img_10_url",
"img_11_url",
"img_12_url",
"img_13_url",
"img_14_url",
"img_15_url",
"img_0_desc",
"img_1_desc",
"img_2_desc",
"img_3_desc",
"img_4_desc",
"img_5_desc",
"img_6_desc",
"img_7_desc",
"img_8_desc",
"img_9_desc",
"img_10_desc",
"img_11_desc",
"img_12_desc",
"img_13_desc",
"img_14_desc",
"img_15_desc",
"metadata",
]
)
for _, m_d in enumerate(memory_dialogs):
mg = m_d.memory_graph
d = m_d.dialog
dialog_data = []
image_id = 0
all_image_urls = [url_blank]
all_memories = [None]
display_image_ids = [image_id]
for i in range(len(d.user_turns)):
# User turn
user_turn = d.user_turns[i]
user_utter = "USER: " + ". ".join(
[frame.uttr for frame in user_turn.frames]
)
user_turn_data = {
"turn_id": i * 2,
"speaker": "USER",
"utterance": user_utter.replace("'", ""),
"image_id": copy.deepcopy(display_image_ids),
"validation": []
#'validation': make_validation_tokens_for_turn(user_turn)
}
# Assistant turn
asst_turn = d.asst_turns[i]
asst_utter = "ASSISTANT: " + ". ".join(
[frame.uttr for frame in asst_turn.frames]
)
memory_ids = asst_turn.frames[-1].act_attributes.to_dict()["memories"]
if memory_ids != []:
display_urls = []
display_image_ids = []
for memory_id in memory_ids:
display_urls.extend(mg.get_memory_url(memory_id))
image_id += 1
display_image_ids.append(image_id)
all_image_urls.extend(display_urls)
all_memories.extend(mg.get_memories_by_ids(memory_ids))
asst_turn_data = {
"turn_id": i * 2 + 1,
"speaker": "ASSISTANT",
"utterance": asst_utter.replace("'", ""),
"image_id": copy.deepcopy(display_image_ids),
"validation": []
#'validation': make_validation_tokens_for_turn(asst_turn)
}
dialog_data.append(user_turn_data)
dialog_data.append(asst_turn_data)
# This should be true, assuming each memory has one image.
assert len(all_image_urls) == len(all_memories)
writer.writerow(
[
d.idx,
str(json.dumps(dialog_data)),
all_image_urls[0], # url_0
all_image_urls[1] if len(all_image_urls) > 1 else "",
all_image_urls[2] if len(all_image_urls) > 2 else "",
all_image_urls[3] if len(all_image_urls) > 3 else "",
all_image_urls[4] if len(all_image_urls) > 4 else "",
all_image_urls[5] if len(all_image_urls) > 5 else "",
all_image_urls[6] if len(all_image_urls) > 6 else "",
all_image_urls[7] if len(all_image_urls) > 7 else "",
all_image_urls[8] if len(all_image_urls) > 8 else "",
all_image_urls[9] if len(all_image_urls) > 9 else "",
all_image_urls[10] if len(all_image_urls) > 10 else "",
all_image_urls[11] if len(all_image_urls) > 11 else "",
all_image_urls[12] if len(all_image_urls) > 12 else "",
all_image_urls[13] if len(all_image_urls) > 13 else "",
all_image_urls[14] if len(all_image_urls) > 14 else "",
all_image_urls[15] if len(all_image_urls) > 15 else "",
"", # url_0
str_memory(all_memories[1]) if len(all_image_urls) > 1 else "",
str_memory(all_memories[2]) if len(all_image_urls) > 2 else "",
str_memory(all_memories[3]) if len(all_image_urls) > 3 else "",
str_memory(all_memories[4]) if len(all_image_urls) > 4 else "",
str_memory(all_memories[5]) if len(all_image_urls) > 5 else "",
str_memory(all_memories[6]) if len(all_image_urls) > 6 else "",
str_memory(all_memories[7]) if len(all_image_urls) > 7 else "",
str_memory(all_memories[8]) if len(all_image_urls) > 8 else "",
str_memory(all_memories[9]) if len(all_image_urls) > 9 else "",
str_memory(all_memories[10]) if len(all_image_urls) > 10 else "",
str_memory(all_memories[11]) if len(all_image_urls) > 11 else "",
str_memory(all_memories[12]) if len(all_image_urls) > 12 else "",
str_memory(all_memories[13]) if len(all_image_urls) > 13 else "",
str_memory(all_memories[14]) if len(all_image_urls) > 14 else "",
str_memory(all_memories[15]) if len(all_image_urls) > 15 else "",
{}, # mockup
]
)
# print(json.dumps(dialog_data))
# (5) Summary
print("n_dialogs:", len(memory_dialogs))
print("n_turns:", sum([len(m_d.dialog.asst_turns) for m_d in memory_dialogs]))
|
comet_memory_dialog-main
|
dialog_simulator/main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
from constants import (
GoalType,
GoalMemoryRefType,
numeric_slots,
non_visual_slots,
visual_slots,
all_slots,
)
from Data import Goal, GoalParameter, MemoryTime
from utils import weighted_choice
import copy
random.seed(0)
class RuleBasedGoalGenerator:
def __init__(self, *args, **kwargs):
self.non_visual_slots = non_visual_slots
self.visual_slots = visual_slots
self.all_slots = all_slots
def sample_goals(self, *args, **kwargs):
memory_graph = kwargs.pop("memory_graph", None)
goal_config = kwargs.pop("goal_config", {})
n_min_goals = goal_config.get("n_min_goals", 3)
n_max_goals = goal_config.get("n_max_goals", 5)
n_goals = random.randint(n_min_goals, n_max_goals)
goal_type_list = [
GoalType.SEARCH,
GoalType.REFINE_SEARCH,
GoalType.GET_RELATED,
GoalType.GET_INFO,
GoalType.GET_AGGREGATED_INFO,
GoalType.SHARE,
GoalType.CHITCHAT,
]
goal_type_list_weights_start = [
1,
0,
0,
0,
0,
0,
0,
# 1, 0, 0, 0, 1, 0, 0,
]
goal_type_list_weights_mid = [
0.8,
1.1,
1.7,
1.1,
0,
0.1,
0,
# 1, 0.8, 0.8, 1, 1, 0.5, 0.5,
]
goal_type_list_weights_end = [
0.3,
0.5,
0.6,
0.5,
0,
3,
0,
# 0.5, 0.5, 0.5, 0.5, 0.5, 3, 1,
]
        # Randomly sample from the goal type list.
        # For now, the start weights force the first goal to be SEARCH and the
        # end weights strongly bias the last goal toward SHARE.
        # TODO: allow for a more flexible way of generating goal types
goal_types = (
random.choices(
population=goal_type_list, weights=goal_type_list_weights_start, k=1
)
+ random.choices(
population=goal_type_list,
weights=goal_type_list_weights_mid,
k=n_goals - 2,
)
+ random.choices(
population=goal_type_list, weights=goal_type_list_weights_end, k=1
)
)
# Make a complete goal with an accompanying set of goal parameters
# for each goal_type
goals = []
for goal_type in goal_types:
# For now, we pass in a random set of goal_parameters
goal_parameters = self.sample_goal_parameters(
goal_type, memory_graph, goal_config
)
goals.append(Goal(goal_type=goal_type, goal_parameters=goal_parameters))
return goals
def sample_goal_parameters(self, goal_type, memory_graph, goal_config):
        # Sample goal parameters according to the input goal type
# TODO: IMPLEMENT **
goal_parameters = []
parameter_ontology = goal_config["parameter_ontology"]
# (1) Pick a search filter
search_filter = {}
if goal_type in set(
[GoalType.SEARCH, GoalType.REFINE_SEARCH, GoalType.GET_RELATED]
):
if goal_type == GoalType.GET_RELATED:
n_slots = weighted_choice(population=[1, 2], weights=[0.93, 0.07])
else:
n_slots = weighted_choice(population=[1, 2], weights=[0.75, 0.25])
# Candidate slots: exclude a few slots that
# are semantically infeasible
# **** TODO ****: confirm that there is no slot to exclude
candidate_slots = self.all_slots - set([""])
search_filter_slots = random.choices(
population=list(candidate_slots), k=n_slots
)
for search_filter_slot in search_filter_slots:
# We first randomly assign a value for a randomly selected slot
if search_filter_slot == "time":
# Instead of choosing a specific datetime,
# search by year or month instead.
random_datetime = MemoryTime(
str_datetime=random.choice(
parameter_ontology["all"].get(search_filter_slot)
)
)
if random.random() > 0.1:
search_filter_value = str(MemoryTime(year=random_datetime.year))
else:
search_filter_value = str(
MemoryTime(
year=random_datetime.year, month=random_datetime.month
)
)
if goal_type == GoalType.GET_RELATED:
# A special value for refine_search: 'next' and 'prev'
# e.g. "where did we go next?"
if random.random() > 0.3:
search_filter_value = random.choice(
["right after", "right before", "on the same day"]
)
elif search_filter_slot == "location":
# TODO: Instead of choosing a specific location,
# occasionally search with a coarser query.
search_filter_value = random.choice(
parameter_ontology["all"].get(search_filter_slot)
)
if random.random() > 0.7:
                        search_filter_value = copy.deepcopy(search_filter_value)
                        # Coarsen the query: drop the fine-grained 'place' tag so the
                        # filter matches at city/state/country granularity instead.
                        search_filter_value["geo_tag"].pop("place", None)
else:
# TODO: handle subsampling of participants & activities
search_filter_value = random.choice(
parameter_ontology["all"].get(search_filter_slot)
)
if search_filter_value != "":
search_filter[search_filter_slot] = search_filter_value
# (2) Pick an object reference type
object_reference_type = GoalMemoryRefType.NOT_SPECIFIED
if goal_type in set([GoalType.GET_RELATED, GoalType.GET_INFO, GoalType.SHARE]):
object_reference_type = weighted_choice(
population=[
GoalMemoryRefType.PREV_TURN,
GoalMemoryRefType.DIALOG,
GoalMemoryRefType.GRAPH,
],
weights=[0.8, 0.2, 0.0],
)
# (3) Pick slots to request (e.g. in questions)
request_slots = []
if goal_type in set([GoalType.GET_INFO]):
# We randomly sample slots to ask
# ****** TODO *******: make sure it's not asking about
# the parameters that were already in search filter
ask_from_visual_slot = random.random() > 0.9
if ask_from_visual_slot:
# ask about visual_slots (rare): people, activity
n_request_slots = 1
request_slots.extend(
                    random.sample(self.visual_slots, n_request_slots)
)
else:
# ask about non_visual_slots: time, location
n_request_slots = weighted_choice(population=[1, 2], weights=[0.8, 0.2])
request_slots.extend(
random.sample(self.non_visual_slots, n_request_slots)
)
elif goal_type in set([GoalType.GET_RELATED]):
# We randomly sample slots to ask
# iff search_filter is empty
if len(search_filter) == 0:
n_request_slots = weighted_choice(population=[0, 1], weights=[0.4, 0.6])
request_slots.extend(random.sample(self.all_slots, n_request_slots))
elif goal_type in set([GoalType.GET_AGGREGATED_INFO]):
# ****** TODO *******
pass
# (4) Compile it into a goal parameter
goal_parameter = GoalParameter(
filter=search_filter,
reference_type=object_reference_type,
request_slots=request_slots,
)
goal_parameters.append(goal_parameter)
return goal_parameters
|
comet_memory_dialog-main
|
dialog_simulator/GoalGenerator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from Data import MemoryDialog, Goal, Frame
from typing import List
class SimulatorBase:
def register_memory_service_api(self, memory_service_api):
self.memory_service_api = memory_service_api
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def generate_nlu_label(self, goal: Goal, context: MemoryDialog) -> Frame:
# Need to define this behavior first e.g. as a config, a model, etc.
pass
def generate_uttr(self, nlu_label: Frame) -> str:
pass
|
comet_memory_dialog-main
|
dialog_simulator/SimulatorBase.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from __future__ import annotations
from constants import GoalType, GoalMemoryRefType, DialogAct
from utils import str_memories, int_memory_ids, get_slot_values_simple_from_json
import pickle
from datetime import datetime
class MemoryDialog:
def __init__(self, *args, **kwargs):
self.memory_graph = kwargs.pop("memory_graph", {}) # JSON format
self.dialog = kwargs.pop("dialog", None)
self.domain = kwargs.pop("domain", None)
def initialize(self):
self.dialog = Dialog(domain=self.domain)
def update(self, *args, **kwargs):
# Reflects change in scenes or in dialogs
# TODO: implement
if "memory_graph" in kwargs:
self.memory_graph = kwargs.pop("memory_graph")
if "dialog" in kwargs:
self.dialog = kwargs.pop("dialog")
def is_goal_met(self, goal):
# TODO: implement a more robust goal checking logic
# For now, we look where there is a hanging 'disambiguation' request
if self.dialog.asst_turns == []:
return False
last_asst_turn = self.dialog.asst_turns[-1]
goal_met = not last_asst_turn.is_disambiguation_request()
return goal_met
def to_dict(self):
out = self.dialog.to_dict()
out["memory_graph_id"] = self.memory_graph.get_id()
return out
def get_memories(self):
return self.memory_graph.get_memories()
class MemoryGraph:
def __init__(self, *args, **kwargs):
json_data = kwargs.pop("data", {})
self.load_data(json_data)
def load_data(self, json_data):
self.id = json_data["memory_graph_id"]
self.memories = [Memory(data=m) for m in json_data["memories"]]
self.groups = json_data["memory_groups"]
# Construct the memory to day/event mapping.
self.trip_map = {}
self.day_map = {}
self.event_map = {}
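        # memory_groups is nested as trips -> days -> events, where each event
        # lists the memory ids it contains; the maps below index each memory id
        # back to its trip / day / event position.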
for trip_ind, trip_datum in enumerate(self.groups):
for day_ind, day_datum in enumerate(trip_datum["days"]):
for event_ind, event_datum in enumerate(day_datum["events"]):
for memory_id in event_datum["memories"]:
self.trip_map[memory_id] = trip_ind
self.day_map[memory_id] = day_ind
self.event_map[memory_id] = event_ind
def get_day_events(self, memory_id):
"""Get the day events given memory_id."""
trip_datum = self.groups[self.trip_map[memory_id]]
return trip_datum["days"][self.day_map[memory_id]]
def get_events(self, memory_id):
"""Get the events given memory_id."""
day_datum = self.get_day_events(memory_id)
return day_datum["events"][self.event_map[memory_id]]
def get_id(self):
return self.id
def get_memories(self):
return self.memories
def get_memory_by_id(self, memory_id):
for memory in self.memories:
if int(memory.data["memory_id"]) == int(memory_id):
return memory
def get_memories_by_ids(self, memory_ids):
return [self.get_memory_by_id(memory_id) for memory_id in memory_ids]
def get_memory_url(self, memory_id):
for memory in self.memories:
if memory.data["memory_id"] == memory_id:
return memory.get_memory_url()
return []
class Memory:
def __init__(self, *args, **kwargs):
self.data = kwargs.pop("data", {})
self.load_data(self.data)
def __str__(self):
return "Memory ID: {id} ({narrations}), Time: {time}, Loc: {location}".format(
id=self.data["memory_id"],
narrations=self.data["narrations"],
time=self.data["time"],
location=str(self.data["location"]["geo_tag"].get("place", "")),
)
def load_data(self, json_data):
# ** TODO **
"""
self.id = json_data['memory_id']
self.time = json_data['time']
self.start_time = json_data['start_time']
self.end_time = json_data['end_time']
self.narrations = json_data['narrations']
self.media = json_data['media']
self.location = json_data['location']
self.participant = json_data['participant']
self.activity = json_data['activity']
self.object = json_data['object']
"""
pass
def get_memory_url(self):
return [a["url"] for a in self.data["media"]]
class ActAttributes:
def __init__(self, *args, **kwargs):
self.slot_values = kwargs.pop("slot_values", {}) # slot_value pairs
self.slot_values_resolved = kwargs.pop("slot_values_resolved", {})
self.request_slots = kwargs.pop("request_slots", [])
self.memories = kwargs.pop("memories", []) # list of Memory objects
def __str__(self):
out = "{slot_values} | {request_slots} | {memories}".format(
slot_values=str(self.slot_values),
request_slots=str(self.request_slots),
memories=str_memories(self.memories),
)
return out
def to_dict(self):
return {
"slot_values": self.slot_values,
#'slot_values_resolved': self.slot_values_resolved,
"request_slots": self.request_slots,
"memories": int_memory_ids(self.memories),
}
class Frame:
def __init__(self, uttr: str, dialog_act: DialogAct, act_attributes: ActAttributes):
self.uttr = uttr
self.dialog_act = dialog_act
self.act_attributes = act_attributes
def __str__(self):
out = "{uttr} | {dialog_act} | {act_attributes}".format(
uttr=str(self.uttr),
dialog_act=self.dialog_act.value,
act_attributes=str(self.act_attributes),
)
return out
def to_dict(self):
return {
"uttr": self.uttr,
"act": self.dialog_act.value,
"act_attributes": self.act_attributes.to_dict(),
}
def is_disambiguation_request(self):
return self.dialog_act in set(
[DialogAct.REQUEST_DISAMBIGUATE, DialogAct.ASK_DISAMBIGUATE]
)
def is_disambiguation_response(self):
return self.dialog_act in set([DialogAct.INFORM_DISAMBIGUATE])
class Turn:
def __init__(self, frames, speaker, goal=None):
self.frames = frames
self.speaker = speaker
self.goal = goal
def __str__(self):
out = "{frames}".format(
frames=" / ".join([str(frame) for frame in self.frames])
)
return out
    def is_disambiguation_request(self):
        return any(frame.is_disambiguation_request() for frame in self.frames)
    def is_disambiguation_response(self):
        return any(frame.is_disambiguation_response() for frame in self.frames)
def get_uttr(self):
return ". ".join([f.uttr for f in self.frames])
def get_frames_to_dict(self):
return [f.to_dict() for f in self.frames]
def has_dialog_acts(self, dialog_acts):
"""
Return whether this turn contains
any of the input target dialog acts in its frames.
"""
for frame in self.frames:
if frame.dialog_act in dialog_acts:
return True
return False
class Dialog:
def __init__(self, idx=None, domain=None):
self.user_turns = []
self.asst_turns = []
self.goals = []
self.api_calls = []
self.api_results = []
self.idx = idx
self.domain = domain
self.mentioned_memory_ids = set([])
def __str__(self):
str_turns = []
for i in range(len(self.user_turns)):
user_turn = self.user_turns[i]
asst_turn = self.asst_turns[i]
str_turns.append(f"[Turn {i}] U: {user_turn}, A: {asst_turn}")
        return str(str_turns)
def to_dict(self):
out = {
"dialogue": [],
"dialogue_idx": self.idx,
"domain": self.domain,
"mentioned_memory_ids": list(self.mentioned_memory_ids),
}
for i in range(len(self.user_turns)):
user_turn = self.user_turns[i]
asst_turn = self.asst_turns[i]
goal = self.goals[i]
api_call = self.api_calls[i]
turn_data = {
"turn_idx": i,
"system_transcript": asst_turn.get_uttr(),
"system_transcript_annotated": asst_turn.get_frames_to_dict(),
"transcript": user_turn.get_uttr(),
"transcript_annotated": user_turn.get_frames_to_dict(),
"goal_type": str(goal.goal_type),
"api_call": api_call.to_dict(),
#'api_result': api_result.to_dict()
}
try:
# Some earlier data is missing api_result
api_result = self.api_results[i]
turn_data["api_result"] = api_result.to_dict()
            except IndexError:
                api_result = {}
out["dialogue"].append(turn_data)
return out
def add_turn(self, user_turn, asst_turn):
self.add_user_turn(user_turn)
self.add_asst_turn(asst_turn)
def add_goal(self, goal):
self.goals.append(goal)
def add_api_call(self, api_call):
self.api_calls.append(api_call)
def add_api_result(self, api_result):
self.api_results.append(api_result)
def add_user_turn(self, user_turn):
self.user_turns.append(user_turn)
for frame in user_turn.frames:
for m in frame.act_attributes.memories:
self.mentioned_memory_ids.add(m.data["memory_id"])
def add_asst_turn(self, asst_turn):
self.asst_turns.append(asst_turn)
for frame in asst_turn.frames:
for m in frame.act_attributes.memories:
self.mentioned_memory_ids.add(m.data["memory_id"])
class APIRequest:
def __init__(self, *args, **kwargs):
self.call_type = kwargs.pop("call_type", None)
self.parameters = kwargs.pop("parameters", None)
self.memory_dialog = kwargs.pop("memory_dialog", None)
def __str__(self):
out = "call_type: {call_type}, parameters: {parameters}".format(
call_type=self.call_type, parameters=str(self.parameters)
)
return out
def to_dict(self, simple=False):
if self.parameters is not None:
parameters = {
"slot_values": self.parameters.get("slot_values", []),
"request_slots": self.parameters.get("request_slots", {}),
"memories": int_memory_ids(self.parameters.get("memories"))
if "memories" in self.parameters
else [],
}
if simple:
parameters["slot_values"] = get_slot_values_simple_from_json(
parameters["slot_values"]
)
else:
parameters = {}
return {"call_type": str(self.call_type), "parameters": parameters}
class APIResponse:
def __init__(self, *args, **kwargs):
self.status = kwargs.pop("status", None)
self.request = kwargs.pop("request", None)
self.results = kwargs.pop("results", {})
def __str__(self):
out = "status: {status}, results: {results}".format(
status=self.status, results=str(self.results)
)
return out
def to_dict(self):
return {
"status": str(self.status),
"results": {
"retrieved_memories": int_memory_ids(
self.results.get("retrieved_memories", [])
),
"retrieved_info": self.results.get("retrieved_info", []),
},
}
class GoalParameter:
def __init__(self, *args, **kwargs):
self.filter = kwargs.pop("filter", {}) # slot_value pairs
self.reference_type = kwargs.pop(
"reference_type", GoalMemoryRefType.NOT_SPECIFIED
)
self.request_slots = kwargs.pop(
"request_slots", []
) # need to map to Multimodal Context
def __str__(self):
out = "{filter} | {reference_type} | {request_slots}".format(
filter=str(self.filter),
reference_type=self.reference_type.value,
request_slots=str(self.request_slots),
)
return out
class Goal(object):
def __init__(self, *args, **kwargs):
self.goal_type = kwargs.pop("goal_type", GoalType.UNKNOWN)
self.goal_parameters = kwargs.pop("goal_parameters", [])
def __str__(self):
out = "{goal_type} | {goal_parameters}".format(
goal_type=str(self.goal_type),
goal_parameters=[str(p) for p in self.goal_parameters],
)
return out
class MemoryTime(object):
NOT_SPECIFIED = -1
def __init__(self, *args, **kwargs):
# Allows for not_specified time for easy calculation
self.year = kwargs.pop("year", self.NOT_SPECIFIED)
self.month = kwargs.pop("month", self.NOT_SPECIFIED)
self.day = kwargs.pop("day", self.NOT_SPECIFIED)
self.hour = kwargs.pop("hour", self.NOT_SPECIFIED)
self.minute = kwargs.pop("minute", self.NOT_SPECIFIED)
self.second = kwargs.pop("second", self.NOT_SPECIFIED)
if "str_datetime" in kwargs:
self.load_datetime(kwargs.pop("str_datetime"))
def load_datetime(self, str_datetime: str):
# datetime: "2021-04-10 10:00:00"
try:
datetime_obj = datetime.fromisoformat(str_datetime)
self.year = datetime_obj.year
self.month = datetime_obj.month
self.day = datetime_obj.day
self.hour = datetime_obj.hour
self.minute = datetime_obj.minute
self.second = datetime_obj.second
        except ValueError:
year_month = str_datetime.split("-")
if len(year_month) == 1:
self.year = int(year_month[0])
else:
self.year = int(year_month[0])
self.month = int(year_month[1])
    def is_within(self, target_memory_time: "MemoryTime"):
# return whether self is within target_memory_time
# for now, we assume that either year and/or month is provided
if target_memory_time.year is not self.NOT_SPECIFIED:
if self.year != target_memory_time.year:
return False
if target_memory_time.month is not self.NOT_SPECIFIED:
if self.month != target_memory_time.month:
return False
return True
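    # Example (mirrors the checks in the __main__ block below):
    #   MemoryTime(year=2016, month=3).is_within(MemoryTime(year=2016))           -> True
    #   MemoryTime(year=2016, month=3).is_within(MemoryTime(year=2016, month=12)) -> False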
def __str__(self):
if self.day is self.NOT_SPECIFIED:
if self.month is self.NOT_SPECIFIED:
if self.year is self.NOT_SPECIFIED:
return ""
else:
return "%d" % self.year
else:
return "%d-%02d" % (self.year, self.month)
full_format = "%d-%02d-%02d %02d:%02d:%02d" % (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
)
return full_format
class MemoryLocation(object):
def __init__(self, *args, **kwargs):
self.data = kwargs.pop("data", {})
    def is_within(self, target_memory_location: "MemoryLocation"):
# return whether self is within target_memory_time
memory_geo_tag = self.data["geo_tag"]
target_geo_tag = target_memory_location.data["geo_tag"]
if "place" in target_geo_tag:
return target_geo_tag["place"] == memory_geo_tag.get("place", "")
elif "city" in target_geo_tag:
return target_geo_tag["city"] == memory_geo_tag.get("city", "")
elif "state" in target_geo_tag:
return target_geo_tag["state"] == memory_geo_tag.get("state", "")
elif "country" in target_geo_tag:
return target_geo_tag["country"] == memory_geo_tag.get("country", "")
return False
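    # Matching granularity follows the most specific tag present in the target
    # filter (place, then city, state, country), so a filter without a 'place'
    # tag matches any memory in the same city / state / country.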
if __name__ == "__main__":
# Memory Time operation test
memory_time_1 = MemoryTime(year=2016, month=3)
memory_time_2 = MemoryTime(year=2016, month=12)
memory_time_3 = MemoryTime(year=2016)
memory_time_4 = MemoryTime(year=2020)
memory_time_5 = MemoryTime(str_datetime="2020-10-23 10:00:00")
memory_time_6 = MemoryTime(str_datetime="2020-10")
print(memory_time_1)
print(memory_time_2)
print(memory_time_3)
print(memory_time_4)
print(memory_time_5)
print(memory_time_6)
print(memory_time_1.is_within(memory_time_2))
print(memory_time_1.is_within(memory_time_3))
print(memory_time_1.is_within(memory_time_4))
print(memory_time_5.is_within(memory_time_4))
goal = Goal(
goal_type=GoalType.GET_RELATED,
goal_parameters=[GoalParameter(filter={"time": memory_time_5})],
)
print(goal)
# Memory Graph Test
import json
path_memory_graph_list = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/pilot/memory_may21_v1_100graphs.json"
memory_graph_list = json.load(open(path_memory_graph_list, "r"))
target_memory_graph_id = "St8BTzNuLCRb"
target_memory_graph_idx = -1
for i, memory_graph in enumerate(memory_graph_list):
if target_memory_graph_id == memory_graph["memory_graph_id"]:
target_memory_graph_idx = i
break
print(target_memory_graph_idx)
sample_memory_graph = memory_graph_list[target_memory_graph_idx]
mg = MemoryGraph(data=sample_memory_graph)
target_memory_index = 1
day_events = mg.get_day_events(memory_id=target_memory_index)
events = mg.get_events(memory_id=target_memory_index)
print("Target memory id:", target_memory_index)
print("Day events indices:", day_events)
print("Events indices:", events)
print("Event memories:", [str(mg.memories[idx]) for idx in events["memories"]])
|
comet_memory_dialog-main
|
dialog_simulator/Data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
from typing import Dict, Tuple
from Data import APIRequest, APIResponse, MemoryTime, MemoryLocation
from constants import API_CALL_TYPE, API_STATUS, GoalType
from utils import str_memory
from datetime import datetime
random.seed(0)
class MemoryServiceAPI:
def __init__(self, *args, **kwargs):
self.metadata = kwargs.pop("metadata", {})
def call_api(self, api_request: APIRequest) -> APIResponse:
status = None
results = None
if api_request.call_type == API_CALL_TYPE.SEARCH:
results, status = self.search(api_request)
elif api_request.call_type == API_CALL_TYPE.REFINE_SEARCH:
results, status = self.refine_search(api_request)
elif api_request.call_type == API_CALL_TYPE.GET_RELATED:
results, status = self.get_related(api_request)
elif api_request.call_type == API_CALL_TYPE.GET_INFO:
results, status = self.get_info(api_request)
elif api_request.call_type == API_CALL_TYPE.SHARE:
results, status = self.share(api_request)
# Construct a response
api_response = APIResponse(status=status, results=results, request=api_request)
return api_response
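    # Rough usage sketch (hypothetical parameter values, mirroring how the
    # simulators build requests):
    #   request = APIRequest(
    #       call_type=API_CALL_TYPE.SEARCH,
    #       parameters={"slot_values": {"activity": [...]}, "n_max_results": 2},
    #       memory_dialog=memory_dialog,
    #   )
    #   response = MemoryServiceAPI().call_api(request)
    #   memories = response.results["retrieved_memories"]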
def search(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
search_filter = api_request.parameters["slot_values"]
memory_dialog = api_request.memory_dialog
# Unpack more parameters
n_max_results = api_request.parameters.get("n_max_results", 2)
exclude_memory_ids = api_request.parameters.get("exclude_memory_ids", set())
# Prepare search candidates
search_candidates = memory_dialog.get_memories()
# Prepare search output
retrieved_memories = []
# Execute search
for memory in search_candidates:
            # If there was an exclusion request, skip the excluded memories
if int(memory.data["memory_id"]) in exclude_memory_ids:
continue
# TODO: ****** implement *****
meet_criteria = True
for slot, value in search_filter.items():
# TODO: handle special cases
if slot == "time":
if search_filter.get("time", None) in {
"right before",
"right after",
"on the same day",
}:
                            # This is an error case that can happen
                            # due to incorrect model behavior.
print("Wrong request ...")
meet_criteria = False
break
memory_time = MemoryTime(str_datetime=memory.data["time"])
search_time = MemoryTime(str_datetime=value)
if not memory_time.is_within(search_time):
meet_criteria = False
break
elif slot == "location":
memory_location = MemoryLocation(data=memory.data["location"])
search_location = MemoryLocation(data=value)
if not memory_location.is_within(search_location):
meet_criteria = False
break
elif slot == "participant":
memory_participants = {
p["name"] for p in memory.data["participant"]
}
search_participants = [p["name"] for p in value]
for search_participant in search_participants:
if search_participant not in memory_participants:
meet_criteria = False
break
elif slot == "activity":
memory_activities = {
a["activity_name"] for a in memory.data["activity"]
}
search_activities = [a["activity_name"] for a in value]
for search_activity in search_activities:
if search_activity not in memory_activities:
meet_criteria = False
break
else:
# General cases
                    if isinstance(memory.data[slot], list):
                        if value not in memory.data[slot]:
                            meet_criteria = False
                            break
else:
if value != memory.data[slot]:
meet_criteria = False
break
if meet_criteria:
retrieved_memories.append(memory)
# ** TODO: check if search_filter and retrieved_memories match **
# print('=====')
# print('search_filter', search_filter)
# print('-----')
# print('retrieved_memories', retrieved_memories)
# Rank and return only n_results
n_results = random.randint(1, n_max_results)
if len(retrieved_memories) > n_results:
random.shuffle(retrieved_memories)
retrieved_memories = retrieved_memories[:n_results]
# Output
results = {"retrieved_memories": retrieved_memories}
if results["retrieved_memories"] != []:
status = API_STATUS.SEARCH_FOUND
else:
status = API_STATUS.SEARCH_NOT_FOUND
return (results, status)
def refine_search(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Adjust the search based on the memory_dialog
memory_dialog = api_request.memory_dialog
# Search for previous search filter
prev_filter = None
for i in reversed(range(len(memory_dialog.dialog.asst_turns))):
asst_turn = memory_dialog.dialog.asst_turns[i]
turn_goal = asst_turn.goal
if turn_goal.goal_type in {GoalType.SEARCH, GoalType.GET_RELATED}:
# TODO: change it to reflect multi goal parameters
prev_filter = turn_goal.goal_parameters[0].filter
break
# Reconstruct the goal to include the previous search parameters
if prev_filter is not None:
search_filter = api_request.parameters["slot_values"]
# Previous request
for k, v in prev_filter.items():
search_filter[k] = v
# New request
for k, v in api_request.parameters["slot_values"].items():
search_filter[k] = v
api_request.parameters["slot_values"] = search_filter
else:
# This dialog is not allowed -- Refine should always
# happen after a Search or GET_RELATED. Hence abort.
### TODO: abort gracefully
print("***** Refine error *****")
assert False
# Exclude memories that are already discussed
api_request.parameters[
"exclude_memory_ids"
] = memory_dialog.dialog.mentioned_memory_ids
return self.search(api_request)
def get_related(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
search_filter = api_request.parameters["slot_values"]
if search_filter.get("time", None) in {
"right before",
"right after",
"on the same day",
}:
# This is a special request to retrieve
# related memories in the same time group (from the same day)
return self.get_connected(api_request, search_filter.get("time"))
else:
# Treat it as a modified search request
# where slot values are taken from the input memories
request_slots = api_request.parameters["request_slots"]
memories = api_request.parameters["memories"]
memory_dialog = api_request.memory_dialog
# If request_slots is not specified, randomly sample a few slots
if request_slots == []:
request_slot_candidates = {
"time",
"location",
"activity",
"participant",
}
# If a value is specified for a slot, exclude it
# from the candidates
request_slot_candidates -= search_filter.keys()
request_slots = random.choices(
population=list(request_slot_candidates), k=random.randint(1, 1)
)
for request_slot in request_slots:
for memory in memories:
request_slot_value = memory.data[request_slot]
# TODO: make it take multiple values
search_filter[request_slot] = request_slot_value
# Make a search request with the updated filter
api_request.parameters["slot_values"] = search_filter
# Exclude memories that are already discussed
api_request.parameters[
"exclude_memory_ids"
] = memory_dialog.dialog.mentioned_memory_ids
return self.search(api_request)
def get_connected(
self, api_request: APIRequest, time_constraint: str
) -> Tuple[Dict, API_STATUS]:
_ = api_request.parameters["slot_values"]
## TODO: handle multiple memories
target_memory = api_request.parameters["memories"][0]
memory_graph = api_request.memory_dialog.memory_graph
target_memory_index = -1
for i, memory in enumerate(memory_graph.memories):
if memory.data["memory_id"] == target_memory.data["memory_id"]:
target_memory_index = i
break
# Get connected memories
connected_memory_indices = memory_graph.get_events(target_memory_index)[
"memories"
]
connected_memories = []
# Compare time
target_time = datetime.fromisoformat(target_memory.data["time"])
for idx in connected_memory_indices:
if idx == target_memory_index:
continue
connected_memory = memory_graph.memories[idx]
connected_memory_time = datetime.fromisoformat(
connected_memory.data["time"]
)
if time_constraint == "right after":
if target_time < connected_memory_time:
connected_memories.append(connected_memory)
elif time_constraint == "right before":
if target_time > connected_memory_time:
connected_memories.append(connected_memory)
elif time_constraint == "on the same day":
connected_memories.append(connected_memory)
# Output
results = {"retrieved_memories": connected_memories}
if results["retrieved_memories"] != []:
status = API_STATUS.SEARCH_FOUND
else:
status = API_STATUS.SEARCH_NOT_FOUND
return (results, status)
def get_info(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
request_slots = api_request.parameters.get("request_slots", [])
memories = api_request.parameters.get("memories", [])
# Unpack more parameters
# TODO
# Prepare get_info output
lookup_results = {
"retrieved_memories": memories,
"retrieved_info": {},
"request_slots": request_slots,
}
# If request_slots is not specified, randomly sample a few slots
if request_slots == []:
if len(memories) > 0:
memory = memories[0]
request_slots = [k for k in memory.data if random.random() > 0.8]
def summarize_info(memory_data, slot):
if slot == "location":
return memory_data[slot]["geo_tag"]
else:
return memory_data[slot]
# Look up info
for memory in memories:
# Add the requested info
s_memory = str_memory(memory, verbose=False)
if request_slots == []:
# Give all relevant information
lookup_results["retrieved_info"][s_memory] = {
slot: summarize_info(memory.data, slot)
for slot in ["time", "location", "participant", "activity"]
}
else:
lookup_results["retrieved_info"][s_memory] = {}
for slot in request_slots:
if slot in memory.data:
lookup_results["retrieved_info"][s_memory][
slot
] = summarize_info(memory.data, slot)
# Add extra info
# TODO
# TODO: status can be INFO_NOT_FOUND
status = API_STATUS.INFO_FOUND
return (lookup_results, status)
def share(self, api_request) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
memories = api_request.parameters["memories"]
# Prepare output
results = {"retrieved_memories": memories}
status = API_STATUS.SHARED
return (results, status)
|
comet_memory_dialog-main
|
dialog_simulator/MemoryServiceAPI.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from setuptools import setup, find_packages
setup(name='cmr', version='1.0', packages=find_packages())
|
CMR-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# %%
import json
import argparse
import pandas as pd
from argparse import Namespace
import numpy as np
import glob, os
from pandas.core import base
os.chdir("/private/home/yuchenlin/SemanticDebugger")
base_dir = "experiments/results/qa/"
split = "test"
num_streams = 6
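# Arithmetic mean of a metric series over the stream (the helper name
# presumably abbreviates "simple moving average").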
def sma(values):
return float(np.mean(values))
def show_result(path):
if path == "experiments/results/qa/qa_er_lr=3e-5_ep=10_l2w=0_rs=64_rf=1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8-test[5]_seed=42_result.json":
print()
o = json.load(open(path))
r = {}
debugger_args = eval(o["debugger_args"])
data_args = eval(o["data_args"])
r["stream_id"] = data_args.stream_id
path = path.replace(base_dir, "")
r["path"] = path.replace(",", "|")
path_info = path.replace(",", "|").replace("_", "|").replace("-", "|").split("|")
if path_info[-2].startswith("seed="):
r["seed"] = path_info[-2][4:]
elif "nonecl" in path:
r["seed"] = "N/A (42)"
else:
return None
r["standard_path"] = "|".join(path_info[:-2])
for _ind in range(10):
txt = f"test[{_ind}]"
if txt in r["standard_path"]:
r["standard_path"] = r["standard_path"].replace(txt, "test[]")
break
# r["prefix"] = prefix
r["method_class"] = o["method_class"]
r["cl_method"] = o["method_class"]
if r["cl_method"] == "simple_cl":
if hasattr(debugger_args, "diff_loss_weight"):
r["cl_method"] = f'{r["cl_method"]}-l2w={debugger_args.diff_loss_weight}'
elif r["cl_method"] == "online_ewc":
        ewc_lambda = debugger_args.ewc_lambda
        ewc_gamma = debugger_args.ewc_gamma
r["cl_method"] = f'{r["cl_method"]}-{ewc_lambda}-{ewc_gamma}'
elif r["cl_method"] == "er":
replay_size = debugger_args.replay_size
replay_freq = debugger_args.replay_frequency
r["cl_method"] = f'{r["cl_method"]}-{replay_size}-{replay_freq}'
if hasattr(debugger_args, "diff_loss_weight"):
r["cl_method"] = f'{r["cl_method"]}-l2w={debugger_args.diff_loss_weight}'
elif r["cl_method"] == "mir":
replay_size = debugger_args.replay_size
replay_freq = debugger_args.replay_frequency
replay_candidate_size = debugger_args.replay_candidate_size
mir_abalation_args = debugger_args.mir_abalation_args
r["cl_method"] = f'{r["cl_method"]}-{replay_size}/{replay_candidate_size}-{replay_freq}-{mir_abalation_args}'
if hasattr(debugger_args, "diff_loss_weight"):
r["cl_method"] = f'{r["cl_method"]}-l2w={debugger_args.diff_loss_weight}'
# replay_size = debugger_args.replay_size
elif r["cl_method"] == "index_cl_bart_io_index":
replay_size = debugger_args.replay_size
replay_freq = debugger_args.replay_frequency
r["cl_method"] = f'{r["cl_method"]}-{replay_size}-{replay_freq}'
r["steps"] = o["model_update_steps"]
r["lr"] = 0 if r["cl_method"]=="none_cl" else debugger_args.learning_rate
r["num_epochs"] = 0 if r["cl_method"]=="none_cl" else debugger_args.num_epochs
start = data_args.submission_stream_data.index("submission_stream.") + len("submission_stream.")
end = data_args.submission_stream_data.index(".json")
# if "-test.json" in data_args.submission_stream_data:
# end = data_args.submission_stream_data.index("-test.json")
# elif "-val.json" in data_args.submission_stream_data:
# end = data_args.submission_stream_data.index("-val.json")
ns_config_str = data_args.submission_stream_data[start:end]
r["ns_config"] = ns_config_str
if "-val" in ns_config_str:
ns_config_str = ns_config_str.replace("-val", "")
mode = "val"
elif "-test" in ns_config_str:
ns_config_str = ns_config_str.replace("-test", "")
mode = "test"
ns_config = eval(f"dict({ns_config_str})")
r.update(ns_config)
online = o["online_eval_results"]
EFRs = [item["EFR"] for item in online]
UKRs = [item["UKR"] for item in online if "UKR" in item]
OKRs = [item["OKR"] for item in online if "OKR" in item]
KGs = [item["KG"] for item in online if "KG" in item]
CSRs = [item["CSR"] for item in online if "CSR" in item]
if mode!="val" and len(EFRs) != ns_config["T"]:
print(f"Error: ----> path={path}; len(EFRs)={len(EFRs)}")
return None
last_step = online[-1]
if last_step["timecode"] != ns_config["T"] -1:
print(f'Error: ----> path={path}; last_step["timecode"]={last_step["timecode"]} the results does not match the length')
return None
r["AEFR(T)"] = float(np.mean(EFRs))
r["AUKR"] = sma(UKRs)
r["AOKR"] = sma(OKRs)
r["ACSR"] = sma(CSRs)
r["AKG"] = sma(KGs)
r["AOEC"] = float(np.mean([r["AUKR"], r["AOKR"], r["ACSR"], r["AKG"]]))
r["UKR(T)"] = UKRs[-1]
r["OKR(T)"] = OKRs[-1]
r["CSR(T)"] = CSRs[-1]
r["KG(T)"] = KGs[-1]
r["OEC(T)"] = float(np.mean([r["UKR(T)"], r["OKR(T)"], r["CSR(T)"], r["KG(T)"]]))
return r
# %%
def _sort(column):
# def tm_sorter(column):
"""Sort function"""
cl_methods = ['none_cl', "simple_cl", "online_ewc", "er", "mir", "index_cl_bart_io_index"]
correspondence = {team: order for order, team in enumerate(cl_methods)}
return column.map(correspondence)
# %%
if __name__ == '__main__':
# %%
os.makedirs(f"{base_dir}/csvs/", exist_ok=True)
result_files = []
for file in glob.glob(f'{base_dir}/*.json'):
if split not in file:
continue
result_files.append(file)
print("\n".join(result_files))
# %%
results = []
for r_file in result_files:
# print(r_file)
r = show_result(r_file)
if r:
results.append(r)
# print(results)
results.sort(key=lambda x:x["cl_method"])
results = pd.DataFrame(results)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
# %%
results.to_csv(f"{base_dir}/csvs/full_results.csv", index=False, sep=",")
for ns_config in results.ns_config.unique():
# print(ns_config)
r = results[results["ns_config"]==ns_config]
# r = r[((r["lr"]==3.5e-5) & (r["num_epochs"]==10)) | (r["cl_method"] == "none_cl") | (r["cl_method"] == "none_cl_offline_eval")]
items = []
for standard_path in results.standard_path.unique():
r_r = results[results["standard_path"]==standard_path]
# if len(r_r) != num_streams:
# print(f"{standard_path} does not have {num_streams} runs, so we skip it.")
# continue
# %%
# print(r_r)
do_average_across_stream_id = False
do_average_across_seed = True
shown_item = dict()
if do_average_across_stream_id:
records = r_r.to_dict("records")
shown_item = records[0]
shown_item["stream_id"] = -1
# print(shown_item)
if do_average_across_seed:
r_r = r_r[(r_r["stream_id"] == 5) & (r_r["stream_id"] != "N/A (42)")]
if r_r.empty:
continue
records = r_r.to_dict("records")
shown_item = records[0]
shown_item["seed"] = -1
print(f"len(r_r)={len(r_r)}")
keys = ["AEFR(T)", "AUKR", "AOKR", "ACSR", "AKG", "UKR(T)", "AOEC", "OKR(T)", "CSR(T)", "KG(T)", "OEC(T)"]
for key in keys:
shown_item[key] = r_r[key].mean()
shown_item["OEC(T)-std"] = r_r["OEC(T)"].std()
shown_item["OEC(T)-min"] = r_r["OEC(T)"].min()
shown_item["OEC(T)-median"] = r_r["OEC(T)"].median()
shown_item["OEC(T)-max"] = r_r["OEC(T)"].max()
items.append(shown_item)
r = pd.DataFrame(items)
if "AEFR(T)" not in r:
print()
r = r[(r["AEFR(T)"]>=0.9) | (r["cl_method"]=="none_cl")]
r = r.sort_values(by=["steps", "lr", "num_epochs", "cl_method"])
r = r.sort_values(by=["cl_method"], key = lambda x: x.str.len())
r = r.sort_values(by="method_class", key=_sort, kind="mergesort")
r = r.drop(columns=["ns_config", "method_class", "path", "standard_path", "ACSR", "AOEC", "AKG", "AUKR", "AOKR"])
# r = r.drop(columns=["lr", "num_epochs"])
r.to_csv(f"{base_dir}/csvs/{ns_config}.csv", index=False, sep=",")
print("-"*50)
print(f'ns_config="{ns_config.replace(",", " & ")}",')
print(open(f"{base_dir}/csvs/{ns_config}.csv").read())
# %%
|
CMR-main
|
experiments/report_results.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import pandas as pd
import os
import glob
from io import StringIO
import altair as alt
from cmr.notebooks.draw_utils import draw_grouped_bars
# os.chdir()
# os.makedirs("csvs/", exist_ok=True)
# result_files = []
all_data = []
header = ""
for file in glob.glob("experiments/results/qa/csvs/*.csv"):
print(file)
lines = open(file).read().splitlines()
header = lines[0]
data = lines[1:]
all_data += data
# result_files.append(file)
# data = pd.read_csv(file)
# print(all_data)
# print(len(all_data))
all_data.insert(0, header)
df = pd.read_csv(StringIO("\n".join(all_data)))
cl_methods = ['none_cl', 'simple_cl-l2w=0.0', 'online_ewc-250.0-0.9', 'er-32-3-l2w=0.0', 'mir-32/256-3-none-l2w=0.0', 'mir-32/256-3-largest_afterloss-l2w=0.0']
cl_prefix = ["Frozen", "CFT", "OnEWC", "ER", "MIR", "MaxLoss"]
for a,b in zip(cl_methods, cl_prefix):
df = df.replace(a, b)
df.rename(columns={'OEC(T)':'OECT'}, inplace=True)
# df = df[df.cl_method != "Frozen"]
settings = [(0.9, 0.5, 0.8), (0.9, 0.1, 0.8), (0.9, 0.9, 0.8), (0.9, 0.5, 0.2), (0.9, 0.5, 0.5), (0.1, 0.5, 0.8)]
# (0.9, 0.1, 0.8), (0.9, 0.9, 0.8)
table = []
for alpha, beta, gamma in settings:
data = df[(df["alpha"]==alpha) & (df["beta"]==beta) & (df["gamma"]==gamma)]
prefix = f"$alpha$={alpha},$beta$={beta},$gamma$={gamma}"
# print()
OECTs = {c: data[data.cl_method==c].iloc[0]["OECT"] for c in cl_prefix[:]}
OECTs["prefix"] = prefix
table.append(OECTs)
# print(data)
# print(OECTs)
# color_dom = cl_prefix
# color_range = ["gray", "blue", "orange", "green", "black"]
# fig = draw_grouped_bars(df=data, fig_title=f"{alpha}-{beta}-{gamma}", y_scale=[0.6, 0.68], x_key="cl_method", y_key="OECT", y_title="", height=250, width=175, color_dom=color_dom, color_range=color_range, bin_width=30)
# color=alt.Color("cl_method", scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom, legend=None)
# print(data)
# y=alt.Y("OECT:Q", scale=alt.Scale(domain=[0.3, 0.7]), axis=alt.Axis(grid=False))
# fig = alt.Chart(data).mark_bar(clip=True).encode(x="cl_method", y=y)
# fig.save(f'figures/settings/{alpha}-{beta}-{gamma}.png', scale_factor=3.0)
# fig.save("")
table = pd.DataFrame(table)
print(table.to_csv(index=False,))
|
CMR-main
|
experiments/bakcup/report_all_settings.py
|