python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import hydra
import matplotlib.pyplot as plt
import pytorch_lightning as pl
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.utils.data.dataloader import DataLoader
from dataset_utils import get_datasets
from lit_utils import LitModelCFBased
logger = logging.getLogger(__name__)
def get_loader_loss(model_h, dataloader) -> tuple:
criterion = torch.nn.BCEWithLogitsLoss(reduction="none")
loss_list = []
cf_embeddings_list = []
with torch.no_grad():
for cf_vectors, y in dataloader:
logits = model_h(cf_vectors)
loss = criterion(logits, y.float())
loss_list.append(loss.detach().cpu())
cf_embeddings = model_h.get_embeddings(cf_vectors)
cf_embeddings_list.append(cf_embeddings.cpu())
loss_vec = torch.vstack(loss_list)
cf_embeddings_matrix = torch.vstack(cf_embeddings_list)
return loss_vec, cf_embeddings_matrix
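# Usage sketch (illustrative only): with a trained LitModelCFBased `lit_h` and a
# non-shuffled DataLoader, the call returns a per-sample, per-class BCE loss
# matrix and the CF embeddings in dataset order, e.g.
# >>> loss_vec, cf_embeddings = get_loader_loss(lit_h, dataloader)
# >>> loss_vec.shape        # (num_samples, num_classes)
# >>> cf_embeddings.shape   # (num_samples, embedding_dim)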
@hydra.main(
config_path="../configs",
config_name="train_model_cf_based",
)
def train_model_cf_based(cfg: DictConfig):
t_start = time.time()
logger.info(cfg)
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(f"{out_dir=}")
pl.utilities.seed.seed_everything(cfg.seed)
logger.info(f"{torch.cuda.is_available()=}")
# Configure logging
tb_logger = pl_loggers.TensorBoardLogger(out_dir)
tb_logger.log_hyperparams(OmegaConf.to_container(cfg))
# Configure checkpoint saver
checkpoint_callback = ModelCheckpoint(
dirpath=out_dir,
monitor="ap/val" if cfg.is_debug is False else "ap/train",
save_top_k=1,
mode="max",
)
# Load data
t0 = time.time()
train_dataset, test_dataset, dataset_meta, pos_weight = get_datasets(
cfg.train_df_path,
cfg.test_df_path,
cfg.cf_vector_df_path,
out_dir,
cfg.labeled_ratio,
cfg.is_use_bias,
is_skip_img=True,
)
logger.info(f"Loadded data in {time.time() -t0 :.2f} sec")
logger.info(
"Sizes [trainset testset num_classes]=[{} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
)
)
# Create dataloader
t0 = time.time()
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
)
# Load model
lit_h = LitModelCFBased(
dataset_meta["num_classes"], dataset_meta["cf_vector_dim"], cfg, pos_weight
)
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(
f"Finish training in {time.time() -t_start :.2f} sec. {lit_h.map_best=}"
)
logger.info(f"{os.getcwd()=}")
# Save confidence
t0 = time.time()
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=False,
)
# Save products
for (loader, set_name) in [(trainloader, "train"), (testloader, "test")]:
loss_vec, cf_embeddings = get_loader_loss(lit_h, loader)
out_path = osp.join(out_dir, f"cf_based_{set_name}_loss.pt")
torch.save(loss_vec, out_path)
if cfg.save_as_asset is True:
logger.info(f"{cfg.save_as_asset=}")
out_path = osp.join(out_dir, "..", f"cf_based_{set_name}_loss.pt")
torch.save(loss_vec, out_path)
out_path = osp.join(out_dir, f"cf_embeddings_{set_name}.pt")
torch.save(cf_embeddings, out_path)
if cfg.save_as_asset is True:
logger.info(f"{cfg.save_as_asset=}")
out_path = osp.join(out_dir, "..", f"cf_embeddings_{set_name}.pt")
torch.save(cf_embeddings, out_path)
logger.info(
f"Finish get_loader_loss in {time.time() -t0 :.2f} sec. {cf_embeddings.shape} {out_path=}"
)
plt.hist(loss_vec.mean(axis=1).numpy(), bins=1000)
plt.xlabel("Loss")
plt.ylabel("Count")
plt.savefig(osp.join(out_dir, f"{set_name}_loss_vec_hist.jpg"))
plt.close()
if __name__ == "__main__":
train_model_cf_based()
|
collaborative_image_understanding-main
|
src/main_train_model_cf_based.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from glob import glob
import hydra
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from hydra.utils import get_original_cwd, to_absolute_path
from dataset_utils import get_datasets
from lit_utils import LitModel
logger = logging.getLogger(__name__)
def load_cfg_file(base_dir: str):
cfg_path = osp.join(base_dir, ".hydra", "config.yaml")
cfg = OmegaConf.load(cfg_path)
return cfg
def load_train_model(base_dir: str):
model_path = glob(osp.join(base_dir, "epoch*.ckpt"))[0]
hparams_path = glob(osp.join(base_dir, "default", "version_0", "hparams.yaml"))[0]
model = LitModel.load_from_checkpoint(model_path, hparams_file=hparams_path)
model.eval()
return model
@hydra.main(
config_path="../configs",
config_name="predict_testset",
)
def predict_testset(cfg: DictConfig):
os.chdir(get_original_cwd())
dir_path = cfg.dir_path
resource_dict = OmegaConf.load(dir_path)
logger.info(resource_dict)
resource_dict = {
key: osp.join(resource_dict["base_path"], value)
for key, value in resource_dict.items()
if key != "base_path"
}
cfg_file = load_cfg_file(resource_dict["label_ratio_1.0_no_cf"])
cfg_file.batch_size = cfg.batch_size
cfg_file.num_workers = cfg.num_workers
out_dir = "."
# Load data
_, test_dataset, dataset_meta, _ = get_datasets(
cfg_file.train_df_path,
cfg_file.test_df_path,
cfg_file.cf_vector_df_path,
out_dir,
cfg_file.labeled_ratio,
cfg_file.is_use_bias,
cf_based_train_loss_path=cfg_file.cf_based_train_loss_path,
cf_based_test_loss_path=cfg_file.cf_based_test_loss_path,
is_use_cf_embeddings=cfg_file.is_use_cf_embeddings,
cf_embeddings_train_path=cfg_file.cf_embeddings_train_path,
cf_embeddings_test_path=cfg_file.cf_embeddings_test_path,
confidence_type=cfg_file.confidence_type,
is_plot_conf_hist=False,
)
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
)
torch.multiprocessing.set_sharing_strategy("file_system")
torch.set_grad_enabled(False)
t0 = time.time()
for key, base_dir in resource_dict.items():
logger.info(f"{key}, {base_dir}")
preds_path = osp.join(base_dir, "preds.npy")
if osp.exists(preds_path):
continue
model = load_train_model(base_dir)
model = model.to("cuda")
model.half()
preds_list, labels = [], []
for batch in tqdm(testloader):
imgs, labels_i = batch[0], batch[3]
preds, _ = model(imgs.to("cuda").half())
preds_list.append(torch.sigmoid(preds).cpu().numpy())
labels.append(labels_i.numpy())
preds = np.vstack(preds_list)
labels = np.vstack(labels)
np.save(preds_path, preds)
np.save(osp.join(base_dir, "labels.npy"), labels)
logger.info(f"Finish in {time.time()-t0:.2f}. {preds_path}")
if __name__ == "__main__":
predict_testset()
|
collaborative_image_understanding-main
|
src/main_predict_testset.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import hydra
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
from main_process_labels import keep_only_exists_images
from data_utils import DataHelper
logger = logging.getLogger(__name__)
def create_one_hot_vector(df: pd.DataFrame) -> list:
assert "category" in df.columns # the int label
int_labels = df["category"].to_numpy()
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = int_labels.reshape(len(int_labels), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
label_vec = onehot_encoded.tolist()
return label_vec
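# Small illustration of the encoding above (hypothetical values): for integer
# categories [2, 0, 1], the encoder orders columns by sorted category value, so
# >>> OneHotEncoder(sparse=False).fit_transform([[2], [0], [1]]).tolist()
# [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]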
def download_data_to_folder(df: pd.DataFrame, data_helper, img_out_dir: str) -> list:
assert "img_path" in df.columns # the dst path
assert "url" in df.columns # The image url to download from
# Download data
dst_exists = data_helper.list_files_in_dir(img_out_dir)
to_download_df = df[
~df["img_path"].apply(lambda x: osp.basename(x)).isin(dst_exists)
]
logger.info(f"Post filtering. [Before After]=[{len(df)} {len(to_download_df)}]")
num_failed = 0
for _, row in tqdm(to_download_df.iterrows(), total=len(to_download_df)):
try:
data_helper.download_url(row["url"], row["img_path"])
except Exception:
num_failed += 1
logger.info(f"{len(df)=} {num_failed=}")
def save_train_test_split(df: pd.DataFrame, out_dir: str, train_set_ratio: float):
df_train, df_test = train_test_split(df, train_size=train_set_ratio)
out_path = osp.join(out_dir, "df_train.pkl")
df_train.to_pickle(out_path)
logger.info(f"df_train to {out_path=}")
out_path = osp.join(out_dir, "df_test.pkl")
df_test.to_pickle(out_path)
logger.info(f"df_test to {out_path=}")
@hydra.main(
config_path="../configs",
config_name="download_pinterest_sets",
)
def download_pinterest_data(cfg):
t0 = time.time()
logger.info(cfg)
logger.info(f"{os.getcwd()=}")
img_out_dir = osp.abspath(osp.join(cfg.data_dir, cfg.category))
df_out_dir = osp.join(os.getcwd(), "..")
data_helper = DataHelper(is_debug=cfg.is_debug, is_override=cfg.is_override)
data_helper.create_dir(cfg.data_dir)
data_helper.create_dir(img_out_dir)
# Load url df
t1 = time.time()
url_df = pd.read_csv(
osp.join(cfg.data_dir, cfg.url_file),
delimiter="|",
names=["pin_id", "url"],
)
repin_df = pd.read_csv(
osp.join(cfg.data_dir, cfg.repin_file),
delimiter="|",
names=["pin_id", "user_id", "category", "board_id"],
)
label_name_df = pd.read_csv(
osp.join(cfg.data_dir, cfg.label_name_file),
delimiter="|",
names=["name", "label_id"],
)
logger.info(
f"Loaded dfs. {time.time()-t1:.2f}[s]. {[len(url_df),len(repin_df), len(label_name_df)]=}"
)
# Filter by number of interactions
interaction_count = pd.value_counts(repin_df["pin_id"])
pin_id_to_keep = interaction_count[
interaction_count > cfg.num_interactions_min
].index.to_numpy()
repin_df = repin_df[repin_df["pin_id"].isin(pin_id_to_keep)]
url_df = url_df[url_df["pin_id"].isin(pin_id_to_keep)]
logger.info(f"Filtered {time.time()-t1:.2f}[s]. {[len(url_df),len(repin_df)]=}")
# Download data
t1 = time.time()
len_init = len(url_df)
url_df["img_path"] = url_df["pin_id"].apply(
lambda x: osp.abspath(osp.join(img_out_dir, str(x) + ".jpg"))
)
download_data_to_folder(url_df, data_helper, img_out_dir)
url_df = keep_only_exists_images(url_df)
logger.info(
f"Downloaded files in {time.time()-t1:.2f}[s]. {len(url_df)}/{len_init}"
)
# Create unified df: use intersection of pin_id
df = pd.merge(repin_df, url_df, on=["pin_id"], how="inner")
# Recommender
rating_path = osp.join(cfg.data_dir, f"rating_{cfg.category}_user_based.txt")
recommender_df = df[["user_id", "pin_id"]].copy()
recommender_df["rating"] = 5.0  # Align with other datasets
recommender_df.to_csv(rating_path, sep="\t", index=False, header=False)
# Prepare df to vision training
df_to_vision = df[["pin_id", "img_path", "category"]].drop_duplicates(
subset="pin_id"
)
df_to_vision = df_to_vision.rename(columns={"pin_id": "asin"})
df_to_vision["label_vec"] = create_one_hot_vector(df_to_vision)
# Train-test split
save_train_test_split(df_to_vision, df_out_dir, cfg.train_set_ratio)
logger.info(f"Finish {cfg.category} in {time.time() -t0:.2f} [s]")
if __name__ == "__main__":
download_pinterest_data()
|
collaborative_image_understanding-main
|
src/main_download_pinterest_sets.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import logging
import os.path as osp
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets.folder import default_loader
logger = logging.getLogger(__name__)
class DfDatasetWithCF(Dataset):
def __init__(
self,
df: pd.DataFrame,
transform=None,
target_transform=None,
is_use_bias: bool = False,
is_skip_img: bool = False,
) -> None:
super().__init__()
self.df = df
self.loader = default_loader
self.transform = transform
self.target_transform = target_transform
self.is_use_bias = is_use_bias
self.is_skip_img = is_skip_img
def __len__(self):
return len(self.df)
def __getitem__(self, index):
row = self.df.iloc[index]
img_path = row["img_path"]
pos_img_path = row["pos_img_path"] if "pos_img_path" in row else row["img_path"]
cf_vector = torch.tensor(row["embs"])
target = torch.tensor(row["label_vec"])
is_labeled = row["is_labeled"]
cf_bias = torch.tensor(row["bias"])
cf_confidence = torch.tensor(row["cf_confidence"])
if self.is_use_bias is True:
cf_vector = torch.hstack((cf_vector, cf_bias)).float()
if self.is_skip_img is True:
return cf_vector, target
# Load image
image = self.loader(img_path)
image_pos = self.loader(pos_img_path)
if self.transform is not None:
image = self.transform(image)
image_pos = self.transform(image_pos)
if self.target_transform is not None:
target = self.target_transform(target)
return (
image,
image_pos,
cf_vector,
target,
is_labeled,
cf_confidence,
)
# Transformations as in standard ImageNet training
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
transforms.RandomGrayscale(),
transforms.ToTensor(),
normalize,
]
)
test_transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
def get_loss_based_confidence(cf_based_loss_path: str):
assert cf_based_loss_path is not None
cf_based_loss = torch.load(cf_based_loss_path)
loss_mean = cf_based_loss.mean(axis=1)
cf_confidence = 1 / loss_mean
return cf_confidence
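# The confidence is simply the reciprocal of the mean per-class loss, so items
# that the CF-based model reconstructs well get high confidence. Hypothetical
# example: a mean BCE loss of 0.25 gives confidence 4.0, while a mean loss of
# 2.0 gives confidence 0.5.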
def pos_label_loss_based(
cf_based_loss_path: str, label_vecs: pd.Series
) -> torch.Tensor:
assert cf_based_loss_path is not None
cf_based_loss = torch.load(cf_based_loss_path)
label_vecs = torch.tensor(label_vecs.tolist()).bool()
# Get only loss of GT labels
loss_mean = torch.tensor(
[values[mask].mean() for values, mask in zip(cf_based_loss, label_vecs)]
)
cf_confidence = 1 / loss_mean
# For samples without positive labels: set the confidence to 0
cf_confidence[torch.isnan(cf_confidence)] = 0.0
return cf_confidence
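# Unlike get_loss_based_confidence above, this variant averages the loss only
# over each item's ground-truth-positive classes (via the boolean label mask),
# so confidence reflects how well the relevant labels were reconstructed;
# items with no positive label produce NaN and are assigned confidence 0.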
def clip_confidence(
cf_conf_train: torch.Tensor, cf_conf_test: torch.Tensor, max_min_ratio: float
) -> tuple:
lower_limit, upper_limit = torch.quantile(cf_conf_train, torch.tensor([0.25, 0.75]))
if max_min_ratio is None or lower_limit == upper_limit:
return cf_conf_train, cf_conf_test
cf_conf_train_clipped = torch.clip(cf_conf_train, lower_limit, upper_limit)
cf_conf_test_clipped = torch.clip(cf_conf_test, lower_limit, upper_limit)
# Normalize to keep min-max value between 1 and max_min_ratio
min_val, max_val = cf_conf_train_clipped.min(), cf_conf_train_clipped.max()
cf_conf_train_clipped = 1 + (cf_conf_train_clipped - min_val) * (
max_min_ratio - 1
) / (max_val - min_val)
cf_conf_test_clipped = 1 + (cf_conf_test_clipped - min_val) * (
max_min_ratio - 1
) / (max_val - min_val)
# Log
min_val, max_val = cf_conf_train_clipped.min(), cf_conf_train_clipped.max()
logger.info(
f"clip_confidence: {max_min_ratio=} [min max]={min_val:.2f} {max_val:.2f}"
)
return cf_conf_train_clipped, cf_conf_test_clipped
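# Summary of the transform above: train confidences are clipped to their
# inter-quartile range [q25, q75] (test confidences use the same train-derived
# limits), then linearly rescaled so the smallest clipped value maps to 1 and
# the largest to max_min_ratio. Hypothetical example with max_min_ratio=5:
# clipped values spanning [0.875, 2.5] are mapped onto [1.0, 5.0].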
def assign_positive_cf(df_train, df_test):
df_all_set = pd.concat(
(df_train[["asin", "img_path"]], df_test[["asin", "img_path"]])
)
pos_img_path = pd.merge(
df_train[["asin", "pos_asin"]],
df_all_set,
left_on=["pos_asin"],
right_on=["asin"],
how="left",
)["img_path"]
df_train["pos_img_path"] = pos_img_path
pos_img_path = pd.merge(
df_test[["asin", "pos_asin"]],
df_all_set,
left_on=["pos_asin"],
right_on=["asin"],
how="left",
)["img_path"]
df_test["pos_img_path"] = pos_img_path
return df_train, df_test
def plot_and_save_conf_histogram(
out_dir: str,
confidence_type: str,
cf_conf: np.ndarray,
cf_conf_clipped: np.ndarray,
):
mask = np.isfinite(cf_conf)
value_min, value_max = np.round(
[np.min(cf_conf_clipped), np.max(cf_conf_clipped)], 2
)
_, axs = plt.subplots(2, 1, sharex=False)
ax = axs[0]
_, bins, _ = ax.hist(cf_conf[mask], bins=100, alpha=0.5, label="raw", color="C0")
ax.hist(cf_conf_clipped, bins=bins, alpha=0.5, label="clipped", color="C1")
ax.set_ylabel("Count")
ax.set_yscale("log")
ax.legend()
ax.set_title(f"Confidence {confidence_type=}")
ax = axs[1]
ax.hist(cf_conf_clipped, bins=100, alpha=0.5, label="clipped", color="C1")
ax.set_ylabel("Count")
ax.set_yscale("log")
ax.set_yscale("log")
ax.set_xlabel(f"Confidnce value. [min max]=[{value_min} {value_max}]")
plt.tight_layout()
plt.savefig(osp.join(out_dir, "cf_confidence.jpg"))
plt.close()
def get_datasets(
df_train_path: str,
df_test_path: str,
cf_vector_df_path: str,
out_dir: str,
labeled_ratio: float = 1.0,
is_use_bias: bool = False,
is_skip_img: bool = False,
cf_based_train_loss_path: str = None,
cf_based_test_loss_path: str = None,
is_use_cf_embeddings: bool = False,
cf_embeddings_train_path: str = None,
cf_embeddings_test_path: str = None,
confidence_type: str = "uniform",
conf_max_min_ratio: float = None,
is_plot_conf_hist: bool = True,
):
t0 = time.time()
df_train = pd.read_pickle(df_train_path)
df_test = pd.read_pickle(df_test_path)
cf_df = pd.read_pickle(cf_vector_df_path)
logger.info(
f"Loaded df in {time.time() -t0 :.2f} sec. {len(df_train)=} {len(df_test)=} {len(cf_df)=}"
)
# Add CF vectors
t0 = time.time()
cf_df["asin"] = cf_df["asin"].astype(df_train["asin"].dtype)
df_train = pd.merge(df_train, cf_df, on=["asin"], how="inner")
df_test = pd.merge(df_test, cf_df, on=["asin"], how="inner")
logger.info(
f"merge df in {time.time() -t0 :.2f} sec. {len(df_train)=} {len(df_test)=} {len(cf_df)=}"
)
if is_use_cf_embeddings is True:
t0 = time.time()
cf_embeddings_train = torch.load(cf_embeddings_train_path)
cf_embeddings_test = torch.load(cf_embeddings_test_path)
df_train["embs"] = cf_embeddings_train.tolist()
df_test["embs"] = cf_embeddings_test.tolist()
logger.info(f"Override cf vectors in {time.time() -t0 :.2f} sec.")
# Add positive CF
if "pos_asin" in df_train.columns:
df_train, df_test = assign_positive_cf(df_train, df_test)
# Hide labels
df_train["is_labeled"] = torch.rand(len(df_train)) > 1.0 - labeled_ratio
df_test["is_labeled"] = True
# Define positive weight: since positives are far fewer than negatives, increase their weight
train_labels = np.array(
df_train[df_train["is_labeled"] == True].label_vec.to_list()
)
pos_weight = len(train_labels) / (train_labels.sum(axis=0) + 1e-6)
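# Hypothetical example of the weighting above: with 1000 labeled training
# samples, a class with 50 positives gets pos_weight ~ 1000 / 50 = 20 while a
# class with 500 positives gets ~2; the 1e-6 term only guards against division
# by zero for classes with no positives.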
# Apply confidence to cf vector
t0 = time.time()
if confidence_type == "uniform":
cf_conf_train = torch.ones(len(df_train))
cf_conf_test = torch.ones(len(df_test))
elif confidence_type == "loss_based":
cf_conf_train = get_loss_based_confidence(cf_based_train_loss_path)
cf_conf_test = get_loss_based_confidence(cf_based_test_loss_path)
elif confidence_type == "num_intercations":
cf_conf_train = torch.from_numpy(np.sqrt(df_train["num_intercations"].values))
cf_conf_test = torch.from_numpy(np.sqrt(df_test["num_intercations"].values))
elif confidence_type == "pos_label_loss_based":
cf_conf_train = pos_label_loss_based(
cf_based_train_loss_path, df_train["label_vec"]
)
cf_conf_test = pos_label_loss_based(
cf_based_test_loss_path, df_test["label_vec"]
)
else:
raise ValueError(f"{confidence_type} is not supported")
cf_conf_train_clipped, cf_conf_test_clipped = clip_confidence(
cf_conf_train, cf_conf_test, conf_max_min_ratio
)
df_train["cf_confidence"] = cf_conf_train_clipped
df_test["cf_confidence"] = cf_conf_test_clipped
logger.info(f"CF confidence in {time.time() -t0 :.2f} sec.")
if is_plot_conf_hist is True:
plot_and_save_conf_histogram(
out_dir,
confidence_type,
cf_conf_train.numpy(),
cf_conf_train_clipped.numpy(),
)
logger.info(f"Plotted CF confidence in {time.time() -t0 :.2f} sec. {out_dir=}")
# Construct dataset
train_dataset = DfDatasetWithCF(
df_train,
transform=train_transform,
is_use_bias=is_use_bias,
is_skip_img=is_skip_img,
)
test_dataset = DfDatasetWithCF(
df_test,
transform=test_transform,
is_use_bias=is_use_bias,
is_skip_img=is_skip_img,
)
# Get metadata
num_classes = len(df_train["label_vec"].iloc[0])
cf_vector_dim = len(df_train["embs"].iloc[0])
if is_use_bias is True:
cf_vector_dim += 1
dataset_meta = {
"train_set_size": len(df_train),
"test_set_size": len(df_test),
"num_classes": num_classes,
"cf_vector_dim": cf_vector_dim,
}
return train_dataset, test_dataset, dataset_meta, pos_weight
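# Minimal usage sketch (the pickle paths below are placeholders, not real assets):
# >>> train_ds, test_ds, meta, pos_weight = get_datasets(
# ...     "df_train.pkl", "df_test.pkl", "cf_df.pkl", out_dir=".",
# ...     labeled_ratio=0.5, confidence_type="uniform")
# >>> meta["num_classes"], meta["cf_vector_dim"]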
|
collaborative_image_understanding-main
|
src/dataset_utils.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
from typing import List
import pandas as pd
import requests
from tqdm import tqdm
logger = logging.getLogger(__name__)
class DataHelper:
def __init__(self, is_debug: bool, is_override: bool):
self.is_debug = is_debug
self.is_override = is_override
def is_img_path(self, path: str) -> bool:
if path.lower().endswith((".jpg", ".png", ".jpeg", ".gif", ".tiff")):
return True
else:
return False
def is_exist(self, path: str):
is_path_exist = osp.exists(path)
if self.is_override:
is_path_exist = False
return is_path_exist
def download_url(
self,
url: str,
dst: str = None,
is_force_download: bool = False,
):
if self.is_debug:
logger.info(f"download_url: {url=} {dst=} {is_force_download=}")
if dst is None:
dst = os.path.basename(url)
if is_force_download is False and self.is_exist(dst):
return
r = requests.get(url)
with open(dst, "wb") as f:
f.write(r.content)
def ungzip_file(self, path_src: str, path_dst: str):
logger.info(f"ungzip_file: {path_src=} {path_dst=}")
if self.is_exist(path_dst):
return
os.system(f"gzip -dk {path_src}")
def read_pickle(self, pkl_path: str) -> pd.DataFrame:
logger.info(f"pd.read_pickle {pkl_path}")
df = pd.read_pickle(pkl_path)
return df
def save_df_as_pkl(self, json_path: str, pkl_path: str):
logger.info(f"save_df_as_pkl: {json_path=} {pkl_path=}")
with open(json_path, "r") as fin:
df = {}
for i, line in enumerate(tqdm(fin)):
df[i] = eval(line)
df = pd.DataFrame.from_dict(df, orient="index")
df.to_pickle(pkl_path)
def create_dir(self, dst: str):
logger.info(f"create_dir {dst=}")
os.makedirs(dst, exist_ok=True)
def list_files_in_dir(self, path: str) -> List[str]:
return os.listdir(path)
|
collaborative_image_understanding-main
|
src/data_utils.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from glob import glob
import hydra
import pandas as pd
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from sklearn.metrics import average_precision_score
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from dataset_utils import get_datasets
from lit_utils import LitModel
logging.basicConfig()
logger = logging.getLogger(__name__)
def get_products(model, dataloader):
preds_list, labels, embeddings_list = [], [], []
for batch in tqdm(dataloader):
imgs, labels_i = batch[0], batch[3]
preds, _ = model(imgs.to("cuda").half())
embeddings = model.get_embeddings(imgs.to("cuda").half())
preds_list.append(torch.sigmoid(preds).cpu())
embeddings_list.append(embeddings.cpu())
labels.append(labels_i)
preds = torch.vstack(preds_list)
embeddings = torch.vstack(embeddings_list)
labels = torch.vstack(labels)
return preds, embeddings, labels
def load_cfg_file(base_dir: str):
cfg_path = osp.join(base_dir, ".hydra", "config.yaml")
cfg = OmegaConf.load(cfg_path)
return cfg
def load_trained_model(base_dir: str):
model_path = glob(osp.join(base_dir, "epoch*.ckpt"))[0]
hparams_path = glob(osp.join(base_dir, "default", "version_0", "hparams.yaml"))[0]
model = LitModel.load_from_checkpoint(model_path, hparams_file=hparams_path)
model.eval()
return model
@hydra.main(
config_path="../configs",
config_name="extract_embeddings",
)
def extract_embeddings(cfg: DictConfig):
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(os.getcwd())
dir_path = cfg.dir_path
resource_dict = {"label_ratio_1.0_no_cf": dir_path}
logger.info(resource_dict)
logger.info("load_cfg_file")
cfg_file = load_cfg_file(resource_dict["label_ratio_1.0_no_cf"])
cfg_file.batch_size = cfg.batch_size
cfg_file.num_workers = cfg.num_workers
# Load data
logger.info("Load datasets")
train_dataset, test_dataset, dataset_meta, _ = get_datasets(
cfg_file.train_df_path,
cfg_file.test_df_path,
cfg_file.cf_vector_df_path,
out_dir,
cfg_file.labeled_ratio,
cfg_file.is_use_bias,
cf_based_train_loss_path=cfg_file.cf_based_train_loss_path,
cf_based_test_loss_path=cfg_file.cf_based_test_loss_path,
is_use_cf_embeddings=cfg_file.is_use_cf_embeddings,
cf_embeddings_train_path=cfg_file.cf_embeddings_train_path,
cf_embeddings_test_path=cfg_file.cf_embeddings_test_path,
confidence_type=cfg_file.confidence_type,
is_plot_conf_hist=False,
)
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=False,
pin_memory=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=False,
pin_memory=True,
)
torch.multiprocessing.set_sharing_strategy("file_system")
torch.set_grad_enabled(False)
for key, base_dir in resource_dict.items():
logger.info(f"{key}, {base_dir}")
model = load_trained_model(base_dir)
model = model.to("cuda").half()
dfs = []
for loader_type, loader in [
("test", testloader),
("train", trainloader),
]:
t0 = time.time()
preds, embeddings, labels = get_products(model, loader)
ap = average_precision_score(labels, preds)
logger.info(f"Finish {loader_type=} in {time.time()-t0:.2f}. {ap=}")
df = loader.dataset.df
df["pred"] = preds.tolist()
df["image_embedding"] = embeddings.tolist()
df["set_type"] = loader_type
df["label_vec_dataloder_output"] = labels.tolist()
dfs.append(df)
# Save products
df = pd.concat(dfs)
df = df.rename(
columns={"embs": "cf_vec"},
)
df.to_pickle(osp.join(out_dir, f"{cfg.dataset_name}_features.pkl"))
df.to_csv(osp.join(out_dir, f"{cfg.dataset_name}_features.csv"))
if __name__ == "__main__":
extract_embeddings()
|
collaborative_image_understanding-main
|
src/main_extract_embeddings.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import time
import os.path as osp
import hydra
import pytorch_lightning as pl
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.utils.data.dataloader import DataLoader
from dataset_utils import get_datasets
from lit_utils import LitModel
logger = logging.getLogger(__name__)
@hydra.main(
config_path="../configs",
config_name="train_model_with_cf_pretraining",
)
def train_model_with_cf_pretraining(cfg: DictConfig):
t_start = time.time()
logger.info(cfg)
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(f"{out_dir=}")
pl.utilities.seed.seed_everything(cfg.seed)
logger.info(f"{torch.cuda.is_available()=}")
# Configure logging
tb_logger = pl_loggers.TensorBoardLogger(out_dir)
tb_logger.log_hyperparams(OmegaConf.to_container(cfg))
# Configure checkpoint saver
checkpoint_callback = ModelCheckpoint(
dirpath=out_dir,
monitor="ap/val" if cfg.is_debug is False else "ap/train",
save_top_k=1,
mode="max",
)
# Load data
t0 = time.time()
train_dataset, test_dataset, dataset_meta, pos_weight = get_datasets(
cfg.train_df_path,
cfg.test_df_path,
cfg.cf_vector_df_path,
out_dir,
cfg.labeled_ratio,
cfg.is_use_bias,
cf_based_train_loss_path=cfg.cf_based_train_loss_path,
cf_based_test_loss_path=cfg.cf_based_test_loss_path,
is_use_cf_embeddings=cfg.is_use_cf_embeddings,
cf_embeddings_train_path=cfg.cf_embeddings_train_path,
cf_embeddings_test_path=cfg.cf_embeddings_test_path,
confidence_type=cfg.confidence_type,
conf_max_min_ratio=cfg.conf_max_min_ratio,
)
logger.info(f"Loadded data in {time.time() -t0 :.2f} sec")
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
# Create dataloader
t0 = time.time()
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=True,
pin_memory=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
)
# First training: predict CF vector
cfg["cf_weight"], cfg["label_weight"] = 1.0, 0.0
lit_h = LitModel(
dataset_meta["num_classes"], dataset_meta["cf_vector_dim"], cfg, pos_weight
)
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
precision=16,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(f"Finish cf training in {time.time() -t_start :.2f} sec")
logger.info(f"{out_dir=}")
trainer.save_checkpoint(osp.join(out_dir, "model_pretrained_cf.ckpt"))
# Second training: predict labels
cfg["cf_weight"], cfg["label_weight"] = 0.0, 1.0
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
precision=16,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(
f"Finish label training in {time.time() -t_start :.2f} sec. {lit_h.map_best=:.3f}"
)
logger.info(f"{out_dir=}")
trainer.save_checkpoint(osp.join(out_dir, "model.ckpt"))
if __name__ == "__main__":
train_model_with_cf_pretraining()
|
collaborative_image_understanding-main
|
src/train_model_with_cf_pretraining.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from itertools import chain
import hydra
import numpy as np
import pandas as pd
from omegaconf import DictConfig
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from torchvision.datasets.folder import default_loader as img_loader
logger = logging.getLogger(__name__)
def keep_only_exists_images(df: pd.DataFrame) -> pd.DataFrame:
img_exists = []
for img_path in df["img_path"]:
if osp.exists(img_path):
try:
img_loader(img_path)
img_exists.append(True)
except Exception:
img_exists.append(False)
else:
img_exists.append(False)
df["img_exists"] = img_exists
logger.info(f"Img exsists {df.img_exists.sum()}/{len(df)}")
return df[df["img_exists"] == True]
def execute_train_test_split(
df: pd.DataFrame, train_set_ratio: float, min_label_count_thresh: int
):
n_iter = 0
min_label_count = 0
while min_label_count < min_label_count_thresh:
df_train, df_test = train_test_split(df, train_size=train_set_ratio)
train_labels = np.array(df_train.label_vec.to_list())
test_labels = np.array(df_test.label_vec.to_list())
min_label_count = min(
train_labels.sum(axis=0).min(), test_labels.sum(axis=0).min()
)
logger.info(f"[{n_iter}] train-test split {min_label_count=}")
n_iter += 1
return df_train, df_test
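# The loop above re-draws random train/test splits until every label has at
# least min_label_count_thresh positive samples on both sides, so no class is
# entirely missing from either split.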
@hydra.main(
config_path="../configs",
config_name="process_labels_movielens",
)
def process_labels_movielens(cfg: DictConfig):
out_dir = os.getcwd()
logger.info(cfg)
logger.info(os.getcwd())
# Load df
t0 = time.time()
meta_path = osp.join(cfg.data_dir, f"movies.dat")
meta_df = pd.read_csv(
meta_path, delimiter="::", names=["asin", "movie_name", "categories"]
)
logger.info(f"Loadded meta_df in {time.time() -t0:.2f} sec. {len(meta_df)=}")
# Add image paths
meta_df["img_path"] = meta_df["asin"].apply(
lambda x: osp.join(cfg.data_dir, cfg.category, str(x) + ".jpg")
)
# Keep only items with images
df = keep_only_exists_images(meta_df)[["asin", "img_path", "categories"]]
# Find the top-level label name by the most frequent category
df = df[df["categories"] != "(no genres listed)"]
df["merged_labels"] = df["categories"].apply(lambda cat_list: cat_list.split("|"))
# Count the number of samples for each category: remove a low-level category if there are not enough samples
label_count = pd.value_counts(
list(chain.from_iterable(df["merged_labels"].tolist()))
)
# Encode to Multilabel vector
mlb = MultiLabelBinarizer()
df["label_vec"] = mlb.fit_transform(df["merged_labels"].tolist()).tolist()
logger.info(f"\n{df.head()}")
# Save results
out_path = osp.join(out_dir, "label_count.csv")
label_count.to_csv(out_path, header=False)
out_path = osp.join(out_dir, "df_w_labels.pkl")
df = df.reset_index()
df.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "label_mapper.csv")
pd.DataFrame(mlb.classes_).to_csv(out_path, header=False)
logger.info(f"Save to {out_path}")
# Train-test split
df_train, df_test = execute_train_test_split(
df, cfg.train_set_ratio, cfg.min_label_count
)
# Save train
out_path = osp.join(out_dir, "df_train.pkl")
df_train = df_train.reset_index()
df_train.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "..", "df_train.pkl")
df_train.to_pickle(out_path)
logger.info(f"Save to {out_path}")
# Save test
out_path = osp.join(out_dir, "df_test.pkl")
df_test = df_test.reset_index()
df_test.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "..", "df_test.pkl")
df_test.to_pickle(out_path)
logger.info(f"Save to {out_path}")
logger.info("Finish")
if __name__ == "__main__":
process_labels_movielens()
|
collaborative_image_understanding-main
|
src/main_process_labels_movielens.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import cornac
import hydra
import pandas as pd
import torch
from cornac.eval_methods import RatioSplit
from cornac.metrics import AUC, MAP
from omegaconf import DictConfig
from recommender_utils import RecommendationDataset, VAECFWithBias
logger = logging.getLogger(__name__)
@hydra.main(
config_path="../configs",
config_name="train_recommender",
)
def train_recommender(cfg: DictConfig):
out_dir = os.getcwd()
logger.info(cfg)
logger.info(os.getcwd())
# Initialize dataset
t1 = time.time()
dataset_h = RecommendationDataset(cfg.data_dir, cfg.category, cfg.user_based)
dataset = dataset_h.load_feedback()
rs = RatioSplit(
data=dataset,
test_size=cfg.test_size,
rating_threshold=1.0,
seed=cfg.seed,
exclude_unknowns=True,
verbose=True,
)
logger.info(f"Loaded dataset in {time.time()-t1:.2f}")
# Initialize model
models = []
if "most_pop" in cfg.models:
model = cornac.models.MostPop()
models.append(model)
if "bpr" in cfg.models:
bpr = cornac.models.BPR(
k=10, max_iter=1000, learning_rate=0.001, lambda_reg=0.001, seed=123
)
models.append(bpr)
if "vae_no_bias" in cfg.models:
model = cornac.models.VAECF(
k=cfg.bottleneck_size,
autoencoder_structure=list(cfg.emb_size),
act_fn="tanh",
likelihood="mult",
n_epochs=cfg.n_epochs,
batch_size=cfg.batch_size,
learning_rate=cfg.lr,
beta=cfg.beta,
seed=cfg.seed,
use_gpu=True,
verbose=True,
)
models.append(model)
if "vae_no_bias" in cfg.models:
vaecf = VAECFWithBias(
k=cfg.bottleneck_size,
autoencoder_structure=list(cfg.emb_size),
act_fn="tanh",
likelihood="mult",
n_epochs=cfg.n_epochs,
batch_size=cfg.batch_size,
learning_rate=cfg.lr,
lr_steps=cfg.lr_steps,
beta=cfg.beta,
seed=cfg.seed,
use_gpu=True,
verbose=True,
out_dir=out_dir,
)
models.append(vaecf)
# Run training
t0 = time.time()
metrics = [AUC(), MAP()]
cornac.Experiment(
eval_method=rs,
models=models,
metrics=metrics,
user_based=False,
).run()
logger.info(f"Finish training in {time.time() -t0:.2f} sec")
if "bpr" in cfg.models:
logger.info(bpr)
embs = bpr.i_factors
bias = bpr.i_biases
if "vae_no_bias" in cfg.models:
logger.info(vaecf.vae)
# Save vae model
out_path = osp.join(out_dir, "vae.pt")
torch.save(vaecf.vae.state_dict(), out_path)
embs = vaecf.vae.decoder.fc1.weight.detach().cpu()
bias = vaecf.vae.item_bias.weight.detach().cpu().squeeze()
# Create CF data frame
num_intercations = rs.train_set.csc_matrix.sum(axis=0).tolist()[0]
df = pd.DataFrame(
{
"asin": list(rs.train_set.item_ids),
"embs": embs.tolist(),
"bias": bias.tolist(),
"num_intercations": num_intercations,
}
)
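# The resulting CF dataframe stores, per item id ("asin"), its latent embedding,
# its learned bias, and its number of train-set interactions (column sums of the
# user-item matrix); this is the pickle later read by dataset_utils.get_datasets
# as cf_vector_df_path.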
# Save to: out path
out_path = osp.join(out_dir, "cf_df.pkl")
logger.info(out_path)
df.to_pickle(out_path)
if cfg.test_size == 0.0:
# Save to: dataset output top dir
out_path = osp.join(out_dir, "..", "cf_df.pkl")
logger.info(out_path)
df.to_pickle(out_path)
logger.info(f"Finish in {time.time()-t0:.2f} sec")
if __name__ == "__main__":
train_recommender()
|
collaborative_image_understanding-main
|
src/main_train_recommender.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os.path as osp
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from cornac.data import Reader
from cornac.models import VAECF
from cornac.models.recommender import Recommender
from cornac.models.vaecf.vaecf import VAE
from tqdm.auto import trange
logger = logging.getLogger(__name__)
def learn(
vae,
train_set,
n_epochs,
batch_size,
learn_rate,
lr_steps,
beta,
verbose,
out_dir: str,
device=torch.device("cpu"),
):
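# Training loop for the bias-augmented VAE: each user's row of the interaction
# matrix is binarized, the VAE loss (reconstruction plus beta-weighted KL) is
# optimized with Adam under a MultiStepLR schedule over lr_steps, and the
# loss / learning-rate curves are written to out_dir as loss.jpg.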
loss_list, lr_list = [], []
optimizer = torch.optim.Adam(params=vae.parameters(), lr=learn_rate)
schedular = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_steps)
progress_bar = trange(1, n_epochs + 1, disable=not verbose)
for _ in progress_bar:
sum_loss = 0.0
count = 0
for batch_id, u_ids in enumerate(
train_set.user_iter(batch_size, shuffle=False)
):
u_batch = train_set.matrix[u_ids, :]
u_batch.data = np.ones(len(u_batch.data)) # Binarize data
u_batch = u_batch.A
u_batch = torch.tensor(u_batch, dtype=torch.float32, device=device)
# Reconstructed batch
u_batch_, mu, logvar = vae(u_batch)
loss = vae.loss(u_batch, u_batch_, mu, logvar, beta)
optimizer.zero_grad()
loss.backward()
optimizer.step()
sum_loss += loss.data.item()
count += len(u_batch)
if batch_id % 10 == 0:
progress_bar.set_postfix(loss=(sum_loss / count))
schedular.step()
loss_list.append(sum_loss / count)
lr_list += schedular.get_last_lr()
_, axs = plt.subplots(2, 1, sharex=True)
ax = axs[0]
ax.plot(loss_list)
ax.set_ylabel("loss")
ax.set_yscale("log")
ax.grid()
ax = axs[1]
ax.plot(lr_list)
ax.set_ylabel("lr")
ax.set_yscale("log")
ax.set_xlabel("epoch")
ax.grid()
plt.tight_layout()
plt.savefig(osp.join(out_dir, "loss.jpg"))
plt.close()
return vae
class VAEWithBias(VAE):
def __init__(self, z_dim, ae_structure, act_fn, likelihood):
logger.info("VAEWithBias")
super().__init__(z_dim, ae_structure, act_fn, likelihood)
# Add bias
num_items = ae_structure[0]
self.item_bias = torch.nn.Embedding(num_items, 1)
def decode(self, z):
h = self.decoder(z)
if self.likelihood == "mult":
return torch.softmax(h + self.item_bias.weight.T, dim=1)
raise NotImplementedError()
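# Shape note: self.decoder(z) is (batch_size, num_items) and
# self.item_bias.weight.T is (1, num_items), so the per-item bias is
# broadcast-added to every user's row before the softmax.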
class VAECFWithBias(VAECF):
def __init__(
self,
name="VAECF",
k=10,
autoencoder_structure=[20],
act_fn="tanh",
likelihood="mult",
n_epochs=100,
batch_size=100,
learning_rate=0.001,
lr_steps=[10],
out_dir=".",
beta=1.0,
trainable=True,
verbose=False,
seed=None,
use_gpu=False,
):
super().__init__(
name=name,
k=k,
autoencoder_structure=autoencoder_structure,
act_fn=act_fn,
likelihood=likelihood,
n_epochs=n_epochs,
batch_size=batch_size,
learning_rate=learning_rate,
beta=beta,
trainable=trainable,
verbose=verbose,
seed=seed,
use_gpu=use_gpu,
)
self.lr_steps = lr_steps
self.out_dir = out_dir
def fit(self, train_set, val_set=None):
"""Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object
"""
Recommender.fit(self, train_set, val_set)
self.device = (
torch.device("cuda:0")
if (self.use_gpu and torch.cuda.is_available())
else torch.device("cpu")
)
if self.trainable:
if self.seed is not None:
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
if not hasattr(self, "vae"):
data_dim = train_set.matrix.shape[1]
self.vae = VAEWithBias(
self.k,
[data_dim] + self.autoencoder_structure,
self.act_fn,
self.likelihood,
).to(self.device)
learn(
self.vae,
self.train_set,
n_epochs=self.n_epochs,
batch_size=self.batch_size,
learn_rate=self.learning_rate,
lr_steps=self.lr_steps,
beta=self.beta,
verbose=self.verbose,
device=self.device,
out_dir=self.out_dir,
)
elif self.verbose:
logger.info("%s is trained already (trainable = False)" % (self.name))
return self
class RecommendationDataset:
def __init__(
self,
data_dir: str,
category: str = "Clothing_Shoes_and_Jewelry",
user_based: bool = True,
) -> None:
self.data_dir = data_dir
self.category = category
self.review_path = osp.join(self.data_dir, f"reviews_{category}.pkl")
self.rating_path = osp.join(self.data_dir, f"rating_{category}_user_based.txt")
if not osp.exists(self.rating_path):
self.convert_review_pkl_to_rating()
def convert_review_pkl_to_rating(self):
review_df = pd.read_pickle(
osp.join(self.data_dir, f"reviews_{self.category}.pkl")
)
# Align to the rating.txt format
review_df = review_df[["reviewerID", "asin", "overall"]]
review_df.to_csv(self.rating_path, sep="\t", index=False, header=False)
def load_feedback(self, reader: Reader = None) -> List:
reader = Reader(bin_threshold=1.0) if reader is None else reader
return reader.read(self.rating_path, sep="\t")
|
collaborative_image_understanding-main
|
src/recommender_utils.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import time
import hydra
import pytorch_lightning as pl
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch.utils.data.dataloader import DataLoader
from dataset_utils import get_datasets
from lit_utils import LitModel
logger = logging.getLogger(__name__)
@hydra.main(
config_path="../configs",
config_name="train_model",
)
def train_model(cfg: DictConfig):
t_start = time.time()
logger.info(cfg)
out_dir = os.getcwd()
os.chdir(get_original_cwd())
logger.info(f"{out_dir=}")
pl.utilities.seed.seed_everything(cfg.seed)
logger.info(f"{torch.cuda.is_available()=}")
# Configure logging
tb_logger = pl_loggers.TensorBoardLogger(out_dir)
tb_logger.log_hyperparams(OmegaConf.to_container(cfg))
# Configure checkpoint saver
checkpoint_callback = ModelCheckpoint(
dirpath=out_dir,
monitor="ap/val" if cfg.is_debug is False else "ap/train",
save_top_k=1,
mode="max",
)
# Load data
t0 = time.time()
train_dataset, test_dataset, dataset_meta, pos_weight = get_datasets(
cfg.train_df_path,
cfg.test_df_path,
cfg.cf_vector_df_path,
out_dir,
cfg.labeled_ratio,
cfg.is_use_bias,
cf_based_train_loss_path=cfg.cf_based_train_loss_path,
cf_based_test_loss_path=cfg.cf_based_test_loss_path,
is_use_cf_embeddings=cfg.is_use_cf_embeddings,
cf_embeddings_train_path=cfg.cf_embeddings_train_path,
cf_embeddings_test_path=cfg.cf_embeddings_test_path,
confidence_type=cfg.confidence_type,
conf_max_min_ratio=cfg.conf_max_min_ratio,
)
logger.info(f"Loadded data in {time.time() -t0 :.2f} sec")
logger.info(
"Sizes [trainset testset num_classes cf_vector_dim]=[{} {} {} {}]".format(
dataset_meta["train_set_size"],
dataset_meta["test_set_size"],
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
)
)
# Create dataloader
t0 = time.time()
trainloader = DataLoader(
train_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
shuffle=True,
pin_memory=True,
)
testloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
)
# Load model
lit_h = LitModel(
dataset_meta["num_classes"],
dataset_meta["cf_vector_dim"],
cfg,
pos_weight,
out_dir=out_dir,
)
trainer = pl.Trainer(
min_epochs=cfg["epochs"],
max_epochs=cfg["epochs"],
progress_bar_refresh_rate=1,
logger=tb_logger,
callbacks=[
checkpoint_callback,
LearningRateMonitor(logging_interval="epoch"),
],
fast_dev_run=cfg.is_debug,
num_sanity_val_steps=0,
gpus=[cfg.gpu] if torch.cuda.is_available() else None,
precision=16,
)
trainer.fit(lit_h, trainloader, testloader)
logger.info(
f"Finish training in {time.time() -t_start :.2f} sec. {lit_h.map_best=:.3f}"
)
logger.info(f"{os.getcwd()=}")
if __name__ == "__main__":
train_model()
|
collaborative_image_understanding-main
|
src/main_train_model.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import hydra
import numpy as np
import pandas as pd
import scipy.stats
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from scipy.stats import ttest_rel
from sklearn.metrics import average_precision_score
from tqdm import tqdm
import torch
logger = logging.getLogger(__name__)
def mean_confidence_interval(data, confidence=0.9):
a = 1.0 * np.array(data)
n = len(a)
se = scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
return h
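# This returns only the half-width h = sem(data) * t_{(1+confidence)/2, n-1} of
# the confidence interval for the mean; callers report it as mean +/- h.
# Hypothetical example: n=10 samples with sample std 2.0 give sem ~ 0.63 and a
# 90% half-width of roughly 0.63 * 1.83 ~ 1.16.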
def compare_results(preds_dict: dict, eval_function, metric_dict: dict):
for dataset_name, dataset_dict in preds_dict.items():
print(dataset_name)
if dataset_name in metric_dict and dataset_name != "Toys":
print(f"{dataset_name} exists in metric_dict")
continue
labels = dataset_dict["labels"]
no_cf_preds = dataset_dict["no_cf_preds"]
with_cf_preds = dataset_dict["with_cf_preds"]
df = single_set_compare_results(
labels, no_cf_preds, with_cf_preds, eval_function
)
metric_dict[dataset_name] = df
logger.info(df[["no_cf", "with_cf", "improvement"]].round(3).T)
return metric_dict
def single_label_ratio_compare_results(
label_ratio, labels, preds_a, preds_b, eval_function
):
# Define output
res_dict = {"label_ratio": label_ratio}
# Evaluate performance
perf_a = eval_function(labels, preds_a)
perf_b = eval_function(labels, preds_b)
res_dict["pvalue"] = ttest_rel(perf_a, perf_b).pvalue
# No CF
res_dict["no_cf"] = np.mean(perf_a)
res_dict["no_cf_std"] = np.std(perf_a)
res_dict["no_cf_ci"] = mean_confidence_interval(perf_a)
# With CF
res_dict["with_cf"] = np.mean(perf_b)
res_dict["with_cf_std"] = np.std(perf_b)
res_dict["with_cf_ci"] = mean_confidence_interval(perf_b)
return res_dict
def single_set_compare_results(
labels, no_cf_pred_list, with_cf_pred_list, eval_function
):
# Collect one result dict per label ratio
res_dicts = []
total = len(no_cf_pred_list)
label_ratios = np.arange(0.1, 1.1, 0.1)
for label_ratio, preds_a, preds_b in tqdm(
zip(label_ratios, no_cf_pred_list, with_cf_pred_list), total=total
):
res_dict = single_label_ratio_compare_results(
label_ratio, labels, preds_a, preds_b, eval_function
)
res_dicts.append(res_dict)
df = pd.DataFrame(res_dicts)
df.set_index("label_ratio")
df["improvement"] = df["with_cf"] / df["no_cf"] - 1.0
return df
def calc_top1_acc(labels, preds):
return np.array(
[labels[n][top1] for n, top1 in enumerate(np.argmax(preds, axis=1))]
)
def calc_recall_at_k(labels, preds, k: int = 5):
recalls = []
for pred, label in zip(torch.tensor(preds), torch.tensor(labels)):
_, pred_idx = torch.topk(pred, k=k) # The predicted labels
label_idx = torch.where(label == 1)[0] # The ground truth labels
# In case there are no labels
if len(label_idx) == 0:
continue
# Recall per item
recall_i = sum(el in pred_idx for el in label_idx) / len(label_idx)
recalls.append(recall_i)
return recalls
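# Hypothetical example of recall@k: with ground-truth labels {1, 4, 7} and
# top-5 predictions {0, 1, 2, 4, 9}, two of the three true labels are
# retrieved, so the per-item recall@5 is 2/3.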
def calc_recall_at_1(labels, preds):
return calc_recall_at_k(labels, preds, k=1)
def calc_recall_at_3(labels, preds):
return calc_recall_at_k(labels, preds, k=3)
def calc_recall_at_5(labels, preds):
return calc_recall_at_k(labels, preds, k=5)
def calc_recall_at_10(labels, preds):
return calc_recall_at_k(labels, preds, k=10)
def calc_precision_at_k(labels, preds, k: int = 5):
ps = []
for pred, label in zip(torch.tensor(preds), torch.tensor(labels)):
_, pred_idx = torch.topk(pred, k=k) # The predicted labels
label_idx = torch.where(label == 1)[0] # The ground truth labels
# In case there are no labels
if len(label_idx) == 0:
continue
# Precision per item
p_i = sum(el in label_idx for el in pred_idx) / k
ps.append(p_i)
return ps
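# The same hypothetical case for precision@k: with ground-truth labels {1, 4, 7}
# and top-5 predictions {0, 1, 2, 4, 9}, two of the five predicted labels are
# correct, so the per-item precision@5 is 2/5.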
def calc_precision_at_1(labels, preds):
return calc_precision_at_k(labels, preds, k=1)
def calc_precision_at_3(labels, preds):
return calc_precision_at_k(labels, preds, k=3)
def calc_precision_at_5(labels, preds):
return calc_precision_at_k(labels, preds, k=5)
def calc_precision_at_10(labels, preds):
return calc_precision_at_k(labels, preds, k=10)
def calc_ap_score(labels, preds) -> np.ndarray:
aps = []
num_experiments = 50
num_samples = int(0.9 * len(labels))
idxs_list = np.random.randint(
low=0, high=len(labels), size=(num_experiments, num_samples)
)
for idxs in idxs_list:
labels_chosen, preds_chosen = labels[idxs], preds[idxs]
mask = labels_chosen.sum(axis=0) > 0
ap = average_precision_score(labels_chosen[:, mask], preds_chosen[:, mask])
aps.append(ap)
return np.array(aps)
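# calc_ap_score is a bootstrap estimate: it draws 50 resamples of 90% of the
# test set (with replacement), drops classes without a positive example in the
# resample, and returns the 50 mAP values so the downstream t-test and
# confidence-interval code have a distribution to work with.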
def build_label_ratio_dicts(results_path):
res_dict = OmegaConf.load(results_path)
# Build absolute path
res_dict = {
key: osp.join(res_dict["base_path"], value)
for key, value in res_dict.items()
if key != "base_path"
}
no_cf_dict = {key: value for key, value in res_dict.items() if "_no_cf" in key}
with_cf_dict = {key: value for key, value in res_dict.items() if "_with_cf" in key}
return no_cf_dict, with_cf_dict
def load_preds(base_path):
no_cf_dict, with_cf_dict = build_label_ratio_dicts(base_path)
labels = np.load(osp.join(list(no_cf_dict.values())[0], "labels.npy"))
no_cf_preds, with_cf_preds = [], []
no_cf_aps, with_cf_aps = [], []
for (key_a, path_a), (key_b, path_b) in zip(
no_cf_dict.items(), with_cf_dict.items()
):
preds_a = np.load(osp.join(path_a, "preds.npy"))
preds_b = np.load(osp.join(path_b, "preds.npy"))
ap_a = average_precision_score(labels, preds_a) # ,average='micro')
ap_b = average_precision_score(labels, preds_b) # ,average='micro')
ratio = np.round(100 * np.round(ap_b, 3) / np.round(ap_a, 3) - 100, 2)
print(
f"{key_a} {key_b} [{ap_a:.3f} {ap_b:.3f} {ratio:.3f}%]. size={preds_a.shape}"
)
no_cf_preds.append(preds_a)
with_cf_preds.append(preds_b)
no_cf_aps.append(ap_a)
with_cf_aps.append(ap_b)
return {
"no_cf_preds": no_cf_preds,
"with_cf_preds": with_cf_preds,
"labels": labels,
"no_cf_ap": np.array(no_cf_aps),
"with_cf_ap": np.array(with_cf_aps),
}
@hydra.main(
config_path="../configs",
config_name="evaluate_methods",
)
def evaluate_methods(cfg: DictConfig):
os.chdir(get_original_cwd())
out_path = osp.join("../outputs/figures")
metric_res_dicts_path = osp.join(out_path, "metric_res_dicts.npy")
dataset_mapping = {
"pinterest": "Pinterest",
"movielens": "MovieLens",
"Clothing_Shoes_and_Jewelry": "Clothing",
"Toys_and_Games": "Toys",
}
preds_dict = {}
for dataset_name, print_name in dataset_mapping.items():
print(dataset_name)
preds_dict[print_name] = load_preds(
osp.join(f"../outputs/{dataset_name}/results.yaml")
)
metric_funcs = {
"mAP": calc_ap_score,
}
if osp.exists(metric_res_dicts_path):
metric_res_dicts = np.load(metric_res_dicts_path, allow_pickle=True).item()
else:
metric_res_dicts = {}
for metric_name, metric_func in metric_funcs.items():
logger.info(metric_name)
# Initialize output: if the metric exists, use previous results
single_metric_res_dict = {}
if metric_name in metric_res_dicts:
single_metric_res_dict = metric_res_dicts[metric_name]
# metric -> dataset -> performance dataframe
single_metric_res_dict = compare_results(
preds_dict, metric_func, single_metric_res_dict
)
# Add to dict
metric_res_dicts[metric_name] = single_metric_res_dict
np.save(metric_res_dicts_path, metric_res_dicts)
logger.info("")
np.save(metric_res_dicts_path, metric_res_dicts)
if __name__ == "__main__":
evaluate_methods()
|
collaborative_image_understanding-main
|
src/eval_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
def _get_version():
path = os.path.join(ROOT_DIR, "version.txt")
version = open(path, "r").read().strip()
return version
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection & inclusion of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
cfg = "Debug" if debug else "Release"
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
cmake_args = [
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
f"-DPYTHON_EXECUTABLE={sys.executable}",
f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
"-DBUILD_PYARK=ON",
"-DBUILD_ARK_TESTS=OFF",
"-DBUILD_ARK_EXAMPLES=OFF",
]
build_args = []
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSx on conda-forge)
if "CMAKE_ARGS" in os.environ:
cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
if self.compiler.compiler_type != "msvc":
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator:
try:
import ninja # noqa: F401
cmake_args += ["-GNinja"]
except ImportError:
pass
else:
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
]
build_args += ["--config", cfg]
if sys.platform.startswith("darwin"):
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
if archs:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += [f"-j{self.parallel}"]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
)
subprocess.check_call(
["cmake", "--build", "."] + build_args, cwd=self.build_temp
)
def main():
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
name="projectaria_tools",
version=_get_version(),
description="Project Aria Tools",
long_description="Python API for sensor models and streaming of Aria datasets.",
url="https://github.com/facebookresearch/aria_data_tools",
ext_modules=[CMakeExtension("projectaria_tools", sourcedir=ROOT_DIR)],
author="Meta Reality Labs Research",
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
python_requires=">=3.6",
packages=find_packages(),
license="Apache-2.0",
)
if __name__ == "__main__":
main()
|
Aria_data_tools-main
|
src/setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from PIL import Image
from projectaria_tools.dataprovider import AriaVrsDataProvider, StreamId
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--vrs",
dest="vrs_path",
type=str,
required=True,
help="path to vrs file",
)
return parser.parse_args()
# DataProvider samples
# Extract 10 RGB camera thumbnails from a VRS file
# Teachings:
# - How to initialize the AriaVrsDataProvider
# - How to register a given Player to the DataProvider
# - How to initialize a StreamId and query data records for it at a given timestamp
if __name__ == "__main__":
args = parse_args()
aria_data_provider: AriaVrsDataProvider = AriaVrsDataProvider()
    if not aria_data_provider.openFile(args.vrs_path):
        print(f"failed to open vrs: {args.vrs_path}")
        raise SystemExit(1)
aria_data_provider.setRgbCameraPlayer()
aria_data_provider.setVerbose(True)
# from https://facebookresearch.github.io/Aria_data_tools/docs/sensors-measurements/
rgb_camera_recordable_type_id = 214
rgb_camera_instance_id = 1
rgb_camera_stream_id = StreamId(
rgb_camera_recordable_type_id, rgb_camera_instance_id
)
recording_start = aria_data_provider.getFirstTimestampSec()
recording_end = aria_data_provider.getLastDataRecord(rgb_camera_stream_id).timestamp
sample_count = 10
sample_timestamps = np.linspace(recording_start, recording_end, sample_count)
width = aria_data_provider.getImageWidth(rgb_camera_stream_id)
height = aria_data_provider.getImageHeight(rgb_camera_stream_id)
resize_ratio = 10
    big_image = Image.new(
        "RGB", (int(width * sample_count / resize_ratio), int(height / resize_ratio))
    )
current_width = 0
for sample in sample_timestamps:
aria_data_provider.readDataRecordByTime(rgb_camera_stream_id, sample)
rgb_player = aria_data_provider.getRgbCameraPlayer()
img = rgb_player.getData()
img_buf = img.pixelFrame.getBuffer()
buffer_array = np.array(img_buf, dtype=np.uint8)
image_array = buffer_array.reshape((height, width, 3))
image = Image.fromarray(image_array)
new_size = (
int(image.size[0] / resize_ratio),
int(image.size[1] / resize_ratio),
)
image = image.resize(new_size).rotate(-90)
big_image.paste(image, (current_width, 0))
current_width = int(current_width + width / resize_ratio)
big_image.show()
|
Aria_data_tools-main
|
src/python/tutorials/DataProvider_ImageTimeline.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from projectaria_tools import sensors
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--vrs",
dest="vrs_path",
type=str,
required=True,
help="path to vrs file",
)
return parser.parse_args()
# Sensors samples
# Show how to use the most common functions in the Sensors module
# Teachings:
# - How to retrieve calibration data from a given VRS file
# - How to retrieve per sensor calibration data
# - How to retrieve Camera calibration and use it to project/unproject points
# - How to transfer points between sensor frames
if __name__ == "__main__":
args = parse_args()
#
# Read calibration data from a VRS file
#
print("Attempting to read calibration data from: ", args.vrs_path)
calib_str = sensors.getCalibStrFromFile(args.vrs_path)
device = sensors.DeviceModel.fromJson(calib_str)
print(f"Cameras: {device.getCameraLabels()}")
print(f"IMUs: {device.getImuLabels()}")
print(f"Magnetometers: {device.getMagnetometerLabels()}")
print(f"Barometers: {device.getBarometerLabels()}")
print(f"Microphones: {device.getMicrophoneLabels()}")
#
    # Demonstrate how to use the camera model
    # Create a 3D point, then project and unproject it with a given camera
camLabel = "camera-slam-left"
p_slamLeft = np.array([3.0, 2.0, 1.0])
uv_slamLeft = device.getCameraCalib(camLabel).projectionModel.project(p_slamLeft)
print(
f"Projecting 3D point {p_slamLeft} to image space of {camLabel}: "
+ f"{uv_slamLeft}."
)
p_slamLeft_convertBack = device.getCameraCalib(camLabel).projectionModel.unproject(
uv_slamLeft
)
print(
f"Unprojecting 2D pixel {uv_slamLeft} to 3D space in "
+ f"the frame of {camLabel}: {p_slamLeft_convertBack}."
)
# Transform points between sensor frames.
imuLabel = "imu-left"
p_imuLeft = device.transform(p_slamLeft, camLabel, imuLabel)
print(
f"Transforming {p_slamLeft} from {camLabel} frame to {imuLabel} "
+ f"frame: {p_imuLeft}"
)
# Rectifying points with the IMU accelerometer model.
p_imuLeft_rect = device.getImuCalib(
imuLabel
).accel.compensateForSystematicErrorFromMeasurement(p_imuLeft)
print(
f"Point {p_imuLeft} is rectified by the accelerometer model "
+ f"of {imuLabel} as: {p_imuLeft_rect}"
)
|
Aria_data_tools-main
|
src/python/tutorials/Sensors.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.rembo import HeSBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = HeSBOStrategy(
D=len(benchmark_problem._contextual_parameters), d=8, init_per_proj=8
)
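    # D is the full contextual search-space dimension (12 contexts x 4 parameters = 48);
    # HeSBO searches in a random d=8 dimensional subspace embedding of that space.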
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/hesbo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_hesbo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for Ensemble BO.
Requires installing EBO from https://github.com/zi-w/Ensemble-Bayesian-Optimization.
"""
import os
import sys
sys.path.insert(1, os.path.join(os.getcwd(), 'Ensemble-Bayesian-Optimization'))
import time
import json
from ebo_core.ebo import ebo
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
lbs = np.array([0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT))
ubs = np.array([1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT))
###### Prepare EBO
# All options except: x_range, dx, max_value, T, gp_sigma, dim_limit (3)
# Taken as package defaults from test_ebo.py
core_options = {
'B':10, # number of candidates to be evaluated
'dim_limit':4, # max dimension of the input for each additive function component
'isplot':0, # 1 if plotting the result; otherwise 0.
'z':None, 'k':None, # group assignment and number of cuts in the Gibbs sampling subroutine
'alpha':1., # hyperparameter of the Gibbs sampling subroutine
'beta':np.array([5.,2.]),
'opt_n':1000, # points randomly sampled to start continuous optimization of acfun
'pid':'test3', # process ID for Azure
'datadir':'tmp_data/', # temporary data directory for Azure
'gibbs_iter':10, # number of iterations for the Gibbs sampling subroutine
'useAzure':False, # set to True if use Azure for batch evaluation
'func_cheap':True, # if func cheap, we do not use Azure to test functions
'n_add':None, # this should always be None. it makes dim_limit complicated if not None.
'nlayers': 100, # number of the layers of tiles
'gp_type':'l1', # other choices are l1, sk, sf, dk, df
'n_bo':10, # min number of points selected for each partition
'n_bo_top_percent': 0.5, # percentage of top in bo selections
'n_top':10, # how many points to look ahead when doing choose Xnew
'min_leaf_size':10, # min number of samples in each leaf
'max_n_leaves':10, # max number of leaves
'thresAzure':1, # if batch size > thresAzure, we use Azure
'save_file_name': 'tmp/tmp.pk',
}
for rep in range(25):
print('================', rep)
options = {
'x_range': np.vstack((lbs, ubs)),
'dx': 4 * len(ABR_CONTEXT_CONFIG_DICT),
'max_value': 180, # Give it a pretty good guess for max value
'T': 75,
'gp_sigma': 1e-7,
}
options.update(core_options)
##### Run optimization
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
f = lambda x: -r_contextual.f(x) # since EBO maximizes
e = ebo(f, options)
try:
e.run()
except Exception:
pass
with open("results/ebo_park_rep_{rep}.json".format(rep), 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
# json.dump(r_contextual.fs, fout)
print ('=============', time.time() - t1)
print(time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_ebo.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List
import numpy as np
import park
from ax.utils.common.logger import get_logger
NUM_RUNS = 400
TH_DEFAULT = 3
TH_START_DEFAULT = 1
logger = get_logger(name="ABR_SIM_FB")
class Agent(object):
def __init__(
self,
bw,
bf,
c,
exp_weight,
th=TH_DEFAULT,
th_start=TH_START_DEFAULT,
num_encodes=5,
):
"""Constructor.
Args:
bw: bandwidth prediction scaling term
bf: buffer scaling term
c: constant shift
            th: distance between thresholds for different
                bitrates
            th_start: starting threshold of bitrates
            exp_weight: exponential weights for bandwidth estimate
num_encodes: number of encoding levels (available
bitrates)
"""
self.bw = bw
self.bf = bf
self.c = c
self.num_encodes = num_encodes
self.exp_weight = exp_weight
self.th_levels = [th_start + i * th for i in range(num_encodes)]
        self.th_levels.append(np.inf)  # avoid empty sequence at lookup
self.reset()
def reset(self):
self.prev_bw = []
self.prev_t = []
def exp_avg_bw(self, prev_bw, prev_t):
"""Expoential average bandwidth based on previous observations.
Args:
prev_bw: list of previous bandwidth observation
prev_t: time intervals to the bandwidth observations
"""
assert len(prev_bw) == len(prev_t)
if len(prev_bw) == 0:
return 0 # no previous observations
prev_bw = np.array(prev_bw)
prev_t = np.array(prev_t)
prev_t_cumsum = np.cumsum(prev_t[::-1])[::-1]
prev_t_exp = np.exp(-self.exp_weight * prev_t_cumsum)
bw = np.sum(prev_bw * prev_t_exp) / np.sum(prev_t_exp)
return bw
def get_action(self, obs):
# network bandwidth measurement for downloading the
# last video chunk (with some normalization)
curr_bw = obs[0] / 100000
curr_t = obs[1]
self.prev_bw.append(curr_bw)
self.prev_t.append(curr_t)
        # estimate bandwidth with exponential average over past observations
bw_est = self.exp_avg_bw(self.prev_bw, self.prev_t)
# current video buffer occupancy with some normalization (see
# https://github.com/park-project/park/blob/master/park/envs/abr_sim/abr.py
# L82-L88 for more details)
curr_bf = obs[2] / 10
# here we assume the network bandwidth for downloading
        # the next chunk is the same (you could use a more sophisticated method)
th = self.bw * bw_est + self.bf * curr_bf + self.c
# check which bitrate level is just below the threshold
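        # e.g. with the defaults th_start=1, th=3, num_encodes=5 the thresholds are
        # [1, 4, 7, 10, 13, inf]; a combined score of 5.2 would select bitrate level 2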
act = min(i for i in range(self.num_encodes + 1) if self.th_levels[i] > th)
return act
class ContextualAgent(Agent):
def __init__(self, bw_dict, bf_dict, c_dict, exp_weight_dict, num_encodes=5):
"""Contextual agent Constructor that resets bandwidths, buffer etc for
different contexts.
"""
self.bw_dict = bw_dict
self.bf_dict = bf_dict
self.c_dict = c_dict
self.exp_weight_dict = exp_weight_dict
self.num_encodes = num_encodes
self.reset(context_name=None)
def reset(self, context_name):
self.prev_bw = []
self.prev_t = []
if context_name is not None:
self.bw = self.bw_dict[context_name]
self.bf = self.bf_dict[context_name]
self.c = self.c_dict[context_name]
self.th = TH_DEFAULT
self.th_start = TH_START_DEFAULT
self.th_levels = [
self.th_start + i * self.th for i in range(self.num_encodes)
]
            self.th_levels.append(np.inf)  # avoid empty sequence at lookup
self.exp_weight = self.exp_weight_dict[context_name]
class ParkNoncontextualRunner:
def __init__(self, context_dict, max_eval=1000, return_context_reward=True):
# For tracking iterations
self.fs = []
self.context_fs = []
self.n_eval = 0
self.max_eval = max_eval
self.context_dict = context_dict
self.return_context_reward = return_context_reward
# define search space for non-dp setting
self._base_parameters = [
{
"name": "bw",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "bf",
"type": "range",
"bounds": [0.0, 3.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "c",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "exp_weight",
"type": "range",
"bounds": [0.0001, 0.25],
"value_type": "float",
"log_scale": False,
},
]
self.n_params = len(self._base_parameters)
@property
def base_parameters(self) -> List[Dict]:
return self._base_parameters
def f(self, x):
"""
x = [bw, bf, c, exp_weight]
"""
if self.n_eval >= self.max_eval:
raise StopIteration("Evaluation budget exhuasted")
agent = Agent(bw=x[0], bf=x[1], c=x[2], exp_weight=x[3])
rewards, context_rewards = run_non_contextual_experiments_multiple_times(
agent=agent, context_dict=self.context_dict, num_runs=NUM_RUNS
) # Change this to 1 to make it faster
f_x = np.mean(rewards)
self.n_eval += 1
self.fs.append(f_x)
self.context_fs.append(context_rewards)
if self.return_context_reward is False:
return -f_x
return -f_x, context_rewards # because maximization
class ParkContextualRunner(ParkNoncontextualRunner):
def __init__(
self, num_contexts, context_dict, max_eval=1000, return_context_reward=True
):
super().__init__(
context_dict=context_dict,
max_eval=max_eval,
return_context_reward=return_context_reward,
)
self.num_contexts = num_contexts
self.context_name_list = list(context_dict.keys())
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.extend(
[
{
"name": f"{self._base_parameters[j]['name']}_{context_name}",
"type": self._base_parameters[j]["type"],
"bounds": self._base_parameters[j]["bounds"],
"value_type": self._base_parameters[j]["value_type"],
"log_scale": self._base_parameters[j]["log_scale"],
}
for j in range(self.n_params)
]
)
self._decomposition = {
f"{context_name}": [
f"{self._base_parameters[j]['name']}_{context_name}"
for j in range(self.n_params)
]
for context_name in self.context_name_list
}
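        # e.g. the decomposition entry for context "c0" is
        #   {"c0": ["bw_c0", "bf_c0", "c_c0", "exp_weight_c0"], ...}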
@property
def contextual_parameters(self) -> List[Dict]:
return self._contextual_parameters
@property
def contextual_parameter_decomposition(self) -> List[Dict]:
return self._decomposition
def f(self, x):
"""
x = [bw_1, bf_1, c_1, exp_weight_1, bw_2, bf_2, c_2, exp_weight_2, ...]
"""
if self.n_eval >= self.max_eval:
raise StopIteration("Evaluation budget exhuasted")
bw_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params]
for i in range(self.num_contexts)
}
bf_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params + 1]
for i in range(self.num_contexts)
}
c_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params + 2]
for i in range(self.num_contexts)
}
exp_weight_dict = {
f"{self.context_name_list[i]}": x[i * self.n_params + 3]
for i in range(self.num_contexts)
}
agent = ContextualAgent(
bw_dict=bw_dict,
bf_dict=bf_dict,
c_dict=c_dict,
exp_weight_dict=exp_weight_dict,
)
# Change this to 1 to make it run faster
rewards, context_rewards = run_contextual_experiments_multiple_times(
agent=agent, context_dict=self.context_dict, num_runs=NUM_RUNS
)
f_x = np.mean(rewards)
self.n_eval += 1
self.fs.append(f_x)
self.context_fs.append(context_rewards)
if self.return_context_reward is False:
return -f_x
return -f_x, context_rewards
def run_contextual_experiments_multiple_times(agent, context_dict, num_runs):
total_rewards = []
context_rewards = {}
for context_name, context_val in context_dict.items():
env = park.make("abr_sim_fb")
reward_list = []
for irun in range(num_runs):
obs = env.reset(context_val, irun)
if len(obs) == 0:
break
agent.reset(context_name)
done = False
rewards = 0
while not done:
act = agent.get_action(obs)
obs, reward, done, info = env.step(act)
rewards += reward # context weight could be applied here
total_rewards.append(rewards)
reward_list.append(rewards)
context_rewards[context_name] = -(np.mean(reward_list))
return total_rewards, context_rewards
def run_non_contextual_experiments_multiple_times(agent, context_dict, num_runs):
total_rewards = []
context_rewards = {}
for context_name, context_val in context_dict.items():
env = park.make("abr_sim_fb")
reward_list = []
for irun in range(num_runs):
obs = env.reset(context_val, irun)
if len(obs) == 0:
break
agent.reset()
done = False
rewards = 0
while not done:
act = agent.get_action(obs)
obs, reward, done, info = env.step(act)
rewards += reward # context weight could be applied here
total_rewards.append(rewards)
reward_list.append(rewards)
context_rewards[context_name] = -(np.mean(reward_list))
return total_rewards, context_rewards
|
ContextualBO-main
|
park_abr/fb_abr_problem.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for Add-GP-UCB.
Requires installing dragonfly-opt from pip. The experiments here used version
0.1.4.
"""
import time
import json
from argparse import Namespace
import numpy as np
from dragonfly import minimise_function
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
options = Namespace(acq="add_ucb")
try:
minimise_function(
r_contextual.f,
domain=[[0.0, 1.0], [0.0, 3.0], [0.0, 1.0], [0.0001, 0.25]] * len(ABR_CONTEXT_CONFIG_DICT),
max_capital=num_trials,
options=options,
)
except StopIteration:
pass
with open(f'results/add_ucb_park_rep_{rep}.json', 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_add_ucb.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.rembo import REMBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = REMBOStrategy(
D=len(benchmark_problem._contextual_parameters), d=8, init_per_proj=8
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/rembo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_rembo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from cbo_generation_strategy import get_ContextualBO
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = GenerationStrategy(
name="SAC",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(
get_ContextualBO,
-1,
model_kwargs={"decomposition": decomposition},
),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/cbo_sac_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_sac.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for CMAES.
Requires installing cma from pip. The experiments here used version 2.7.0.
"""
import cma
import time
import json
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
try:
_ = cma.fmin(
objective_function=r_contextual.f,
x0=[0.5, 1.0, 0.5, 0.001] * len(ABR_CONTEXT_CONFIG_DICT),
sigma0=0.15,
options={
"bounds": [
[0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT),
[1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT),
],
"maxfevals": num_trials,
},
)
except StopIteration:
pass
with open(f'results/cma_es_park_rep_{rep}.json', 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_cma_es.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.alebo import ALEBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = ALEBOStrategy(
D=len(benchmark_problem._contextual_parameters), d=8, init_size=8
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/alebo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_alebo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_GPEI, get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = GenerationStrategy(
name="GPEI",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(get_GPEI, -1),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/standard_bo_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_standard_bo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_GPEI, get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from fb_abr_problem import ParkNoncontextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkNoncontextualRunner(context_dict=ABR_CONTEXT_CONFIG_DICT)
t1 = time.time()
gs = GenerationStrategy(
name="GPEI",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(get_GPEI, -1),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.base_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
x = [
parameters.get(param["name"]) for param in benchmark_problem.base_parameters
]
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/non_contextual_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_non_contextual.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import json
import numpy as np
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.factory import get_sobol
from ax.storage.json_store.encoder import object_to_json
from ax.service.ax_client import AxClient
from cbo_generation_strategy import get_ContextualEmbeddingBO
from fb_abr_problem import ParkContextualRunner
CBO_EMB_MODEL_GEN_OPTIONS = {
"acquisition_function_kwargs": {"q": 1, "noiseless": True},
"optimizer_kwargs": {
"method": "SLSQP",
"batch_limit": 1,
"joint_optimization": True,
},
}
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
num_contexts = len(ABR_CONTEXT_CONFIG_DICT)
benchmark_problem = ParkContextualRunner(
num_contexts=num_contexts,
context_dict=ABR_CONTEXT_CONFIG_DICT
)
decomposition = benchmark_problem.contextual_parameter_decomposition
t1 = time.time()
gs = GenerationStrategy(
name="LCE-A",
steps=[
GenerationStep(get_sobol, 8),
GenerationStep(
get_ContextualEmbeddingBO,
-1,
model_kwargs={"decomposition": decomposition},
model_gen_kwargs={"model_gen_options": CBO_EMB_MODEL_GEN_OPTIONS},
),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
context_reward_list = []
def evaluation_aggregated_reward(parameters):
# put parameters into 1-D array
# x = [bw_c0, bf_c0, c_c0, ...]
x = []
for context_name in benchmark_problem.context_name_list:
x.extend([parameters.get(param) for param in decomposition[context_name]])
aggregated_reward, context_reward = benchmark_problem.f(np.array(x))
return {"aggregated_reward": (aggregated_reward, 0.0)}, context_reward
for itrial in range(num_trials):
parameters, trial_index = axc.get_next_trial()
aggregated_res, context_res = evaluation_aggregated_reward(parameters)
axc.complete_trial(trial_index=trial_index, raw_data=aggregated_res)
context_res["trial_index"] = itrial
context_reward_list.append(context_res)
res = json.dumps(
{
"experiment": object_to_json(axc.experiment),
"context_rewards": context_reward_list,
}
)
with open(f'results/cbo_lcea_park_rep_{rep}.json', "w") as fout:
json.dump(res, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_lcea.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for TuRBO.
Requires installing turbo from https://github.com/uber-research/TuRBO.
"""
import turbo
import time
import json
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
lbs = np.array([0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT))
ubs = np.array([1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT))
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
turbo1 = turbo.Turbo1(
f=r_contextual.f,
lb=lbs,
ub=ubs,
n_init=8,
max_evals=75,
)
turbo1.optimize()
with open(f'results/turbo_park_rep_{rep}.json', 'w') as fout:
# times -1 to fs
json.dump([-reward for reward in r_contextual.fs], fout)
# json.dump(r_contextual.fs, fout)
print ('=============', time.time() - t1)
|
ContextualBO-main
|
park_abr/run_park_turbo.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Type
import torch
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.core.parameter import ChoiceParameter
from ax.core.search_space import SearchSpace
from ax.models.torch.cbo_lcea import LCEABO
from ax.models.torch.cbo_sac import SACBO
from ax.modelbridge.factory import DEFAULT_TORCH_DEVICE
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.registry import (
Cont_X_trans,
Y_trans,
)
from ax.modelbridge.torch import TorchModelBridge
from ax.modelbridge.transforms.base import Transform
from ax.models.random.sobol import SobolGenerator
from ax.models.torch.botorch import BotorchModel
def get_ContextualBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=SACBO(decomposition=decomposition),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
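# A minimal usage sketch (this mirrors the run_park_sac.py benchmark script): plug the
# factory into an Ax GenerationStrategy after a Sobol initialization step, where
# `decomposition` maps each context name to its parameter names, e.g.
#   gs = GenerationStrategy(
#       name="SAC",
#       steps=[
#           GenerationStep(get_sobol, 8),
#           GenerationStep(get_ContextualBO, -1, model_kwargs={"decomposition": decomposition}),
#       ],
#   )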
def get_ContextualEmbeddingBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
context_weight_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
search_space: Optional[SearchSpace] = None,
gp_model_args: Optional[Dict[str, Any]] = None,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=LCEABO(
decomposition=decomposition,
cat_feature_dict=cat_feature_dict,
embs_feature_dict=embs_feature_dict,
context_weight_dict=context_weight_dict,
embs_dim_list=embs_dim_list,
gp_model_args=gp_model_args,
),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
|
ContextualBO-main
|
park_abr/cbo_generation_strategy.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from park.envs import make
|
ContextualBO-main
|
park_abr/park/__init__.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from park import logger
# Env-related abstractions
class Env(object):
"""
The main park class. The interface follows OpenAI gym
https://gym.openai.com, which encapsulates an environment with
arbitrary behind-the-scenes dynamics. An environment can be
partially or fully observed.
The main API methods that users of this class need to know are:
observe
step
reset
seed
And set the following attributes:
action_space: The Space object corresponding to valid actions
observation_space: The Space object corresponding to valid observations
reward_range: A tuple corresponding to the min and max possible rewards
"""
# Set this in some subclasses
metadata = {"env.name": "abstract_env"}
reward_range = (-float("inf"), float("inf"))
# Set these in ALL subclasses
action_space = None
observation_space = None
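    # Typical interaction loop (a sketch; concrete envs define the actual
    # observation and action types):
    #   obs = env.reset()
    #   done = False
    #   while not done:
    #       act = agent.get_action(obs)
    #       obs, reward, done, info = env.step(act)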
def step(self, action):
"""
Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
            action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (boolean): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
def reset(self):
"""
Resets the state of the environment and returns an initial observation.
Returns: observation (object): the initial observation of the space.
"""
raise NotImplementedError
def seed(self, seed=None):
"""
Sets the seed for this env's random number generator(s).
"""
logger.warn("Could not seed environment " + self.metadata["env.name"])
return
# Real system environment abstractions
class SysEnv(object):
"""
    For many real world systems, the agent only passively returns
    an action when the system requests one. In other words, it is more
    natural for the system to run on its own, as opposed to using
    the step function to "tick the time" as in most simulated cases
    above.
    The main API method that users of this class need to know is:
run(agent_constructor, agent_parameters)
The user implements the agent in this format
class Agent(object):
def __init__(self, state_space, action_space, *args, **kwargs):
self.state_space = state_space
self.action_space = action_space
def get_action(self, obs, prev_reward, prev_done, prev_info):
act = self.action_space.sample()
# implement real action logic here
return act
"""
# Set this in some subclasses
metadata = {"env.name": "abstract_env"}
reward_range = (-float("inf"), float("inf"))
# Set these in ALL subclasses
action_space = None
observation_space = None
def run(self, agent, *args, **kwargs):
"""
Take in agent constructor, run the real system that consults the
agent for the action at certain events
"""
raise NotImplementedError
# Space-related abstractions
class Space(object):
"""
Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
action.
"""
def __init__(self, struct=None, shape=None, dtype=None):
import numpy as np # takes about 300-400ms to import, load lazily
self.struct = struct # tensor, graph, etc.
self.shape = None if shape is None else tuple(shape)
self.dtype = None if dtype is None else np.dtype(dtype)
def sample(self):
"""
Uniformly randomly sample a random element of this space
"""
raise NotImplementedError
def contains(self, x):
"""
Return boolean specifying if x is a valid
member of this space
"""
raise NotImplementedError
|
ContextualBO-main
|
park_abr/park/core.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
import logging
def debug(msg):
logging.debug(msg)
def info(msg):
logging.info(msg)
def warn(msg):
logging.warning(msg)
def error(msg):
logging.error(msg)
def exception(msg, *args, **kwargs):
logging.exception(msg, *args, **kwargs)
|
ContextualBO-main
|
park_abr/park/logger.py
|
import numpy as np
from park import core, logger
from park.spaces.rng import np_random
class Box(core.Space):
"""
A box in R^n.
I.e., each coordinate is bounded.
Example usage:
self.action_space = spaces.Box(low=-10, high=10, shape=(1,))
"""
def __init__(self, low=None, high=None, struct=None, shape=None, dtype=None):
"""
Two kinds of valid input:
Box(low=-1.0, high=1.0, shape=(3,4)) # low and high are scalars, and shape is provided
Box(low=np.array([-1.0,-2.0]), high=np.array([2.0,4.0])) # low and high are arrays of the same shape
"""
if shape is None:
assert low.shape == high.shape
shape = low.shape
else:
assert np.isscalar(low) and np.isscalar(high)
low = low + np.zeros(shape)
high = high + np.zeros(shape)
if dtype is None: # Autodetect type
if (high == 255).all():
dtype = np.uint8
else:
dtype = np.float32
logger.warn(
"park.spaces.Box autodetected dtype as {}. Please provide explicit dtype.".format(
dtype
)
)
self.low = low.astype(dtype)
self.high = high.astype(dtype)
core.Space.__init__(self, struct, shape, dtype)
def sample(self):
return np_random.uniform(
low=self.low,
high=self.high + (0 if self.dtype.kind == "f" else 1),
size=self.low.shape,
).astype(self.dtype)
def contains(self, x):
return (
x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()
)
|
ContextualBO-main
|
park_abr/park/spaces/box.py
|
import numpy as np
"""
Separate the random number generator from the environment.
This is used for all random sampling in the space native methods.
We expect new algorithms to have their own rngs.
"""
np_random = np.random.RandomState()
np_random.seed(42)
|
ContextualBO-main
|
park_abr/park/spaces/rng.py
|
import numpy as np
from park import core
from park.spaces.rng import np_random
class Discrete(core.Space):
"""
{0,1,...,n-1}
Example usage:
self.observation_space = spaces.Discrete(2)
"""
def __init__(self, n):
self.n = n
core.Space.__init__(self, "tensor_int64", (), np.int64)
def sample(self):
return np_random.randint(self.n)
def contains(self, x):
if isinstance(x, int):
as_int = x
elif isinstance(x, (np.generic, np.ndarray)) and (
x.dtype.kind in np.typecodes["AllInteger"] and x.shape == ()
):
as_int = int(x)
else:
return False
return as_int >= 0 and as_int < self.n
|
ContextualBO-main
|
park_abr/park/spaces/discrete.py
|
from park.spaces.box import Box
from park.spaces.discrete import Discrete
|
ContextualBO-main
|
park_abr/park/spaces/__init__.py
|
import numpy as np
def np_random(seed=42):
if not (isinstance(seed, int) and seed >= 0):
raise ValueError("Seed must be a non-negative integer.")
rng = np.random.RandomState()
rng.seed(seed)
return rng
|
ContextualBO-main
|
park_abr/park/utils/seeding.py
|
import os
def create_folder_if_not_exists(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
|
ContextualBO-main
|
park_abr/park/utils/misc.py
|
import networkx as nx
import numpy as np
class DirectedGraph(object):
def __init__(self, node_features=None, edge_features=None):
self.graph = nx.DiGraph()
if node_features is not None:
self.update_nodes(node_features)
if edge_features is not None:
self.update_edges(edge_features)
def update_nodes(self, node_features):
self.graph.add_nodes_from(node_features.keys())
for node in node_features:
self.graph.nodes[node]["feature"] = node_features[node]
def remove_nodes(self, nodes):
self.graph.remove_nodes_from(nodes)
def update_edges(self, edge_features):
self.graph.add_edges_from(edge_features.keys())
for edge in edge_features:
assert len(edge) == 2
self.graph[edge[0]][edge[1]]["feature"] = edge_features[edge]
def remove_edges(self, edges):
self.graph.remove_edges_from(edges)
def has_node(self, node):
return self.graph.has_node(node)
def has_edge(self, edge):
assert len(edge) == 2
return self.graph.has_edge(edge[0], edge[1])
def nodes(self):
return self.graph.nodes
def edges(self):
return self.graph.edges
def number_of_nodes(self):
return self.graph.number_of_nodes()
def number_of_edges(self):
return self.graph.number_of_edges()
def get_node_features_tensor(self):
node_features = []
node_map = {}
for (i, n) in enumerate(self.graph.nodes):
feature = self.graph.nodes[n]["feature"]
if feature is not None:
node_features.append(feature)
node_map[i] = n
return np.array(node_features), node_map
def get_edge_features_tensor(self):
edge_features = []
edge_map = {}
for (i, e) in enumerate(self.graph.edges):
feature = self.graph[e[0]][e[1]]["feature"]
if feature is not None:
edge_features.append(feature)
edge_map[i] = e
return np.array(edge_features), edge_map
def convert_to_tensor(self):
# node feature matrix, adjacency matrix, edge feature matrix,
# node map (node index -> node object),
# edge map (edge index -> edge object)
node_features, node_map = self.get_node_features_tensor()
edge_features, edge_map = self.get_edge_features_tensor()
adj_mat = nx.adjacency_matrix(self.graph)
return node_features, edge_features, adj_mat, node_map, edge_map
def get_node_feature(self, node):
return self.graph.nodes[node]["feature"]
def get_edge_feature(self, edge):
return self.graph[edge[0]][edge[1]]["feature"]
def get_neighbors(self, node):
list_neighbors = [n for n in self.graph.neighbors(node)]
return list_neighbors
def visualize(self):
# TODO: use pydot
pass
|
ContextualBO-main
|
park_abr/park/utils/directed_graph.py
|
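A brief illustrative sketch of the DirectedGraph wrapper above (hypothetical feature values, not taken from the repo):

# Build a two-node graph with per-node and per-edge features, then export
# the tensor form (feature matrices, adjacency matrix, index maps).
import numpy as np
from park.utils.directed_graph import DirectedGraph

g = DirectedGraph(
    node_features={0: np.array([1.0]), 1: np.array([2.0])},
    edge_features={(0, 1): np.array([0.5])},
)
node_feats, edge_feats, adj_mat, node_map, edge_map = g.convert_to_tensor()
# node_feats has shape (2, 1), edge_feats has shape (1, 1),
# adj_mat is the sparse adjacency matrix, and the maps go index -> node/edge.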
def print_red(s):
print("\033[91m" + s + "\033[0m")
def print_orange(s):
print("\033[93m" + s + "\033[0m")
def print_green(s):
print("\033[92m" + s + "\033[0m")
def print_blue(s):
print("\034[92m" + s + "\033[0m")
|
ContextualBO-main
|
park_abr/park/utils/colorful_print.py
|
ContextualBO-main
|
park_abr/park/utils/__init__.py
|
|
from collections import OrderedDict
class OrderedSet(object):
def __init__(self, contents=()):
self.set = OrderedDict((c, None) for c in contents)
def __contains__(self, item):
return item in self.set
def __iter__(self):
return iter(self.set.keys())
def __len__(self):
return len(self.set)
def __reversed__(self):
return iter(reversed(self.set.keys()))
def add(self, item):
self.set[item] = None
def clear(self):
self.set.clear()
def pop(self):
item = next(iter(self.set))
del self.set[item]
return item
def remove(self, item):
del self.set[item]
def to_list(self):
return [k for k in self.set]
|
ContextualBO-main
|
park_abr/park/utils/ordered_set.py
|
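A small illustrative sketch of the OrderedSet above (hypothetical values):

# Unlike a plain set, iteration and pop() follow insertion order.
from park.utils.ordered_set import OrderedSet

s = OrderedSet([3, 1, 2])
s.add(5)
assert s.to_list() == [3, 1, 2, 5]
assert s.pop() == 3        # removes and returns the oldest item
assert 3 not in s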
# Format follows OpenAI gym https://gym.openai.com
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
def load(entry_point):
import pkg_resources # takes ~400ms to load, so we import it lazily
entry_point = pkg_resources.EntryPoint.parse("x={}".format(entry_point))
result = entry_point.resolve()
return result
class EnvSpec(object):
"""
A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
"""
def __init__(self, env_id, entry_point=None):
self.env_id = env_id
self._entry_point = entry_point
def make(self):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self._entry_point is None:
raise Exception(
"Environment " + self.env_id + " needs to specify an entry point"
)
elif callable(self._entry_point):
env = self._entry_point()
else:
cls = load(self._entry_point)
env = cls()
return env
class EnvRegistry(object):
"""
Register an env by ID. The goal is that results on a particular
environment should be comparable.
"""
def __init__(self):
self.env_specs = {}
def make(self, env_id):
spec = self.spec(env_id)
env = spec.make()
return env
def all(self):
return self.env_specs.values()
def spec(self, env_id):
if env_id not in self.env_specs:
raise KeyError("Environment " + env_id + " not defined.")
return self.env_specs[env_id]
def register(self, env_id, entry_point=None):
if env_id in self.env_specs:
raise Exception("Cannot re-register id: {}".format(env_id))
self.env_specs[env_id] = EnvSpec(env_id, entry_point)
# Global registry
registry = EnvRegistry()
def register(env_id, entry_point):
return registry.register(env_id, entry_point)
def make(env_id):
return registry.make(env_id)
def spec(env_id):
return registry.spec(env_id)
|
ContextualBO-main
|
park_abr/park/envs/registration.py
|
# Format follows OpenAI gym https://gym.openai.com
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from park.envs.registration import make, register
register(env_id="abr_sim_fb", entry_point="park.envs.abr_sim:ABRSimFBEnv")
|
ContextualBO-main
|
park_abr/park/envs/__init__.py
|
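Given the registration above, an environment instance is obtained through the registry. A minimal sketch (assuming the park_abr package and its trace/video data files are available; the context name and delay below are placeholder values taken from the comment in abr_fb.py):

from park.envs import make

env = make("abr_sim_fb")  # resolves the entry point registered above
obs = env.reset({"name": "context_0", "delay": 0.2}, irun=0)
obs, reward, done, info = env.step(env.action_space.sample())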
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
from collections import deque
import numpy as np
from ax.utils.common.logger import get_logger
from park import core, logger, spaces
from park.envs.abr_sim.trace_loader import (
get_chunk_time,
load_chunk_sizes,
load_traces,
sample_trace,
)
from park.utils import seeding
logger = get_logger(name="ABR_SIM")
class ABRSimEnv(core.Env):
"""
Adapt bitrate during a video playback with varying network conditions.
The objective is to (1) reduce stall (2) increase video quality and
(3) reduce switching between bitrate levels. Ideally, we would want to
*simultaneously* optimize the objectives in all dimensions.
* STATE *
[The throughput estimation of the past chunk (chunk size / elapsed time),
download time (i.e., elapsed time since last action), current buffer ahead,
number of the chunks until the end, the bitrate choice for the past chunk,
current chunk size of bitrate 1, chunk size of bitrate 2,
..., chunk size of bitrate 6]
Note: we need the selected bitrate of the past chunk because the reward has
a term for bitrate change; a fully observable MDP needs the past chunk's bitrate
* ACTIONS *
Which bitrate to choose for the current chunk, represented as an integer in [0, 5]
* REWARD *
At current time t, the selected bitrate is b_t, the stall time between
t to t + 1 is s_t, then the reward r_t is
b_{t} - 4.3 * s_{t} - |b_t - b_{t-1}|
Note: there are different definitions of combining multiple objectives in the reward,
check Section 5.1 of the first reference below.
* REFERENCE *
Section 4.2, Section 5.1
Neural Adaptive Video Streaming with Pensieve
H Mao, R Netravali, M Alizadeh
https://dl.acm.org/citation.cfm?id=3098843
Figure 1b, Section 6.2 and Appendix J
Variance Reduction for Reinforcement Learning in Input-Driven Environments.
H Mao, SB Venkatakrishnan, M Schwarzkopf, M Alizadeh.
https://openreview.net/forum?id=Hyg1G2AqtQ
A Control-Theoretic Approach for Dynamic Adaptive Video Streaming over HTTP
X Yin, A Jindal, V Sekar, B Sinopoli
https://dl.acm.org/citation.cfm?id=2787486
"""
def __init__(self):
# observation and action space
self.setup_space()
# set up seed
self.seed(42)
# load all trace files
self.all_traces = load_traces()
# load all video chunk sizes
self.chunk_sizes = load_chunk_sizes()
# mapping between action and bitrate level
self.bitrate_map = [0.3, 0.75, 1.2, 1.85, 2.85, 4.3] # Mbps
# how many past throughput to report
self.past_chunk_len = 8
# assert number of chunks for different bitrates are all the same
assert len(np.unique([len(chunk_size) for chunk_size in self.chunk_sizes])) == 1
self.total_num_chunks = len(self.chunk_sizes[0])
def observe(self):
if self.chunk_idx < self.total_num_chunks:
valid_chunk_idx = self.chunk_idx
else:
valid_chunk_idx = 0
if self.past_action is not None:
valid_past_action = self.past_action
else:
valid_past_action = 0
# network throughput of past chunk, past chunk download time,
# current buffer, number of chunks left and the last bitrate choice
obs_arr = [
self.past_chunk_throughputs[-1],
self.past_chunk_download_times[-1],
self.buffer_size,
self.total_num_chunks - self.chunk_idx,
valid_past_action,
]
# current chunk size of different bitrates
obs_arr.extend(self.chunk_sizes[i][valid_chunk_idx] for i in range(6))
# fit in observation space
for i in range(len(obs_arr)):
if obs_arr[i] > self.obs_high[i]:
obs_arr[i] = self.obs_high[i]
obs_arr = np.array(obs_arr)
assert self.observation_space.contains(obs_arr)
return obs_arr
def reset(self, context_setup):
self.trace, self.curr_t_idx = sample_trace(self.all_traces, self.np_random)
self.noise_std = context_setup.get("noise_std", 0.0)
self.shift = context_setup.get("shift", 0.0)
self.multiplier = context_setup.get("multiplier", 1.0)
self.delay = context_setup.get("delay", 0.0)
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
self.chunk_idx = 0
self.buffer_size = 0.0 # initial download time not counted
self.past_action = None
self.past_chunk_throughputs = deque(maxlen=self.past_chunk_len)
self.past_chunk_download_times = deque(maxlen=self.past_chunk_len)
for _ in range(self.past_chunk_len):
self.past_chunk_throughputs.append(0)
self.past_chunk_download_times.append(0)
return self.observe()
def seed(self, seed):
self.np_random = seeding.np_random(seed)
def setup_space(self):
# Set up the observation and action space
# The boundary of the space may change if the dynamics are changed;
# a warning message will show up every time, e.g., when the observation falls
# out of the observation space
self.obs_low = np.array([0] * 11)
self.obs_high = np.array(
[10e6, 100, 100, 500, 5, 10e6, 10e6, 10e6, 10e6, 10e6, 10e6]
)
self.observation_space = spaces.Box(
low=self.obs_low, high=self.obs_high, dtype=np.float32
)
self.action_space = spaces.Discrete(6)
def step(self, action):
# 0 <= action < num_servers
assert self.action_space.contains(action)
# Note: sizes are in bytes, times are in seconds
chunk_size = self.chunk_sizes[action][self.chunk_idx]
# compute chunk download time based on trace
delay = self.delay # in seconds
# keep experiencing the network trace
# until the chunk is downloaded
while chunk_size > 1e-8: # floating number business
throughput = self.trace[1][self.curr_t_idx] / 8.0 * 1e6 # bytes/second
throughput = throughput * self.multiplier + self.shift
sign = [-1, 1]
throughput += (0.25 * self.np_random.poisson(self.noise_std) * 1e6) * (
sign[self.np_random.binomial(1, 0.85)]
)
throughput = max(throughput, 0)
chunk_time_used = min(self.chunk_time_left, chunk_size / throughput)
chunk_size -= throughput * chunk_time_used
self.chunk_time_left -= chunk_time_used
delay += chunk_time_used
if self.chunk_time_left == 0:
self.curr_t_idx += 1
if self.curr_t_idx == len(self.trace[1]):
self.curr_t_idx = 0
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
# compute buffer size
rebuffer_time = max(delay - self.buffer_size, 0)
# update video buffer
self.buffer_size = max(self.buffer_size - delay, 0)
self.buffer_size += 4.0 # each chunk is 4 seconds of video
# cap the buffer size
self.buffer_size = min(self.buffer_size, 40.0)
# bitrate change penalty
if self.past_action is None:
bitrate_change = 0
else:
bitrate_change = np.abs(
self.bitrate_map[action] - self.bitrate_map[self.past_action]
)
# linear reward
# (https://dl.acm.org/citation.cfm?id=3098843 section 5.1, QoE metrics (1))
reward = self.bitrate_map[action] - 4.3 * rebuffer_time - bitrate_change
# store action for future bitrate change penalty
self.past_action = action
# update observed network bandwidth and duration
self.past_chunk_throughputs.append(
self.chunk_sizes[action][self.chunk_idx] / float(delay)
)
self.past_chunk_download_times.append(delay)
# advance video
self.chunk_idx += 1
done = self.chunk_idx == self.total_num_chunks
return (
self.observe(),
reward,
done,
{
"bitrate": self.bitrate_map[action],
"stall_time": rebuffer_time,
"bitrate_change": bitrate_change,
},
)
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/abr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# A modification of the adaptive video streaming environment in https://github.com/park-project/park
from park.envs.abr_sim.abr_fb import ABRSimFBEnv
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# A modification of the adaptive video streaming environment in https://github.com/park-project/park
from collections import deque
import numpy as np
from ax.utils.common.logger import get_logger
from park import logger
from park.envs.abr_sim.abr import ABRSimEnv
from park.envs.abr_sim.fb_trace_loader import (
get_chunk_time,
load_chunk_sizes,
load_traces,
sample_trace,
)
logger = get_logger(name="ABR_SIM_FB")
class ABRSimFBEnv(ABRSimEnv):
"""
ABRSimEnv in FB setting.
Adapt bitrate during a video playback with varying network conditions.
The objective is to (1) reduce stall (2) increase video quality and
(3) reduce switching between bitrate levels. Ideally, we would want to
*simultaneously* optimize the objectives in all dimensions.
* STATE *
[The throughput estimation of the past chunk (chunk size / elapsed time),
download time (i.e., elapsed time since last action), current buffer ahead,
number of the chunks until the end, the bitrate choice for the past chunk,
current chunk size of bitrate 1, chunk size of bitrate 2,
..., chunk size of bitrate 6]
Note: we need the selected bitrate of the past chunk because the reward has
a term for bitrate change; a fully observable MDP needs the past chunk's bitrate
* ACTIONS *
Which bitrate to choose for the current chunk, represented as an integer in [0, 5]
* REWARD *
At current time t, the selected bitrate is b_t, the stall time between
t to t + 1 is s_t, then the reward r_t is
b_{t} - 2.8 * s_{t} - 0.5 * |b_t - b_{t-1}|
* REFERENCE *
Section 4.2, Section 5.1
Neural Adaptive Video Streaming with Pensieve
H Mao, R Netravali, M Alizadeh
https://dl.acm.org/citation.cfm?id=3098843
Figure 1b, Section 6.2 and Appendix J
Variance Reduction for Reinforcement Learning in Input-Driven Environments.
H Mao, SB Venkatakrishnan, M Schwarzkopf, M Alizadeh.
https://openreview.net/forum?id=Hyg1G2AqtQ
A Control-Theoretic Approach for Dynamic Adaptive Video Streaming over HTTP
X Yin, A Jindal, V Sekar, B Sinopoli
https://dl.acm.org/citation.cfm?id=2787486
"""
def __init__(self):
# observation and action space
self.setup_space()
# set up seed
self.seed(42)
# load all trace files
self.all_traces = load_traces()
# load all video chunk sizes
self.all_chunk_sizes = load_chunk_sizes()
# mapping between action and bitrate level
self.bitrate_map = [0.3, 0.75, 1.2, 1.85, 2.85, 4.3] # Mbps
# how many past throughput to report
self.past_chunk_len = 8
def reset(self, context_setup, irun):
# context_setup = {"name": "context_0", "delay": delay}
context_name = context_setup["name"]
# load trace
if irun >= len(self.all_traces[context_name]):
return []
trace_uuid = sample_trace(self.all_traces[context_name], irun)
self.trace = self.all_traces[context_name][trace_uuid]
self.curr_t_idx = 0
# load chunk
self.chunk_sizes = self.all_chunk_sizes[context_name][trace_uuid] # sample
self.chunk_idx = 0
self.total_num_chunks = len(self.chunk_sizes[0])
# assert number of chunks for different bitrates are all the same
assert len(np.unique([len(chunk_size) for chunk_size in self.chunk_sizes])) == 1
self.delay = context_setup.get("delay", 0.0)
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
self.buffer_size = 0.0 # initial download time not counted
self.past_action = None
self.past_chunk_throughputs = deque(maxlen=self.past_chunk_len)
self.past_chunk_download_times = deque(maxlen=self.past_chunk_len)
for _ in range(self.past_chunk_len):
self.past_chunk_throughputs.append(0)
self.past_chunk_download_times.append(0)
return self.observe()
def step(self, action):
# 0 <= action < num_servers
assert self.action_space.contains(action)
# Note: sizes are in bytes, times are in seconds
chunk_size = self.chunk_sizes[action][self.chunk_idx]
# compute chunk download time based on trace
delay = self.delay # in seconds
# keep experiencing the network trace
# until the chunk is downloaded
while chunk_size > 1e-8: # floating number business
throughput = self.trace[1][self.curr_t_idx] # bytes/second
throughput = throughput / 3.0
throughput = max(throughput, 0)
chunk_time_used = min(self.chunk_time_left, chunk_size / throughput)
chunk_size -= throughput * chunk_time_used
self.chunk_time_left -= chunk_time_used
delay += chunk_time_used
if self.chunk_time_left == 0:
self.curr_t_idx += 1
if self.curr_t_idx == len(self.trace[1]):
self.curr_t_idx = 0
self.chunk_time_left = get_chunk_time(self.trace, self.curr_t_idx)
# compute buffer size
rebuffer_time = max(delay - self.buffer_size, 0)
# update video buffer
self.buffer_size = max(self.buffer_size - delay, 0)
self.buffer_size += 4.0 # each chunk is 4 seconds of video
# cap the buffer size
self.buffer_size = min(self.buffer_size, 40.0)
# bitrate change penalty
if self.past_action is None:
bitrate_change = 0
else:
bitrate_change = np.abs(
self.bitrate_map[action] - self.bitrate_map[self.past_action]
)
# linear reward
# (https://dl.acm.org/citation.cfm?id=3098843 section 5.1, QoE metrics (1))
reward = self.bitrate_map[action] - 2.8 * rebuffer_time - 0.5 * bitrate_change
# store action for future bitrate change penalty
self.past_action = action
# update observed network bandwidth and duration
self.past_chunk_throughputs.append(
self.chunk_sizes[action][self.chunk_idx] / float(delay)
)
self.past_chunk_download_times.append(delay)
# advance video
self.chunk_idx += 1
done = self.chunk_idx == self.total_num_chunks
return (
self.observe(),
reward,
done,
{
"bitrate": self.bitrate_map[action],
"stall_time": rebuffer_time,
"bitrate_change": bitrate_change,
},
)
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/abr_fb.py
|
# Fork of the adaptive video streaming environment in https://github.com/park-project/park
import os
import numpy as np
import park
def get_chunk_time(trace, t_idx):
if t_idx == len(trace[0]) - 1:
return 1 # bandwidth last for 1 second
else:
return trace[0][t_idx + 1] - trace[0][t_idx]
def load_chunk_sizes():
# bytes of video chunk file at different bitrates
# source video: "Envivio-Dash3" video H.264/MPEG-4 codec
# at bitrates in {300,750,1200,1850,2850,4300} kbps
# original video file:
# https://github.com/hongzimao/pensieve/tree/master/video_server
# download the video size folder if it does not exist
video_folder = park.__path__[0] + "/envs/abr_sim/videos/"
chunk_sizes = np.load(video_folder + "video_sizes.npy")
return chunk_sizes
def load_traces():
# download the trace folder if it does not exist
trace_folder = park.__path__[0] + "/envs/abr_sim/traces/"
all_traces = []
for trace in os.listdir(trace_folder):
all_t = []
all_bandwidth = []
with open(trace_folder + trace, "rb") as f:
for line in f:
parse = line.split()
all_t.append(float(parse[0]))
all_bandwidth.append(float(parse[1]))
all_traces.append((all_t, all_bandwidth))
return all_traces
def sample_trace(all_traces, np_random):
# weighted random sample based on trace length
all_p = [len(trace[1]) for trace in all_traces]
sum_p = float(sum(all_p))
all_p = [p / sum_p for p in all_p]
# sample a trace
trace_idx = np_random.choice(len(all_traces), p=all_p)
# sample a starting point
init_t_idx = np_random.choice(len(all_traces[trace_idx][0]))
# return a trace and the starting t
return all_traces[trace_idx], init_t_idx
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/trace_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# A modification of the adaptive video streaming environment in https://github.com/park-project/park
import json
import park
def get_chunk_time(trace, t_idx):
if t_idx == len(trace[0]) - 1:
return 1 # bandwidth last for 1 second
else:
return trace[0][t_idx + 1] - trace[0][t_idx]
def load_chunk_sizes():
"""
chunk_sizes is a dict whose keys are context names and whose values are the
bytes of the video chunk files at different bitrates for the corresponding
traces.
"""
# download the video size folder if it does not exist
video_folder = park.__path__[0] + "/envs/abr_sim/videos/"
with open(video_folder + "fb_chunks_data_all.json", "r") as fp:
chunk_sizes = json.load(fp)
return chunk_sizes
def load_traces():
"""
all_traces is a dict whose keys are context names and whose values are
dictionaries mapping a trace uuid to its (segment times, bandwidths) pair:
{
"context name": {
"trace_id": (
[0.01, 1.0], # segment time (seconds)
[1e6, 2e6], # bandwidth (bytes per second)
)
}
}
"""
# download the trace folder if it does not exist
trace_folder = park.__path__[0] + "/envs/abr_sim/fb_traces/"
with open(trace_folder + "fb_traces_data_all.json", "r") as fp:
all_traces = json.load(fp)
return all_traces
def sample_trace(all_traces, irun):
# deterministic
trace_list = list(all_traces.keys())
trace_list.sort()
return trace_list[irun]
def random_sample_trace(all_traces, np_random):
# weighted random sample based on trace length
trace_list = list(all_traces.keys())
trace_list.sort()
all_p = [len(all_traces[trace_name][1]) for trace_name in trace_list]
sum_p = float(sum(all_p))
all_p = [p / sum_p for p in all_p]
# sample a trace
trace_idx = np_random.choice(len(trace_list), p=all_p)
trace_uuid = trace_list[trace_idx]
# sample a starting point
init_t_idx = np_random.choice(len(all_traces[trace_uuid][0]))
# return a trace and the starting t
return trace_uuid, init_t_idx
|
ContextualBO-main
|
park_abr/park/envs/abr_sim/fb_trace_loader.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.modelbridge.factory import get_GPEI, get_sobol
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.service.ax_client import AxClient
from ax.storage.json_store.encoder import object_to_json
from cbo_generation_strategy import (
get_ContextualBO,
get_ContextualEmbeddingBO,
)
from get_synthetic_problem import get_benchmark_problem
CBO_EMB_MODEL_GEN_OPTIONS = {
"acquisition_function_kwargs": {"q": 1, "noiseless": True},
"optimizer_kwargs": {
"method": "SLSQP",
"batch_limit": 1,
"joint_optimization": True,
},
}
def run_aggregated_reward_benchmark(
strategy_name,
benchmark_problem_name,
irep,
num_contexts,
init_size=8,
num_trials=100,
benchmark_problem_args={},
):
benchmark_problem = get_benchmark_problem(
name=benchmark_problem_name,
num_contexts=num_contexts,
benchmark_problem_args=benchmark_problem_args,
)
decomposition = benchmark_problem.contextual_parameter_decomposition
context_weight_dict = {
benchmark_problem.context_name_list[i]: benchmark_problem.context_weights[i]
for i in range(benchmark_problem.num_contexts)
}
embs_feature_dict = {
benchmark_problem.context_name_list[i]: benchmark_problem.context_embedding[
i, :
]
for i in range(benchmark_problem.num_contexts)
}
if strategy_name == "Sobol":
gs = GenerationStrategy(
name="Sobol", steps=[GenerationStep(get_sobol, -1)]
)
elif strategy_name == "GPEI":
gs = GenerationStrategy(
name="GPEI",
steps=[
GenerationStep(get_sobol, init_size),
GenerationStep(get_GPEI, -1),
],
)
elif strategy_name == "SAC":
gs = GenerationStrategy(
name="SAC",
steps=[
GenerationStep(model=get_sobol, num_trials=init_size),
GenerationStep(
model=get_ContextualBO,
num_trials=-1,
model_kwargs={"decomposition": decomposition},
),
],
)
elif strategy_name == "LCE-A":
gs = GenerationStrategy(
name="LCE-A",
steps=[
GenerationStep(model=get_sobol, num_trials=init_size),
GenerationStep(
model=get_ContextualEmbeddingBO,
num_trials=-1,
model_kwargs={
"decomposition": decomposition,
},
model_gen_kwargs={"model_gen_options": CBO_EMB_MODEL_GEN_OPTIONS},
),
],
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.contextual_parameters
axc.create_experiment(
name="cbo_aggregated_reward_experiment",
parameters=experiment_parameters,
objective_name="aggregated_reward",
minimize=True,
)
def evaluation_aggregated_reward(parameters):
# put parameters into 2-D array
x = np.array(
[
[parameters.get(param) for param in decomposition[context_name]]
for context_name in benchmark_problem.context_name_list
]
)
return {
"aggregated_reward": (
benchmark_problem.evaluation_function_aggregated(x),
0.0,
)
}
for _ in range(num_trials):
parameters, trial_index = axc.get_next_trial()
axc.complete_trial(
trial_index=trial_index, raw_data=evaluation_aggregated_reward(parameters)
)
res = json.dumps(object_to_json(axc.experiment))
with open(f'results/aggregated_reward_{benchmark_problem_name}_{strategy_name}_rep_{irep}.json', "w") as fout:
json.dump(res, fout)
return res
def run_aggregated_reward_benchmark_reps(
benchmark_problem_name,
strategy,
num_contexts,
init_size=8,
num_trials=100,
reps=8,
benchmark_problem_args={},
):
res = {strategy: []}
for irep in range(reps):
res[strategy].append(
run_aggregated_reward_benchmark(
strategy_name=strategy,
benchmark_problem_name=benchmark_problem_name,
irep=irep,
num_contexts=num_contexts,
init_size=init_size,
num_trials=num_trials,
benchmark_problem_args=benchmark_problem_args,
)
)
with open(f'results/aggregated_reward_{benchmark_problem_name}_{strategy}.json', "w") as fout:
json.dump(res, fout)
if __name__ == '__main__':
# Run all of the benchmark replicates.
# Hartmann5DEmbedding, Uniform Weights, SAC
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="SAC",
num_contexts=5,
reps=10
)
# Hartmann5DEmbedding, Uniform Weights, LCE-A
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="LCE-A",
num_contexts=5,
reps=10
)
# Hartmann5DEmbedding, Uniform Weights, SOBOL
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="Sobol",
num_contexts=5,
num_trials=10,
reps=1
)
# Hartmann5DEmbedding, Uniform Weights, Standard BO
run_aggregated_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="GPEI",
num_contexts=5,
num_trials=10,
reps=1
)
# Hartmann5DEmbedding, Skewed Weights, num of contexts = 10, num of dense contexts = 2
# run_aggregated_reward_benchmark_reps(
# benchmark_problem_name="Hartmann5DEmbedding",
# strategy="SAC",
# num_contexts=10,
# reps=10,
# benchmark_problem_args = {"context_weights": [0.46, 0.46, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]}
# )
|
ContextualBO-main
|
benchmarks/run_synthetic_benchmarks_agg_reward.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
from typing import Any, Dict, List, Optional
import numpy as np
from ax.core.parameter import ChoiceParameter, ParameterType
from ax.service.ax_client import AxClient
from ax.storage.json_store.encoder import object_to_json
from get_synthetic_problem import get_benchmark_problem
from cbo_generation_strategy import (
MultiOutputStrategy,
MultiSOBOLStrategy,
MultiTaskContextualBOStrategy,
)
from synthetic_problems import ContextualEmbeddingSyntheticFunction
def run_multioutput_reward_benchmark(
strategy_name,
benchmark_problem_name,
irep,
num_contexts,
init_size=8,
num_trials=100,
benchmark_problem_args={},
):
init_size = init_size * num_contexts
num_trials = num_trials * num_contexts
benchmark_problem = get_benchmark_problem(
name=benchmark_problem_name,
num_contexts=num_contexts,
benchmark_problem_args=benchmark_problem_args,
)
context_parameter = ChoiceParameter(
name="CONTEXT_PARAMS",
values=benchmark_problem.context_name_list,
is_task=True,
parameter_type=ParameterType.STRING,
)
if strategy_name == "MultiSOBOL":
gs = MultiSOBOLStrategy(context_parameter=context_parameter, name="MultiSOBOL")
elif strategy_name == "ICM":
gs = MultiOutputStrategy(
name="ICM",
context_parameter=context_parameter,
init_size=init_size,
)
elif strategy_name == "LCE-M":
gs = MultiTaskContextualBOStrategy(
name="LCE-M",
context_parameter=context_parameter,
init_size=init_size,
)
axc = AxClient(generation_strategy=gs)
experiment_parameters = benchmark_problem.base_parameters
experiment_parameters.append(
{
"name": context_parameter.name,
"type": "choice",
"values": context_parameter.values,
"is_task": True,
}
)
axc.create_experiment(
name="cbo_multioutput_reward_experiment",
parameters=experiment_parameters,
objective_name="context_reward",
minimize=True,
)
def evaluation_contextual_reward(parameters):
# get base parameters only (into 1-D array)
x = np.array(
[
parameters.get(param["name"])
for param in benchmark_problem.base_parameters
if param["name"] != context_parameter.name
]
)
context = parameters.get(context_parameter.name)
weight = benchmark_problem.context_weights[
benchmark_problem.context_name_list.index(context)
]
if isinstance(benchmark_problem, ContextualEmbeddingSyntheticFunction):
embs = benchmark_problem.context_embedding[
benchmark_problem.context_name_list.index(context), :
]
x = np.hstack([x, embs])
return {
"context_reward": (weight * benchmark_problem.component_function(x), 0.0)
}
for _ in range(num_trials):
parameters, trial_index = axc.get_next_trial()
axc.complete_trial(
trial_index=trial_index, raw_data=evaluation_contextual_reward(parameters)
)
res = json.dumps(object_to_json(axc.experiment))
with open(f'results/multioutput_reward_{benchmark_problem_name}_{strategy_name}_rep_{irep}.json', "w") as fout:
json.dump(res, fout)
return res
def run_multioutput_reward_benchmark_reps(
benchmark_problem_name,
strategy,
num_contexts,
init_size=4,
num_trials=40,
reps=8,
benchmark_problem_args={},
):
res = {strategy: []}
for irep in range(reps):
res[strategy].append(
run_multioutput_reward_benchmark(
strategy_name=strategy,
benchmark_problem_name=benchmark_problem_name,
irep=irep,
num_contexts=num_contexts,
init_size=init_size,
num_trials=num_trials,
benchmark_problem_args=benchmark_problem_args,
)
)
with open(f'results/multioutput_reward_{benchmark_problem_name}_{strategy}.json', "w") as fout:
json.dump(res, fout)
if __name__ == '__main__':
# Run all of the benchmark replicates.
# Hartmann5DEmbedding, Uniform Weights, LCE-M
run_multioutput_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="LCE-M",
num_contexts=5,
reps=8
)
# Hartmann5DEmbedding, Uniform Weights, ICM
run_multioutput_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="ICM",
num_contexts=5,
reps=8
)
# Hartmann5DEmbedding, Uniform Weights, SOBOL
run_multioutput_reward_benchmark_reps(
benchmark_problem_name="Hartmann5DEmbedding",
strategy="MultiSOBOL",
num_contexts=5,
reps=8
)
|
ContextualBO-main
|
benchmarks/run_synthetic_benchmarks_multioutput_reward.py
|
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from abc import ABC
from typing import Dict, List
import numpy as np
import torch
from ax.models.random.sobol import SobolGenerator
from ax.utils.measurement.synthetic_functions import branin, hartmann6
class ContextualSyntheticFunction(ABC):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
noise_sd: float = 0.0,
) -> None:
self.context_name_list = context_name_list
# contextual weights
self.context_weights = context_weights
# number of contexts
self.num_contexts = len(context_name_list)
# noise term
self.noise_sd = noise_sd
self._base_parameters = []
self._contextual_parameters = []
self._decomposition = {}
@property
def base_parameters(self) -> List[Dict]:
return self._base_parameters
@property
def contextual_parameters(self) -> List[Dict]:
return self._contextual_parameters
@property
def contextual_parameter_decomposition(self) -> Dict[str, List[str]]:
return self._decomposition
def component_function(self, x: np.ndarray) -> float:
"""function that produces the outcomes for each component."""
raise NotImplementedError
def evaluation_function_vectorized(self, x: np.ndarray) -> np.ndarray:
# input x is a matrix: each row corresponds each context
# and each column to each base parameter
return np.array(
[self.component_function(x[i, :]) for i in range(self.num_contexts)]
)
def evaluation_function_aggregated(self, x: np.ndarray) -> float:
# aggregate across context weighted by context coeff
context_output = self.evaluation_function_vectorized(x)
return np.sum(context_output * self.context_weights)
class ContextualEmbeddingSyntheticFunction(ContextualSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
context_embedding: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
noise_sd=noise_sd,
)
# each row corresponds each context and each column to each embeddding
self.context_embedding = context_embedding
def evaluation_function_vectorized(self, x: np.ndarray) -> np.ndarray:
# input x is a matrix: each row corresponds each context
# and each column to each base parameter
x_all = np.hstack([x, self.context_embedding])
return np.array(
[self.component_function(x_all[i, :]) for i in range(self.num_contexts)]
)
class Branin2DBase(ContextualSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": "x0",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
},
{
"name": "x1",
"type": "range",
"bounds": [0.0, 15.0],
"value_type": "float",
"log_scale": False,
},
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.append(
{
"name": f"x0_{context_name}",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
}
)
self._contextual_parameters.append(
{
"name": f"x1_{context_name}",
"type": "range",
"bounds": [0.0, 15.0],
"value_type": "float",
"log_scale": False,
}
)
self._decomposition = {
f"{context_name}": [f"x0_{context_name}", f"x1_{context_name}"]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
return branin.f(x)
class Branin1DEmbedding(ContextualEmbeddingSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
context_embedding: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=context_embedding, # values between [0.0, 15.0]
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": "x0",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
}
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.append(
{
"name": f"x0_{context_name}",
"type": "range",
"bounds": [-5.0, 10.0],
"value_type": "float",
"log_scale": False,
}
)
self._decomposition = {
f"{context_name}": [f"x0_{context_name}"]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
# make sure embedding is at the end of the array
return branin.f(x)
class Hartmann6DBase(ContextualSyntheticFunction):
# additive Hartmann 6D case
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": f"x{i}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for i in range(6)
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.extend(
[
{
"name": f"x{j}_{context_name}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for j in range(6)
]
)
self._decomposition = {
f"{context_name}": [f"x{j}_{context_name}" for j in range(6)]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
return hartmann6.f(x)
class Hartmann5DEmbedding(ContextualEmbeddingSyntheticFunction):
def __init__(
self,
context_name_list: List,
context_weights: np.ndarray,
context_embedding: np.ndarray,
noise_sd: float = 0.0,
) -> None:
super().__init__(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=context_embedding, # values between [0.0, 1.0]
noise_sd=noise_sd,
)
# define search space for non-dp setting
self._base_parameters = [
{
"name": f"x{i}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for i in range(5)
]
# for dp setting, extend to contextual search space
self._contextual_parameters = []
for context_name in self.context_name_list:
self._contextual_parameters.extend(
[
{
"name": f"x{j}_{context_name}",
"type": "range",
"bounds": [0.0, 1.0],
"value_type": "float",
"log_scale": False,
}
for j in range(5)
]
)
self._decomposition = {
f"{context_name}": [f"x{j}_{context_name}" for j in range(5)]
for context_name in self.context_name_list
}
def component_function(self, x: np.ndarray) -> float:
# make sure embedding is at the end of the array
return hartmann6.f(x)
|
ContextualBO-main
|
benchmarks/synthetic_problems.py
|
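An illustrative sketch of the contextual decomposition defined above, for a two-context Branin problem (hypothetical context names and inputs; requires numpy and ax):

import numpy as np
from synthetic_problems import Branin2DBase

problem = Branin2DBase(
    context_name_list=["c0", "c1"],
    context_weights=np.array([0.5, 0.5]),
)
# One (x0, x1) pair per context in the contextual search space:
# {'c0': ['x0_c0', 'x1_c0'], 'c1': ['x0_c1', 'x1_c1']}
print(problem.contextual_parameter_decomposition)
# Aggregated objective: branin evaluated row-wise on a 2 x 2 matrix,
# then summed with the context weights.
x = np.array([[1.0, 2.0], [3.0, 4.0]])
print(problem.evaluation_function_aggregated(x))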
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import numpy as np
from synthetic_problems import (
Branin1DEmbedding,
Branin2DBase,
Hartmann5DEmbedding,
Hartmann6DBase,
)
def get_benchmark_problem(
name: str,
num_contexts: int,
benchmark_problem_args: Optional[Dict[str, Any]] = None,
):
"""generate benchmark problems.
Args:
1. name: benchmark name
2. num_contexts: number of contexts n
3. args for creating benchmark
- context_name_list. List of str. Default is [c0, c1, ..., cn]
- context_weights. [w0, w1, ..., wn]. sum of w_i = 1. Default is [1/n]
- context_embedding.
"""
benchmark_problem_args = benchmark_problem_args or {}
context_name_list = benchmark_problem_args.get(
"context_name_list", [f"c{i}" for i in range(num_contexts)]
)
context_weights = np.array(
benchmark_problem_args.get(
"context_weights", np.ones(num_contexts) / num_contexts
)
)
if name == "Branin2D":
benchmark_problem = Branin2DBase(
context_name_list=context_name_list, context_weights=context_weights
)
elif name == "Hartmann6D":
benchmark_problem = Hartmann6DBase(
context_name_list=context_name_list, context_weights=context_weights
)
elif name == "Branin1DEmbedding":
benchmark_problem = Branin1DEmbedding(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=np.arange(0.0, 15.0, 15.0 / num_contexts).reshape(
num_contexts, 1
),
)
elif name == "Hartmann5DEmbedding":
context_embedding = np.array(
benchmark_problem_args.get(
"context_embedding", np.arange(0.0, 1.0, 1.0 / num_contexts)
)
)
benchmark_problem = Hartmann5DEmbedding(
context_name_list=context_name_list,
context_weights=context_weights,
context_embedding=context_embedding.reshape(num_contexts, 1),
)
else:
raise ValueError(f"Unknown benchmark problem name: {name}")
return benchmark_problem
|
ContextualBO-main
|
benchmarks/get_synthetic_problem.py
|
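A minimal sketch of building and evaluating a benchmark problem with the helper above (illustrative; assumes numpy and ax are installed):

import numpy as np
from get_synthetic_problem import get_benchmark_problem

problem = get_benchmark_problem(name="Hartmann5DEmbedding", num_contexts=5)
# One row of 5 base parameters per context; the 1-D context embedding is
# appended internally before hartmann6 is evaluated.
x = np.full((5, 5), 0.5)
print(problem.evaluation_function_aggregated(x))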
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Type
import torch
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.core.parameter import ChoiceParameter
from ax.core.search_space import SearchSpace
from ax.models.torch.cbo_lcea import LCEABO
from ax.models.torch.cbo_sac import SACBO
from ax.models.torch.cbo_lcem import LCEMBO
from ax.modelbridge.factory import DEFAULT_TORCH_DEVICE
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.registry import (
Cont_X_trans,
StratifiedStandardizeY,
TaskEncode,
Y_trans,
)
from ax.modelbridge.torch import TorchModelBridge
from ax.modelbridge.transforms.base import Transform
from ax.models.random.sobol import SobolGenerator
from ax.models.torch.botorch import BotorchModel
def get_multisobol(search_space: SearchSpace) -> RandomModelBridge:
return RandomModelBridge(
search_space=search_space,
model=SobolGenerator(),
transforms=[TaskEncode] + Cont_X_trans,
)
class MultiSOBOLStrategy(GenerationStrategy):
def __init__(
self, context_parameter: ChoiceParameter, name: str = "MultiSOBOL"
) -> None:
self.context_parameter = context_parameter
self.num_contexts = len(context_parameter.values)
steps = [GenerationStep(get_multisobol, -1)]
super().__init__(steps=steps, name=name)
def clone_reset(self) -> "MultiSOBOLStrategy":
"""Copy without state."""
return self.__class__(context_parameter=self.context_parameter, name=self.name)
def gen(
self,
experiment: Experiment,
data: Optional[Data] = None,
n: int = 1,
**kwargs: Any,
) -> GeneratorRun:
"""Produce the next points in the experiment."""
num_trials = len(self._generator_runs)
idx = num_trials % self.num_contexts # decide which context to optimize
fixed_features = ObservationFeatures(
parameters={self.context_parameter.name: self.context_parameter.values[idx]}
)
generator_run = super().gen(
experiment=experiment, data=data, n=1, fixed_features=fixed_features
)
return generator_run
def get_multioutput(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
status_quo_features: Optional[ObservationFeatures] = None,
) -> TorchModelBridge:
# Set transforms for a Single-type MTGP model.
transforms = Cont_X_trans + [StratifiedStandardizeY, TaskEncode]
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=BotorchModel(),
transforms=transforms,
torch_dtype=torch.double,
status_quo_features=status_quo_features,
)
class MultiOutputStrategy(GenerationStrategy):
def __init__(
self,
context_parameter: ChoiceParameter,
init_size: int,
steps: Optional[List[GenerationStep]] = None,
name: str = "MultiOutputBO",
) -> None:
self.context_parameter = context_parameter
self.num_contexts = len(context_parameter.values)
if steps is None:
steps = [
GenerationStep(get_multisobol, init_size),
GenerationStep(get_multioutput, -1),
]
super().__init__(steps=steps, name=name)
def clone_reset(self) -> "MultiOutputStrategy":
"""Copy without state."""
return self.__class__(context_parameter=self.context_parameter, name=self.name)
def gen(
self,
experiment: Experiment,
data: Optional[Data] = None,
n: int = 1,
**kwargs: Any,
) -> GeneratorRun:
"""Produce the next points in the experiment."""
num_trials = len(self._generator_runs)
idx = num_trials % self.num_contexts # decide which context to optimize
fixed_features = ObservationFeatures(
parameters={self.context_parameter.name: self.context_parameter.values[idx]}
)
generator_run = super().gen(
experiment=experiment, data=data, n=1, fixed_features=fixed_features
)
return generator_run
def get_multitask_contextualBO(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
status_quo_features: Optional[ObservationFeatures] = None,
) -> TorchModelBridge:
# Set transforms for a Single-type MTGP model.
transforms = Cont_X_trans + [StratifiedStandardizeY, TaskEncode]
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=LCEMBO(),
transforms=transforms,
torch_dtype=torch.double,
status_quo_features=status_quo_features,
)
class MultiTaskContextualBOStrategy(MultiOutputStrategy):
def __init__(
self,
context_parameter: ChoiceParameter,
init_size: int,
name: str = "MultiTaskContextualBO",
) -> None:
steps = [
GenerationStep(get_multisobol, init_size),
GenerationStep(get_multitask_contextualBO, -1),
]
super().__init__(
context_parameter=context_parameter,
init_size=init_size,
steps=steps,
name=name,
)
def get_ContextualBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=SACBO(decomposition=decomposition),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
def get_ContextualEmbeddingBO(
experiment: Experiment,
data: Data,
decomposition: Dict[str, List[str]],
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
context_weight_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
search_space: Optional[SearchSpace] = None,
gp_model_args: Optional[Dict[str, Any]] = None,
) -> TorchModelBridge:
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=LCEABO(
decomposition=decomposition,
cat_feature_dict=cat_feature_dict,
embs_feature_dict=embs_feature_dict,
context_weight_dict=context_weight_dict,
embs_dim_list=embs_dim_list,
gp_model_args=gp_model_args,
),
transforms=transforms,
torch_dtype=dtype,
torch_device=device,
)
|
ContextualBO-main
|
benchmarks/cbo_generation_strategy.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
from scipy.spatial.transform import Rotation as R
import os
from glob import glob
from tqdm import tqdm
import scipy.io as sio
import random
from PIL import Image
import numpy as np
import torch
from torchvision import transforms
preprocess = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor()
])
# class used for obtaining an instance of the dataset for training vision chart prediction
# to be passed to a pytorch dataloader
# input:
# - classes: list of object classes used
# - args: set of input parameters from the training file
# - set_type: the set type used
# - sample_num: the size of the point cloud to be returned in a given batch
class mesh_loader_vision(object):
def __init__(self, classes, args, set_type='train', sample_num=3000):
# initialization of data locations
self.args = args
self.surf_location = '../data/surfaces/'
self.img_location = '../data/images/'
self.touch_location = '../data/scene_info/'
self.sheet_location = '../data/sheets/'
self.sample_num = sample_num
self.set_type = set_type
self.set_list = np.load('../data/split.npy', allow_pickle=True).item()
names = [[f.split('/')[-1], f.split('/')[-2]] for f in glob((f'{self.img_location}/*/*'))]
self.names = []
self.classes_names = [[] for _ in classes]
np.random.shuffle(names)
for n in tqdm(names):
if n[1] in classes:
if os.path.exists(self.surf_location + n[1] + '/' + n[0] + '.npy'):
if os.path.exists(self.touch_location + n[1] + '/' + n[0]):
if n[0] + n[1] in self.set_list[self.set_type]:
self.names.append(n)
self.classes_names[classes.index(n[1])].append(n)
print(f'The number of {set_type} set objects found : {len(self.names)}')
def __len__(self):
return len(self.names)
# select the object and grasps for training
def get_training_instance(self):
# select an object and a principal grasp randomly
class_choice = random.choice(self.classes_names)
object_choice = random.choice(class_choice)
obj, obj_class = object_choice
# select the remaining grasps and shuffle the select grasps
num_choices = [0, 1, 2, 3, 4]
nums = []
for i in range(self.args.num_grasps):
choice = random.choice(num_choices)
nums.append(choice)
del (num_choices[num_choices.index(choice)])
random.shuffle(nums)
return obj, obj_class, nums[-1], nums
# select the object and grasps for validating
def get_validation_examples(self, index):
# select an object and a principal grasp
obj, obj_class = self.names[index]
orig_num = 0
# select the remaining grasps deterministically
nums = [(orig_num + i) % 5 for i in range(self.args.num_grasps)]
return obj, obj_class, orig_num, nums
# load surface point cloud
def get_gt_points(self, obj_class, obj):
samples = np.load(self.surf_location +obj_class + '/' + obj + '.npy')
if self.args.eval:
np.random.seed(0)
np.random.shuffle(samples)
gt_points = torch.FloatTensor(samples[:self.sample_num])
gt_points *= .5 # scales the models to the size of shape we use
gt_points[:, -1] += .6 # this is to make the hand and the shape the right relative sizes
return gt_points
# load vision signal
def get_images(self, obj_class, obj, grasp_number):
# load images
img_occ = Image.open(f'{self.img_location}/{obj_class}/{obj}/{grasp_number}.png')
img_unocc = Image.open(f'{self.img_location}/{obj_class}/{obj}/unoccluded.png')
# apply pytorch image preprocessing
img_occ = preprocess(img_occ)
img_unocc = preprocess(img_unocc)
return torch.FloatTensor(img_occ), torch.FloatTensor(img_unocc)
# load touch sheet mask indicating touch success
def get_touch_info(self, obj_class, obj, grasps):
sheets, successful = [], []
# cycle though grasps and load touch sheets
for grasp in grasps:
sheet_location = self.sheet_location + f'{obj_class}/{obj}/sheets_{grasp}_finger_num.npy'
hand_info = np.load(f'{self.touch_location}/{obj_class}/{obj}/{grasp}.npy', allow_pickle=True).item()
sheet, success = self.get_touch_sheets(sheet_location, hand_info)
sheets.append(sheet)
successful += success
return torch.cat(sheets), successful
# load the touch sheet
def get_touch_sheets(self, location, hand_info):
sheets = []
successful = []
touches = hand_info['touch_success']
finger_pos = torch.FloatTensor(hand_info['cam_pos'])
# cycle through fingers in the grasp
for i in range(4):
sheet = np.load(location.replace('finger_num', str(i)))
# if the touch was unsuccessful
if not touches[i] or sheet.shape[0] == 1:
sheets.append(finger_pos[i].view(1, 3).expand(25, 3)) # save the finger position instead in every vertex
successful.append(False) # binary mask for unsuccessful touch
# if the touch was successful
else:
sheets.append(torch.FloatTensor(sheet)) # save the sheet
successful.append(True) # binary mask for successful touch
sheets = torch.stack(sheets)
return sheets, successful
def __getitem__(self, index):
if self.set_type == 'train':
obj, obj_class, grasp_number, grasps = self.get_training_instance()
else:
obj, obj_class, grasp_number, grasps = self.get_validation_examples(index)
data = {}
# meta data
data['names'] = obj, obj_class, grasp_number
data['class'] = obj_class
# load sampled ground truth points
data['gt_points'] = self.get_gt_points(obj_class, obj)
# load images
data['img_occ'], data['img_unocc'] = self.get_images(obj_class, obj, grasp_number)
# get touch information
data['sheets'], data['successful'] = self.get_touch_info(obj_class, obj, grasps)
return data
def collate(self, batch):
data = {}
data['names'] = [item['names'] for item in batch]
data['class'] = [item['class'] for item in batch]
data['sheets'] = torch.cat([item['sheets'].unsqueeze(0) for item in batch])
data['gt_points'] = torch.cat([item['gt_points'].unsqueeze(0) for item in batch])
data['img_occ'] = torch.cat([item['img_occ'].unsqueeze(0) for item in batch])
data['img_unocc'] = torch.cat([item['img_unocc'].unsqueeze(0) for item in batch])
data['successful'] = [item['successful'] for item in batch]
return data
# class used for obtaining an instance of the dataset for training touch chart prediction
# to be passed to a pytorch dataloader
# input:
# - classes: list of object classes used
# - args: set of input parameters from the training file
# - set_type: the set type used
# - produce_sheets: if True, iterate over every grasp and finger of all objects,
#   regardless of set type, in order to generate touch sheets
class mesh_loader_touch(object):
def __init__(self, classes, args, set_type='train', produce_sheets = False):
# initialization of data locations
self.args = args
self.surf_location = '../data/surfaces/'
self.img_location = '../data/images/'
self.touch_location = '../data/scene_info/'
self.sheet_location = '../data/remake_sheets/'
self.set_type = set_type
self.set_list = np.load('../data/split.npy', allow_pickle=True).item()
self.empty = torch.FloatTensor(np.load('../data/empty_gel.npy'))
self.produce_sheets = produce_sheets
names = [[f.split('/')[-1], f.split('/')[-2]] for f in glob((f'{self.img_location}/*/*'))]
self.names = []
for n in tqdm(names):
if n[1] in classes:
if os.path.exists(self.surf_location + n[1] + '/' + n[0] + '.npy'):
if os.path.exists(self.touch_location + n[1] + '/' + n[0]):
if self.produce_sheets or (n[0] + n[1]) in self.set_list[self.set_type]:
if produce_sheets:
for i in range(5):
for j in range(4):
self.names.append(n + [i, j])
else:
for i in range(5):
hand_info = np.load(f'{self.touch_location}/{n[1]}/{n[0]}/{i}.npy',
allow_pickle=True).item()
for j in range(4):
if hand_info['touch_success'][j]:
self.names.append(n + [i, j])
print(f'The number of {set_type} set objects found : {len(self.names)}')
def __len__(self):
return len(self.names)
def standardize_point_size(self, points):
if points.shape[0] == 0:
return torch.zeros((self.args.num_samples, 3))
np.random.shuffle(points)
points = torch.FloatTensor(points)
while points.shape[0] < self.args.num_samples :
points = torch.cat((points, points, points, points))
perm = torch.randperm(points.shape[0])
idx = perm[:self.args.num_samples ]
return points[idx]
def get_finger_transforms(self, hand_info, finger_num, args):
rot = hand_info['cam_rot'][finger_num]
rot = R.from_euler('xyz', rot, degrees=False).as_matrix()
rot_q = R.from_matrix(rot).as_quat()
pos = hand_info['cam_pos'][finger_num]
return torch.FloatTensor(rot_q), torch.FloatTensor(rot), torch.FloatTensor(pos)
def __getitem__(self, index):
obj, obj_class, num, finger_num = self.names[index]
# meta data
data = {}
data['names'] = [obj, num , finger_num]
data['class'] = obj_class
# hand information
hand_info = np.load(f'{self.touch_location}/{obj_class}/{obj}/{num}.npy', allow_pickle=True).item()
data['rot'], data['rot_M'], data['pos'] = self.get_finger_transforms(hand_info, finger_num, self.args)
data['good_touch'] = hand_info['touch_success']
# simulated touch information
scene_info = np.load(f'{self.touch_location}/{obj_class}/{obj}/{num}.npy', allow_pickle=True).item()
data['depth'] = torch.clamp(torch.FloatTensor(scene_info['depth'][finger_num]).unsqueeze(0), 0, 1)
data['sim_touch'] = torch.FloatTensor(np.array(scene_info['gel'][finger_num]) / 255.).permute(2, 0, 1).contiguous().view(3, 100, 100)
data['empty'] = torch.FloatTensor(self.empty / 255.).permute(2, 0, 1).contiguous().view(3, 100, 100)
# point cloud information
data['samples'] = self.standardize_point_size(scene_info['points'][finger_num])
data['num_samples'] = scene_info['points'][finger_num].shape
# where to save sheets
data['save_dir'] = f'{self.sheet_location}/{obj_class}/{obj}/sheets_{num}_{finger_num}.npy'
return data
def collate(self, batch):
data = {}
data['names'] = [item['names'] for item in batch]
data['class'] = [item['class'] for item in batch]
data['samples'] = torch.cat([item['samples'].unsqueeze(0) for item in batch])
data['sim_touch'] = torch.cat([item['sim_touch'].unsqueeze(0) for item in batch])
data['empty'] = torch.cat([item['empty'].unsqueeze(0) for item in batch])
data['depth'] = torch.cat([item['depth'].unsqueeze(0) for item in batch])
data['ref'] = {}
data['ref']['rot'] = torch.cat([item['rot'].unsqueeze(0) for item in batch])
data['ref']['rot_M'] = torch.cat([item['rot_M'].unsqueeze(0) for item in batch])
data['ref']['pos'] = torch.cat([item['pos'].unsqueeze(0) for item in batch])
data['good_touch'] = [item['good_touch'] for item in batch]
data['save_dir'] = [item['save_dir'] for item in batch]
data['num_samples'] = [item['num_samples'] for item in batch]
return data
|
3D-Vision-and-Touch-main
|
data_loaders.py
|
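An illustrative sketch of plugging the vision loader above into a PyTorch DataLoader (the args object and class names are hypothetical placeholders; real values come from the training scripts, and the ../data directory must be present):

from types import SimpleNamespace
from torch.utils.data import DataLoader
from data_loaders import mesh_loader_vision

args = SimpleNamespace(num_grasps=2, eval=False)   # stand-in for the training-script arguments
classes = ['bottle', 'knife']                      # placeholder class folder names under ../data/images/
train_set = mesh_loader_vision(classes, args, set_type='train', sample_num=3000)
train_loader = DataLoader(train_set, batch_size=4, shuffle=True,
                          collate_fn=train_set.collate)  # the custom collate stacks the dict fields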
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import time
import sys
sys.path.insert(0, "../")
from pytorch3d.loss import chamfer_distance as cuda_cd
# loads the initial mesh and stores vertex, face, and adjacency matrix information
# input:
# - args: arguments from the training file
# - obj_name: name of the initial mesh object file for the vision charts
# output:
# - adj_info: the adjacency matrix, and faces for the combination of vision and touch charts
# - verts: the set of vertices for the initial vision charts
def load_mesh_vision(args, obj_name):
# load obj file
obj = import_obj(obj_name)
verts = np.array(obj.vertices)
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(np.array(obj.faces) - 1).cuda()
# get adjacency matrix information
adj_info = adj_init(verts, faces, args)
return adj_info, verts
# loads object file
# involves identifying face and vertex information in .obj file
# needs to be triangulated to work
class import_obj(object):
def __init__(self, file):
self.vertices = []
self.faces = []
with open(file) as f :
for line in f:
line = line.replace('//', '/')
line = line.replace('\n', '')
if line[:2] == "v ":
self.vertices.append([float(v) for v in line.split(" ")[1:]])
elif line[0] == "f":
self.faces.append([int(s.split('/')[0]) for s in line.split(' ')[1:]])
# normalizes symmetric, binary adj matrix such that sum of each row is 1
def normalize_adj(mx):
rowsum = mx.sum(1)
r_inv = (1. / rowsum).view(-1)
r_inv[r_inv != r_inv] = 0.
mx = torch.mm(torch.eye(r_inv.shape[0]).to(mx.device) * r_inv, mx)
return mx
# defines the adjacency matrix for an object
def adj_init(verts, faces, args):
# get generic adjacency matrix for vision charts
adj = calc_adj(faces)
adj_info = {}
if args.use_touch:
# this combines the adjacency information of touch and vision charts
# the output adj matrix has the first k rows corresponding to vision charts, and the last |V| - k
        # corresponding to touch charts. Similarly, the first l faces correspond to vision charts, and the
# remaining correspond to touch charts
adj, faces = adj_fuse_touch(verts, faces, adj, args)
adj = normalize_adj(adj)
adj_info['adj'] = adj
adj_info['faces'] = faces
return adj_info
# combines graph for vision and touch charts to define a fused adjacency matrix
# input:
# - verts: vertices of the vision charts
# - faces: faces of the vision charts
# - adj: adjacency matrix for the vision charts
# - args: arguments from the training file
# output:
# - adj: adjacency matrix from the combination of touch and vision charts
# - faces: combination of vision and touch chart faces
def adj_fuse_touch(verts, faces, adj, args):
verts = verts.data.cpu().numpy()
hash = {}
# find vertices which have the same 3D position
for e, v in enumerate(verts):
if v.tobytes() in hash:
hash[v.tobytes()].append(e)
else:
hash[v.tobytes()] = [e]
# load object information for generic touch chart
sheet = import_obj('../data/initial_sheet.obj')
sheet_verts = torch.FloatTensor(np.array(sheet.vertices)).cuda()
sheet_faces = torch.LongTensor(np.array(sheet.faces) - 1).cuda()
sheet_adj = calc_adj(sheet_faces)
# central vertex for each touch chart that will communicate with all vision charts
central_point = 4
central_points = [central_point + (i * sheet_adj.shape[0]) + adj.shape[0] for i in range(4 * args.num_grasps)]
# define and fill new adjacency matrix with vision and touch charts
new_dim = adj.shape[0] + (4 * args.num_grasps * sheet_adj.shape[0])
new_adj = torch.zeros((new_dim, new_dim)).cuda()
new_adj[: adj.shape[0], :adj.shape[0]] = adj.clone()
for i in range(4 * args.num_grasps):
start = adj.shape[0] + (sheet_adj.shape[0] * i)
end = adj.shape[0] + (sheet_adj.shape[0] * (i + 1))
new_adj[start: end, start:end] = sheet_adj.clone()
adj = new_adj
# define new faces with vision and touch charts
all_faces = [faces]
for i in range(4 * args.num_grasps):
temp_sheet_faces = sheet_faces.clone() + verts.shape[0]
temp_sheet_faces += i * sheet_verts.shape[0]
all_faces.append(temp_sheet_faces)
faces = torch.cat(all_faces)
# update adjacency matrix to allow communication between vision and touch charts
for key in hash.keys():
cur_verts = hash[key]
if len(cur_verts) > 1:
for v1 in cur_verts:
for v2 in cur_verts: # vertices on the boundary of vision charts can communicate
adj[v1, v2] = 1
if args.use_touch:
for c in central_points: # touch and vision charts can communicate
adj[v1, c] = 1
adj[c, v1] = 1
return adj, faces
# computes adjacency matrix from face information
def calc_adj(faces):
v1 = faces[:, 0]
v2 = faces[:, 1]
v3 = faces[:, 2]
num_verts = int(faces.max())
adj = torch.eye(num_verts + 1).to(faces.device)
adj[(v1, v2)] = 1
adj[(v1, v3)] = 1
adj[(v2, v1)] = 1
adj[(v2, v3)] = 1
adj[(v3, v1)] = 1
adj[(v3, v2)] = 1
return adj
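# Hedged example (added, not in the original file): for a two-triangle strip
#   faces = torch.LongTensor([[0, 1, 2], [1, 2, 3]])
#   adj = calc_adj(faces)
# adj is a 4 x 4 matrix with ones on the diagonal (self-loops) and adj[i, j] = 1 whenever
# vertices i and j share a face, so adj[0, 3] stays 0 because vertices 0 and 3 never co-occur.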
# sample points from a batch of meshes
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# input:
# - verts: vertices of the mesh to sample from
# - faces: faces of the mesh to sample from
# - num: number of points to sample
# output:
# - points: points sampled on the surface of the mesh
def batch_sample(verts, faces, num=10000):
dist_uni = torch.distributions.Uniform(torch.tensor([0.0]).cuda(), torch.tensor([1.0]).cuda())
batch_size = verts.shape[0]
# calculate area of each face
x1, x2, x3 = torch.split(torch.index_select(verts, 1, faces[:, 0]) - torch.index_select(verts, 1, faces[:, 1]), 1,
dim=-1)
y1, y2, y3 = torch.split(torch.index_select(verts, 1, faces[:, 1]) - torch.index_select(verts, 1, faces[:, 2]), 1,
dim=-1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
Areas = torch.sqrt(a + b + c) / 2
Areas = Areas.squeeze(-1) / torch.sum(Areas, dim=1) # percentage of each face w.r.t. full surface area
    # define distributions of relative face surface areas
choices = None
for A in Areas:
if choices is None:
choices = torch.multinomial(A, num, True) # list of faces to be sampled from
else:
choices = torch.cat((choices, torch.multinomial(A, num, True)))
# select the faces to be used
select_faces = faces[choices].view(verts.shape[0], 3, num)
face_arange = verts.shape[1] * torch.arange(0, batch_size).cuda().unsqueeze(-1).expand(batch_size, num)
select_faces = select_faces + face_arange.unsqueeze(1)
select_faces = select_faces.view(-1, 3)
flat_verts = verts.view(-1, 3)
# sample one point from each
xs = torch.index_select(flat_verts, 0, select_faces[:, 0])
ys = torch.index_select(flat_verts, 0, select_faces[:, 1])
zs = torch.index_select(flat_verts, 0, select_faces[:, 2])
u = torch.sqrt(dist_uni.sample_n(batch_size * num))
v = dist_uni.sample_n(batch_size * num)
points = (1 - u) * xs + (u * (1 - v)) * ys + u * v * zs
points = points.view(batch_size, num, 3)
return points
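# Note (added): faces are drawn with probability proportional to their area, and each point
# is placed with barycentric weights (1 - sqrt(u), sqrt(u) * (1 - v), sqrt(u) * v) for
# u, v ~ Uniform(0, 1); the square root is what makes the samples uniform over each triangle
# instead of clustering near the first vertex.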
# compute the local chamfer distance metric on the ground truth mesh at different distances away from the touch sites
# input:
# - samples: point cloud from surface of predicted charts
# - batch: current batch information
# - losses: the current losses across the test set
# output:
# - losses: the updated losses across the test set
# - num_examples: the number of times the losses were updated
def calc_local_chamfer(samples, batch, losses):
batch_size = samples.shape[0]
    # a grid of points projected towards the surface of the object, starting from the same position and orientation
# as the touch sensor when the touch occurred, but 5 times its size
planes = batch['radius'].cuda().view(batch_size, 4, 100, 100, 3)
    # mask indicating which points hit the surface of the object, i.e., the ones we care about
masks = batch['radius_masks'].cuda().view(batch_size, 4, 100, 100)
successful = batch['successful']
num_examples = 0
    # for every grasp
for pred, gt, mask, success in zip(samples, planes, masks, successful):
# for every ring size around each touch site
for i in range(5):
# for every touch
for j in range(4):
if not success[j]:
continue
                # select the right ring of points, i.e., 1 x size of sensor ... 5 x size of sensor
dim_mask = torch.zeros(mask[j].shape).clone()
dim_mask[40 - i * 10: 60 + i * 10, 40 - i * 10: 60 + i * 10] = 1
dim_mask[50 - i * 10: 50 + i * 10, 50 - i * 10: 50 + i * 10] = 0
                # select points which are on the object's surface
dim_mask[mask[j] == 0] = 0
gt_masked = gt[j][dim_mask == 1]
if (gt_masked.shape[0] == 0):
continue
# compute the local loss between the selected points and the predicted surface
                loss, _ = cuda_cd(pred.unsqueeze(0), gt_masked.unsqueeze(0), batch_reduction=None)
losses[i] += loss.mean()
if i == 0:
num_examples += 1.
return losses, num_examples
# sets up arguments for the pretrained models
def pretrained_args(args):
if args.pretrained == 'empty':
args.use_occluded = False
args.use_unoccluded = False
args.use_touch = False
elif args.pretrained == 'touch':
args.num_gcn_layers = 25
args.hidden_gcn_layers = 250
args.use_occluded = False
args.use_unoccluded = False
args.use_touch = True
elif args.pretrained == 'touch_unoccluded':
args.num_img_blocks = 4
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 15
args.hidden_gcn_layers = 200
args.use_occluded = False
args.use_unoccluded = True
args.use_touch = True
elif args.pretrained == 'touch_occluded':
args.num_img_blocks = 4
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 20
args.hidden_gcn_layers = 200
args.use_occluded = True
args.use_unoccluded = False
args.use_touch = True
elif args.pretrained == 'unoccluded':
args.num_img_blocks = 5
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 15
args.hidden_gcn_layers = 150
args.use_occluded = False
args.use_unoccluded = True
args.use_touch = False
elif args.pretrained == 'occluded':
args.num_img_blocks = 4
args.num_img_layers = 3
args.size_img_ker = 5
args.num_gcn_layers = 25
args.hidden_gcn_layers = 250
args.use_occluded = True
args.use_unoccluded = False
args.use_touch = False
return args
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# loads the initial mesh and returns vertex, and face information
def load_mesh_touch(obj='386.obj'):
obj = import_obj(obj)
verts = np.array(obj.vertices)
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(np.array(obj.faces) - 1).cuda()
return verts, faces
# returns the chamfer distance between a mesh and a point cloud
# input:
# - verts: vertices of the mesh
# - faces: faces of the mesh
# - gt_points: point cloud to operate over
# output:
# - cd: computed chamfer distance
def chamfer_distance(verts, faces, gt_points, num=1000):
batch_size = verts.shape[0]
# sample from faces and calculate pairs
pred_points = batch_sample(verts, faces, num=num)
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
return cd.mean()
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# compute the edge lengths of a batch of meshes
def batch_calc_edge(verts, faces):
# get vertex locations of faces
p1 = torch.index_select(verts, 1, faces[:, 0])
p2 = torch.index_select(verts, 1, faces[:, 1])
p3 = torch.index_select(verts, 1, faces[:, 2])
# get edge lengths
e1 = p2 - p1
e2 = p3 - p1
e3 = p2 - p3
edge_length = (torch.sum(e1 ** 2, -1).mean() + torch.sum(e2 ** 2, -1).mean() + torch.sum(e3 ** 2, -1).mean()) / 3.
return edge_length
# returns the chamfer distance between two point clouds
# input:
# - gt_points: point cloud 1 to operate over
# - pred_points: point cloud 2 to operate over
# output:
# - cd: computed chamfer distance
def point_loss(gt_points, pred_points):
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
return cd.mean()
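# Hedged smoke test (added, not part of the original repository): builds a tiny tetrahedron
# mesh and a random target cloud, then checks that chamfer_distance and batch_calc_edge run
# end to end. Requires a CUDA device and pytorch3d.
if __name__ == '__main__':
    if torch.cuda.is_available():
        demo_verts = torch.FloatTensor([[[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]]).cuda()
        demo_faces = torch.LongTensor([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]).cuda()
        demo_points = torch.rand(1, 500, 3).cuda()
        print('chamfer distance:', chamfer_distance(demo_verts, demo_faces, demo_points, num=500).item())
        print('mean edge length:', batch_calc_edge(demo_verts, demo_faces).item())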
|
3D-Vision-and-Touch-main
|
utils.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import os
from tqdm import tqdm
interval = 1300
commands_to_run = []
for i in range(200):
    commands_to_run += [f'python runner.py --save_directory experiments/checkpoint/pretrained/encoder_touch '
                        f'--start {interval * i} --end {interval * i + interval}']
def call(command):
os.system(command)
from multiprocessing import Pool
pool = Pool(processes=10)
pbar = tqdm(pool.imap_unordered(call, commands_to_run), total=len(commands_to_run))
pbar.set_description(f"calling submitit")
for _ in pbar:
pass
|
3D-Vision-and-Touch-main
|
touch_charts/submit.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import submitit
import argparse
import produce_sheets
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--start', type=int, default=0, help='Start index of the objects to process.')
parser.add_argument('--end', type=int, default=10000000, help='End index (exclusive) of the objects to process.')
parser.add_argument('--save_directory', type=str, default='experiments/checkpoint/pretrained/encoder_touch',
help='Location of the model used to produce sheets')
parser.add_argument('--num_samples', type=int, default=4000, help='Number of points in the predicted point cloud.')
parser.add_argument('--model_location', type=str, default="../data/initial_sheet.obj")
parser.add_argument('--surf_co', type=float, default=9000.)
args = parser.parse_args()
trainer = produce_sheets.Engine(args)
submitit_logs_dir = os.path.join('experiments','sheet_logs_again',str(args.start))
executor = submitit.SlurmExecutor(submitit_logs_dir, max_num_timeout=3)
time = 360
executor.update_parameters(
num_gpus=1,
partition='',
cpus_per_task=16,
mem=500000,
time=time,
job_name=str(args.start),
signal_delay_s=300,
)
executor.submit(trainer)
|
3D-Vision-and-Touch-main
|
touch_charts/runner.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
import torch.nn.functional as F
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class Up(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
output = self.conv(x)
return output
# implemented from:
# https://github.com/MicrosoftLearning/dev290x-v2/blob/master/Mod04/02-Unet/unet_pytorch/model.py
# MIT License
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class Encoder(nn.Module):
def __init__(self, args, dim = 100):
super(Encoder, self).__init__()
self.args = args
# settings
n_channels = 3
n_classes = 1
# downscale the image
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
# upscale the image
self.down4 = Down(512, 1024)
self.up1 = Up(1024, 512)
self.up2 = Up(512, 256)
self.up3 = Up(256, 128)
self.up4 = Up(128, 64)
self.outc = OutConv(64, n_classes)
        # define a plane of the same size and shape as the touch sensor
width = .0218 - 0.00539
y_z = torch.arange(dim).cuda().view(dim, 1).expand(dim, dim).float()
y_z = torch.stack((y_z, y_z.permute(1, 0))).permute(1, 2, 0)
plane = torch.cat((torch.zeros(dim, dim, 1).cuda(), y_z), dim=-1)
self.orig_plane = (plane / float(dim) - .5) * width
# update the plane with the predicted depth information
def project_depth(self, depths, pos, rot, dim=100):
# reshape the plane to have the same position and orientation as the touch sensor when the touch occurred
batch_size = depths.shape[0]
planes = self.orig_plane.view(1 , -1 , 3).expand(batch_size, -1, 3)
planes = torch.bmm(rot, planes.permute(0, 2, 1)).permute(0, 2, 1)
planes += pos.view(batch_size, 1, 3)
# add the depth in the same direction as the normal of the sensor plane
init_camera_vector = torch.FloatTensor((1, 0, 0)).cuda().view(1, 3, 1) .expand(batch_size, 3, 1 )
camera_vector = torch.bmm(rot, init_camera_vector).permute(0, 2, 1)
camera_vector = F.normalize(camera_vector, p=2, dim=-1).view(batch_size, 1, 1, 3).expand(batch_size, dim, dim, 3)
depth_update = depths.unsqueeze(-1) * camera_vector
local_depth = (planes + depth_update.view(batch_size, -1, 3)).view(batch_size, -1, 3)
return local_depth
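    # Note (added): project_depth takes the flat sensor-sized plane built in __init__, rotates
    # and translates it into the sensor pose (rot, pos), then pushes every grid point along the
    # sensor normal by its predicted depth, returning a (batch, dim * dim, 3) point cloud in
    # world coordinates.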
def forward(self, gel, depth, ref_frame, empty, producing_sheet = False):
# get initial data
batch_size = ref_frame['pos'].shape[0]
pos = ref_frame['pos'].cuda().view(batch_size, -1)
rot_m = ref_frame['rot_M'].cuda().view(-1, 3, 3)
# U-Net prediction
# downscale the image
x1 = self.inc(gel)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
# upscale the image
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
        pred_depth = self.outc(x)
        # scale the prediction
        pred_depth = torch.sigmoid(pred_depth) * 0.1
# we only want to use the points in the predicted point cloud if they correspond to pixels in the touch signal
# which are "different" enough from the an untouched touch signal, otherwise the do not correspond to any
# geometry of the object which is deforming the touch sensor's surface.
diff = torch.sqrt((((gel.permute(0, 2, 3, 1) - empty.permute(0, 2, 3, 1)).view(batch_size, -1, 3)) **2).sum(dim = -1))
useful_points = diff > 0.001
# project the depth values into 3D points
projected_depths = self.project_depth(pred_depth.squeeze(1), pos, rot_m).view(batch_size, -1, 3)
pred_points = []
for points, useful in zip(projected_depths, useful_points):
# select only useful points
orig_points = points.clone()
points = points[useful]
if points.shape[0] == 0:
if producing_sheet:
pred_points.append(torch.zeros((self.args.num_samples, 3)).cuda())
continue
else:
points = orig_points
# make the number of points in each element of a batch consistent
while points.shape[0] < self.args.num_samples:
points = torch.cat((points, points, points, points))
perm = torch.randperm(points.shape[0])
idx = perm[:self.args.num_samples]
points = points[idx]
pred_points.append(points)
pred_points = torch.stack(pred_points)
return pred_depth, pred_points
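# Note (added): forward() consumes simulated touch images `gel` of shape (B, 3, 100, 100)
# plus the matching reference frame, and returns `pred_depth` of shape (B, 1, 100, 100)
# together with `pred_points` of shape (B, args.num_samples, 3), the predicted depth map
# projected into 3D and resampled to a fixed number of points per touch.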
|
3D-Vision-and-Touch-main
|
touch_charts/models.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import models
import os
import torch
import numpy as np
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import sys
sys.path.insert(0, "../")
import utils
import data_loaders
class Engine():
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
self.classes = ['0001', '0002']
self.args = args
self.verts, self.faces = utils.load_mesh_touch(f'../data/initial_sheet.obj')
def __call__(self) -> float:
self.encoder = models.Encoder(self.args)
self.encoder.load_state_dict(torch.load(self.args.save_directory))
self.encoder.cuda()
self.encoder.eval()
train_data = data_loaders.mesh_loader_touch(self.classes, self.args, produce_sheets=True)
train_data.names = train_data.names[self.args.start:self.args.end]
train_loader = DataLoader(train_data, batch_size=1, shuffle=False,
num_workers=16, collate_fn=train_data.collate)
for k, batch in enumerate(tqdm(train_loader, smoothing=0)):
# initialize data
sim_touch = batch['sim_touch'].cuda()
depth = batch['depth'].cuda()
ref_frame = batch['ref']
# predict point cloud
with torch.no_grad():
pred_depth, sampled_points = self.encoder(sim_touch, depth, ref_frame, empty = batch['empty'].cuda())
# optimize touch chart
for points, dir in zip(sampled_points, batch['save_dir']):
if os.path.exists(dir):
continue
directory = dir[:-len(dir.split('/')[-1])]
if not os.path.exists(directory):
os.makedirs(directory)
# if not a successful touch
if torch.abs(points).sum() == 0 :
np.save(dir, np.zeros(1))
continue
# make initial mesh match touch sensor when touch occurred
initial = self.verts.clone().unsqueeze(0)
pos = ref_frame['pos'].cuda().view(1, -1)
rot = ref_frame['rot_M'].cuda().view(1, 3, 3)
initial = torch.bmm(rot, initial.permute(0, 2, 1)).permute(0, 2, 1)
initial += pos.view(1, 1, 3)
initial = initial[0]
# set up optimization
updates = torch.zeros(self.verts.shape, requires_grad=True, device="cuda")
optimizer = optim.Adam([updates], lr=0.003, weight_decay=0)
last_improvement = 0
best_loss = 10000
while True:
# update
optimizer.zero_grad()
verts = initial + updates
# losses
surf_loss = utils.chamfer_distance(verts.unsqueeze(0), self.faces, points.unsqueeze(0), num =self.args.num_samples)
edge_lengths = utils.batch_calc_edge(verts.unsqueeze(0), self.faces)
loss = self.args.surf_co * surf_loss + 70 * edge_lengths
# optimize
loss.backward()
optimizer.step()
# check results
if loss < 0.0006:
break
if best_loss > loss :
best_loss = loss
best_verts = verts.clone()
last_improvement = 0
else:
last_improvement += 1
if last_improvement > 50:
break
np.save(dir, best_verts.data.cpu().numpy())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
    parser.add_argument('--start', type=int, default=0, help='Start index of the objects to process.')
    parser.add_argument('--end', type=int, default=10000000, help='End index (exclusive) of the objects to process.')
parser.add_argument('--save_directory', type=str, default='experiments/checkpoint/pretrained/encoder_touch',
                        help='Location of the model used to produce sheets')
parser.add_argument('--num_samples', type=int, default=4000, help='Number of points in the predicted point cloud.')
parser.add_argument('--model_location', type=str, default="../data/initial_sheet.obj",
                        help='Location of the initial mesh sheet which will be optimized')
parser.add_argument('--surf_co', type=float, default=9000.)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
3D-Vision-and-Touch-main
|
touch_charts/produce_sheets.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import models
from torch.utils.tensorboard import SummaryWriter
import torch
import numpy as np
import torch.optim as optim
import os
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import sys
sys.path.insert(0, "../")
import utils
import data_loaders
class Engine():
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.classes = ['0001', '0002']
self.checkpoint_dir = os.path.join('experiments/checkpoint/', args.exp_type, args.exp_id)
self.log_dir = f'experiments/results/{self.args.exp_type}/{self.args.exp_id}/'
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
def __call__(self) -> float:
self.encoder = models.Encoder(self.args)
self.encoder.cuda()
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
        writer = SummaryWriter(os.path.join('experiments/tensorboard/', self.args.exp_type))
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
self.load('')
with torch.no_grad():
self.validate(valid_loaders, writer)
exit()
for epoch in range(self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
def get_loaders(self):
# training data
train_data = data_loaders.mesh_loader_touch(self.classes, self.args, set_type='train')
train_loader = DataLoader(train_data, batch_size=self.args.batch_size, shuffle=True, num_workers=16, collate_fn=train_data.collate)
# validation data
valid_loaders = []
set_type = 'test' if self.args.eval else 'valid'
for c in self.classes:
valid_data = data_loaders.mesh_loader_touch(c, self.args, set_type=set_type)
valid_loaders.append(
DataLoader(valid_data, batch_size=self.args.batch_size, shuffle=False, num_workers=16, collate_fn=valid_data.collate))
return train_loader, valid_loaders
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data)):
self.optimizer.zero_grad()
# initialize data
sim_touch = batch['sim_touch'].cuda()
depth = batch['depth'].cuda()
ref_frame = batch['ref']
gt_points = batch['samples'].cuda()
# inference
pred_depth, pred_points = self.encoder(sim_touch, depth, ref_frame, empty = batch['empty'].cuda())
# losses
loss = point_loss = self.args.loss_coeff * utils.point_loss(pred_points, gt_points)
total_loss += point_loss.item()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f'Train || Epoch: {self.epoch}, loss: {loss.item():.5f} '
message += f'|| best_loss: {self.best_loss :.5f}'
tqdm.write(message)
iterations += 1.
writer.add_scalars('train', {self.args.exp_id: total_loss / iterations}, self.epoch)
def validate(self, data, writer):
total_loss = 0
self.encoder.eval()
# loop through every class
for v, valid_loader in enumerate(data):
num_examples = 0
class_loss = 0
# loop through every batch
for k, batch in enumerate(tqdm(valid_loader)):
# initialize data
sim_touch = batch['sim_touch'].cuda()
depth = batch['depth'].cuda()
ref_frame = batch['ref']
gt_points = batch['samples'].cuda()
obj_class = batch['class'][0]
batch_size = gt_points.shape[0]
# inference
pred_depth, pred_points = self.encoder( sim_touch, depth, ref_frame, empty = batch['empty'].cuda())
# losses
point_loss = self.args.loss_coeff * utils.point_loss(pred_points, gt_points)
# log
num_examples += float(batch_size)
class_loss += point_loss * float(batch_size)
# log
class_loss = (class_loss / num_examples)
message = f'Valid || Epoch: {self.epoch}, class: {obj_class}, loss: {class_loss:.5f}'
message += f' || best_loss: {self.best_loss:.5f}'
tqdm.write(message)
total_loss += (class_loss / float(len(self.classes)))
# log
print('*******************************************************')
print(f'Total validation loss: {total_loss}')
print('*******************************************************')
if not self.args.eval:
writer.add_scalars('valid', {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
def save(self, label):
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
torch.save(self.encoder.state_dict(), self.checkpoint_dir + '/encoder_touch' + label)
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + '/optim_touch' + label)
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
self.best_loss = self.current_loss
print(f'Saving Model with a {improvement} improvement in point loss')
self.save('')
self.last_improvement = 0
else:
self.last_improvement += 1
if self.last_improvement == self.args.patience:
                print(f'Over {self.args.patience} steps since last improvement')
print('Exiting now')
exit()
if self.epoch % 10 == 0:
print(f'Saving Model at epoch {self.epoch}')
self.save(f'_recent')
print('*******************************************************')
def load(self, label):
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + '/encoder_touch' + label))
self.optimizer.load_state_dict(torch.load(self.checkpoint_dir + '/optim_touch' + label))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Setting for the random seed.')
parser.add_argument('--epochs', type=int, default=300, help='Number of epochs to use.')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')
parser.add_argument('--eval', action='store_true', default=False, help='Evaluate the trained model on the test set.')
parser.add_argument('--batch_size', type=int, default=128, help='Size of the batch.')
parser.add_argument('--num_samples', type=int, default=4000, help='Number of points in the predicted point cloud.')
    parser.add_argument('--patience', type=int, default=70, help='How many epochs without improvement before training stops.')
parser.add_argument('--loss_coeff', type=float, default=9000., help='Coefficient for loss term.')
parser.add_argument('--exp_id', type=str, default='test', help='The experiment name')
parser.add_argument('--exp_type', type=str, default='test', help='The experiment group')
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
3D-Vision-and-Touch-main
|
touch_charts/recon.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
from .chamfer_distance import ChamferDistance
|
3D-Vision-and-Touch-main
|
third_party_code/__init__.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import torch
from torch.utils.cpp_extension import load
cd = load(name="cd",
sources=["../third_party_code/chamfer_distance.cpp",
"../third_party_code/chamfer_distance.cu"])
class ChamferDistanceFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
xyz1 = xyz1.contiguous()
xyz2 = xyz2.contiguous()
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n, dtype=torch.int)
idx2 = torch.zeros(batchsize, m, dtype=torch.int)
dist1 = dist1.cuda()
dist2 = dist2.cuda()
idx1 = idx1.cuda()
idx2 = idx2.cuda()
cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
return idx1, idx2
class ChamferDistance(torch.nn.Module):
def forward(self, xyz1, xyz2):
return ChamferDistanceFunction.apply(xyz1, xyz2)
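# Note (added): as written, forward_cuda fills dist1/dist2 in place but the autograd
# Function only returns the nearest-neighbour index tensors (idx1, idx2); code that needs
# the distances themselves (e.g. utils.chamfer_distance in this repository) goes through
# pytorch3d's chamfer_distance instead.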
|
3D-Vision-and-Touch-main
|
third_party_code/chamfer_distance.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import os
from tqdm import tqdm
def call(command):
os.system(command)
param_namer = {'--seed': 'seed', '--num_gcn_layers': 'ngl', '--hidden_gcn_layers': 'hgl', '--num_img_blocks': 'nib',
'--num_img_layers': 'nil', '--num_grasps': 'grasps', '--geo': 'geo'}
commands = []
ex_type = 'Comparison'
eval = False
def add_commands(forced_params, string, params, exp_id_start):
for f in forced_params:
string += f' {f}'
number = []
keys = list(params.keys())
for param_name in keys:
number.append(len(params[param_name]))
numbers = np.where(np.zeros(number) == 0 )
numbers = np.stack(numbers).transpose()
commands = []
for n in numbers :
exp_id = exp_id_start
command = string
for e, k in enumerate(n):
param_name = keys[e]
param_value = params[param_name][k]
command += f' {param_name} {param_value}'
exp_id += f'_{param_namer[param_name]}_{param_value}'
if eval:
command += ' --eval'
command += f' --exp_id {exp_id}'
commands.append(command)
return commands
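# Hedged example (added, not in the original file): with
#   params = {'--num_img_blocks': [4, 5], '--num_img_layers': [3, 5]}
# the np.where / np.stack trick above enumerates every index combination
# [0, 0], [0, 1], [1, 0], [1, 1], so add_commands() emits one runner.py command per
# hyper-parameter combination, e.g.
#   CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type Comparison --use_occluded \
#       --num_img_blocks 4 --num_img_layers 3 --exp_id @occluded_nib_4_nil_3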
######################
###### empty #########
######################
params = {'--seed': [0,1,2,3,4,5]}
exp_id_start = '@empty'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = []
commands += add_commands(forced_params, string, params, exp_id_start)
######################
###### occluded ######
######################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5]}
exp_id_start = '@occluded'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_occluded']
commands += add_commands(forced_params, string, params, exp_id_start)
######################
###### unoccluded ####
######################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5]}
exp_id_start = '@unoccluded'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_unoccluded']
commands += add_commands(forced_params, string, params, exp_id_start)
########################
#### touch ######
########################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_grasps': [1, 2, 3, 4, 5]}
exp_id_start = '@touch'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_touch', ]
commands += add_commands(forced_params, string, params, exp_id_start)
##############################
##### occluded + touch #######
##############################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5]}
exp_id_start = '@occluded_touch'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_occluded', '--use_touch']
commands += add_commands(forced_params, string, params, exp_id_start)
##############################
### touch + unoccluded ######
##############################
params = {'--num_gcn_layers': [15, 20, 25], '--hidden_gcn_layers': [150, 200, 250], '--num_img_blocks': [4,5],
'--num_img_layers': [3, 5],'--num_grasps': [1, 2, 3, 4, 5] }
exp_id_start = '@unoccluded_touch'
string = f'CUDA_VISIBLE_DEVICES=0 python runner.py --exp_type {ex_type}'
forced_params = ['--use_unoccluded', '--use_touch', ]
commands += add_commands(forced_params, string, params, exp_id_start)
for i in range(len(commands)):
commands[i] += f'_command_{i}@'
from multiprocessing import Pool
pool = Pool(processes=10)
pbar = tqdm(pool.imap_unordered(call, commands), total=len(commands))
pbar.set_description(f"calling submitit")
for _ in pbar:
pass
|
3D-Vision-and-Touch-main
|
vision_charts/submit.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import submitit
import argparse
import recon
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Setting for the random seed.')
parser.add_argument('--geo', type=int, default=0, help='Use the GEOmetrics setup instead.')
parser.add_argument('--lr', type=float, default=0.0003, help='Initial learning rate.')
parser.add_argument('--eval', action='store_true', default=False, help='Evaluate the trained model on the test set.')
parser.add_argument('--batch_size', type=int, default=25, help='Size of the batch.')
parser.add_argument('--exp_id', type=str, default='Eval', help='The experiment name')
parser.add_argument('--exp_type', type=str, default='Test', help='The experiment group')
parser.add_argument('--use_occluded', action='store_true', default=False, help='To use the occluded image.')
parser.add_argument('--use_unoccluded', action='store_true', default=False, help='To use the unoccluded image.')
parser.add_argument('--use_touch', action='store_true', default=False, help='To use the touch information.')
parser.add_argument('--patience', type=int, default=70, help='How many epochs without improvement before training stops.')
parser.add_argument('--loss_coeff', type=float, default=9000., help='Coefficient for loss term.')
parser.add_argument('--num_img_blocks', type=int, default=6, help='Number of image blocks in the image encoder.')
parser.add_argument('--num_img_layers', type=int, default=3, help='Number of image layers in each block in the image encoder.')
parser.add_argument('--size_img_ker', type=int, default=5, help='Size of the image kernel in each image encoder layer.')
parser.add_argument('--num_gcn_layers', type=int, default=20, help='Number of GCN layers in the mesh deformation network.')
parser.add_argument('--hidden_gcn_layers', type=int, default=300, help='Size of the feature vector for each GCN layer in the mesh deformation network.')
parser.add_argument('--num_grasps', type=int, default=1, help='Number of grasps in each instance to train with')
parser.add_argument('--pretrained', type=str, default='no', help='String indicating which pretrained model to use.',
choices=['no', 'touch', 'touch_unoccluded', 'touch_occluded', 'unoccluded', 'occluded'])
parser.add_argument('--visualize', action='store_true', default=False)
args = parser.parse_args()
trainer = recon.Engine(args)
submitit_logs_dir = os.path.join('experiments','logs', args.exp_type, args.exp_id )
executor = submitit.SlurmExecutor(submitit_logs_dir, max_num_timeout=3)
if args.eval:
time = 30
else:
time = 60*48
executor.update_parameters(
num_gpus=1,
partition='',
cpus_per_task=16,
mem=500000,
time=time,
job_name=args.exp_id,
signal_delay_s=300,
)
executor.submit(trainer)
|
3D-Vision-and-Touch-main
|
vision_charts/runner.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import math
# network for making image features for vertex feature vectors
class Image_Encoder(nn.Module):
def __init__(self, args):
super(Image_Encoder, self).__init__()
layers = []
cur_size = 6
next_size = 16
for i in range(args.num_img_blocks):
layers.append(CNN_layer(cur_size, next_size, args.size_img_ker, stride=2))
cur_size = next_size
next_size = next_size * 2
for j in range(args.num_img_layers -1):
layers.append(CNN_layer(cur_size, cur_size, args.size_img_ker))
self.args = args
self.layers = nn.ModuleList(layers)
f = 221.7025
RT = np.array([[-0.0000, -1.0000, 0.0000, -0.0000],
[-0.7071, 0.0000, -0.7071, 0.4243],
[0.7071, 0.0000, -0.7071, 1.1314]])
K = np.array([[f, 0, 128.], [0, f, 128.], [0, 0, 1]])
self.matrix = torch.FloatTensor(K.dot(RT)).cuda()
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
    # defines image features over vertices from vertex positions and feature maps from vision
def pooling(self, blocks, verts_pos, debug=False):
# convert vertex positions to x,y coordinates in the image, scaled to fractions of image dimension
ext_verts_pos = torch.cat(
(verts_pos, torch.FloatTensor(np.ones([verts_pos.shape[0], verts_pos.shape[1], 1])).cuda()), dim=-1)
ext_verts_pos = torch.matmul(ext_verts_pos, self.matrix.permute(1, 0))
xs = ext_verts_pos[:, :, 1] / ext_verts_pos[:, :, 2] / 256.
ys = ext_verts_pos[:, :, 0] / ext_verts_pos[:, :, 2] / 256.
full_features = None
batch_size = verts_pos.shape[0]
        # check the camera projection covers the image
if debug:
dim = 256
xs = (torch.clamp(xs * dim, 0, dim - 1).data.cpu().numpy()).astype(np.uint8)
ys = (torch.clamp(ys * dim, 0, dim - 1).data.cpu().numpy()).astype(np.uint8)
for ex in range(blocks.shape[0]):
img = blocks[ex].permute(1, 2, 0).data.cpu().numpy()[:, :, :3]
for x, y in zip(xs[ex], ys[ex]):
img[x, y, 0] = 1
img[x, y, 1] = 0
img[x, y, 2] = 0
from PIL import Image
Image.fromarray((img * 255).astype(np.uint8)).save('results/temp.png')
print('saved')
input()
for block in blocks:
# scale projected vertex points to dimension of current feature map
dim = block.shape[-1]
cur_xs = torch.clamp(xs * dim, 0, dim - 1)
cur_ys = torch.clamp(ys * dim, 0, dim - 1)
# https://en.wikipedia.org/wiki/Bilinear_interpolation
x1s, y1s, x2s, y2s = torch.floor(cur_xs), torch.floor(cur_ys), torch.ceil(cur_xs), torch.ceil(cur_ys)
A = x2s - cur_xs
B = cur_xs - x1s
G = y2s - cur_ys
H = cur_ys - y1s
x1s = x1s.type(torch.cuda.LongTensor)
y1s = y1s.type(torch.cuda.LongTensor)
x2s = x2s.type(torch.cuda.LongTensor)
y2s = y2s.type(torch.cuda.LongTensor)
# flatten batch of feature maps to make vectorization easier
flat_block = block.permute(1, 0, 2, 3).contiguous().view(block.shape[1], -1)
block_idx = torch.arange(0, verts_pos.shape[0]).cuda().unsqueeze(-1).expand(batch_size, verts_pos.shape[1])
block_idx = block_idx * dim * dim
selection = (block_idx + (x1s * dim) + y1s).view(-1)
C = torch.index_select(flat_block, 1, selection)
C = C.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
selection = (block_idx + (x1s * dim) + y2s).view(-1)
D = torch.index_select(flat_block, 1, selection)
D = D.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
selection = (block_idx + (x2s * dim) + y1s).view(-1)
E = torch.index_select(flat_block, 1, selection)
E = E.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
selection = (block_idx + (x2s * dim) + y2s).view(-1)
F = torch.index_select(flat_block, 1, selection)
F = F.view(-1, batch_size, verts_pos.shape[1]).permute(1, 0, 2)
section1 = A.unsqueeze(1) * C * G.unsqueeze(1)
section2 = H.unsqueeze(1) * D * A.unsqueeze(1)
section3 = G.unsqueeze(1) * E * B.unsqueeze(1)
section4 = B.unsqueeze(1) * F * H.unsqueeze(1)
features = (section1 + section2 + section3 + section4)
features = features.permute(0, 2, 1)
if full_features is None:
full_features = features
else:
full_features = torch.cat((full_features, features), dim=2)
return full_features
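    # Note (added): the block above is standard bilinear interpolation. A vertex projected to
    # (cur_x, cur_y) inside the pixel square [x1, x2] x [y1, y2] blends the four corner
    # features C, D, E, F with weights A*G, A*H, B*G, B*H, i.e. (x2 - cur_x)(y2 - cur_y) and
    # so on, so each vertex receives one feature vector per selected feature map,
    # concatenated along the channel dimension.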
def forward(self, img_occ, img_unocc, cur_vertices):
# double size due to legacy decision
if self.args.use_unoccluded:
x = torch.cat((img_unocc, img_unocc), dim = 1)
elif self.args.use_occluded:
x = torch.cat((img_occ, img_occ), dim=1)
else:
x = torch.cat((img_occ, img_unocc), dim=1)
features = []
layer_selections = [len(self.layers) - 1 - (i+1)*self.args.num_img_layers for i in range(3)]
for e, layer in enumerate(self.layers):
if x.shape[-1] < self.args.size_img_ker:
break
x = layer(x)
# collect feature maps
if e in layer_selections:
features.append(x)
features.append(x)
# get vertex features from selected feature maps
vert_image_features = self.pooling(features, cur_vertices)
return vert_image_features
# global chart deformation class
class Encoder(nn.Module):
    def __init__(self, adj_info, initial_positions, args):
        super(Encoder, self).__init__()
        self.adj_info = adj_info
        self.initial_positions = initial_positions
self.args = args
input_size = 3 # used to determine the size of the vertex feature vector
if args.use_occluded or args.use_unoccluded:
self.img_encoder = Image_Encoder(args).cuda()
with torch.no_grad():
input_size += self.img_encoder(torch.zeros(1, 3, 256, 256).cuda(), torch.zeros(1, 3, 256, 256).cuda(), torch.zeros(1, 1, 3).cuda()).shape[-1]
if self.args.use_touch:
input_size+=1
self.mesh_decoder = GCN(input_size, args).cuda()
def forward(self, img_occ, img_unocc, batch):
# initial data
batch_size = img_occ.shape[0]
cur_vertices = self.initial_positions.unsqueeze(0).expand(batch_size, -1, -1)
size_vision_charts = cur_vertices.shape[1]
# if using touch then append touch chart position to graph definition
if self.args.use_touch:
sheets = batch['sheets'].cuda().view(batch_size, -1, 3)
cur_vertices = torch.cat((cur_vertices,sheets), dim = 1 )
        # cycle through deformation
for _ in range(3):
vertex_features = cur_vertices.clone()
# add vision features
if self.args.use_occluded or self.args.use_unoccluded:
vert_img_features = self.img_encoder(img_occ, img_unocc, cur_vertices)
vertex_features = torch.cat((vert_img_features, vertex_features), dim=-1)
# add mask for touch charts
if self.args.use_touch:
vision_chart_mask = torch.ones(batch_size, size_vision_charts, 1).cuda() * 2 # flag corresponding to vision
touch_chart_mask = torch.FloatTensor(batch['successful']).cuda().unsqueeze(-1).expand(batch_size, 4 * self.args.num_grasps, 25)
touch_chart_mask = touch_chart_mask.contiguous().view(batch_size, -1, 1)
mask = torch.cat((vision_chart_mask, touch_chart_mask), dim=1)
vertex_features = torch.cat((vertex_features,mask), dim = -1)
# deform the vertex positions
vertex_positions = self.mesh_decoder(vertex_features, self.adj_info)
# avoid deforming the touch chart positions
vertex_positions[:, size_vision_charts:] = 0
cur_vertices = cur_vertices + vertex_positions
return cur_vertices
# implemented from:
# https://github.com/tkipf/pygcn/tree/master/pygcn
# MIT License
# Graph convolutional network for chart deformation
class GCN(nn.Module):
def __init__(self, input_features, args):
super(GCN, self).__init__()
self.num_layers = args.num_gcn_layers
# define output sizes for each GCN layer
hidden_values = [input_features] + [ args.hidden_gcn_layers for k in range(self.num_layers -1)] + [3]
# define layers
layers = []
for i in range(self.num_layers):
layers.append(GCN_layer(hidden_values[i], hidden_values[i+1]))
self.layers = nn.ModuleList(layers)
def forward(self, vertex_features, adj_info):
adj = adj_info['adj']
# iterate through GCN layers
x = self.layers[0](vertex_features, adj, F.relu)
for i in range(1, self.num_layers-1):
x = self.layers[i](x, adj, F.relu)
coords = (self.layers[-1](x, adj, lambda x: x))
return coords
# CNN layer definition
def CNN_layer(f_in, f_out, k, stride = 1):
layers = []
layers.append(nn.Conv2d(int(f_in), int(f_out), kernel_size=k, padding=1, stride=stride))
layers.append(nn.BatchNorm2d(int(f_out)))
layers.append(nn.ReLU(inplace=True))
return nn.Sequential(*layers)
# implemented from:
# https://github.com/tkipf/pygcn/tree/master/pygcn
# MIT License
# Graph convolutional network layer definition
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(GCN_layer, self).__init__()
self.weight1 = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 6. / math.sqrt((self.weight1.size(1) + self.weight1.size(0)))
stdv *= .3
self.weight1.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-.1, .1)
def forward(self, features, adj, activation):
# 0N-GCN definition, removes need for resnet layers
features = torch.matmul(features, self.weight1)
output = torch.matmul(adj, features[:, :, :features.shape[-1] // 3])
output = torch.cat((output, features[:, :, features.shape[-1] // 3:]), dim=-1)
output = output + self.bias
return activation(output)
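# Hedged usage sketch (added, not in the original file): a GCN_layer propagates only the
# first third of its output channels over the graph; the remaining channels are kept
# per-vertex and simply concatenated back on.
if __name__ == '__main__':
    layer = GCN_layer(in_features=6, out_features=9)
    feats = torch.rand(2, 5, 6)          # (batch, num_verts, in_features)
    adj = torch.eye(5).unsqueeze(0)      # identity adjacency: no neighbour mixing at all
    out = layer(feats, adj, F.relu)
    print(out.shape)                     # torch.Size([2, 5, 9])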
|
3D-Vision-and-Touch-main
|
vision_charts/models.py
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import models
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import tensorflow as tf
import tensorboard as tb
from tqdm import tqdm
import sys
sys.path.insert(0, "../")
import utils
import data_loaders
class Engine():
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.num_samples = 10000
self.classes = ['0001', '0002']
self.checkpoint_dir = os.path.join('experiments/checkpoint/', args.exp_type, args.exp_id)
def __call__(self) -> float:
# initial data
if self.args.GEOmetrics:
self.adj_info, initial_positions = utils.load_mesh_vision(self.args, f'../data/sphere.obj')
else:
self.adj_info, initial_positions = utils.load_mesh_vision(self.args, f'../data/vision_sheets.obj')
self.encoder = models.Encoder(self.adj_info, Variable(initial_positions.cuda()), self.args)
self.encoder.cuda()
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
writer = SummaryWriter(os.path.join('experiments/tensorboard/', self.args.exp_type ))
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
if self.args.pretrained != 'no':
self.load_pretrained()
else:
self.load('')
with torch.no_grad():
self.validate(valid_loaders, writer)
exit()
# training loop
for epoch in range(3000):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
def get_loaders(self):
train_data = data_loaders.mesh_loader_vision(self.classes, self.args, set_type='train', sample_num=self.num_samples)
train_loader = DataLoader(train_data, batch_size=self.args.batch_size, shuffle=True, num_workers=16, collate_fn=train_data.collate)
valid_loaders = []
set_type = 'test' if self.args.eval else 'valid'
for c in self.classes:
valid_data = data_loaders.mesh_loader_vision(c, self.args, set_type=set_type, sample_num=self.num_samples)
valid_loaders.append( DataLoader(valid_data, batch_size=self.args.batch_size, shuffle=False, num_workers=16, collate_fn=valid_data.collate))
return train_loader, valid_loaders
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data)):
self.optimizer.zero_grad()
# initialize data
img_occ = batch['img_occ'].cuda()
img_unocc = batch['img_unocc'].cuda()
gt_points = batch['gt_points'].cuda()
# inference
# self.encoder.img_encoder.pooling(img_unocc, gt_points, debug=True)
verts = self.encoder(img_occ, img_unocc, batch)
# losses
loss = utils.chamfer_distance(verts, self.adj_info['faces'], gt_points, num=self.num_samples)
loss = self.args.loss_coeff * loss.mean()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f'Train || Epoch: {self.epoch}, loss: {loss.item():.2f}, b_ptp: {self.best_loss:.2f}'
tqdm.write(message)
total_loss += loss.item()
iterations += 1.
writer.add_scalars('train_loss', {self.args.exp_id : total_loss / iterations}, self.epoch)
def validate(self, data, writer):
total_loss = 0
# local losses at different distances from the touch sites
self.encoder.eval()
for v, valid_loader in enumerate(data):
num_examples = 0
class_loss = 0
for k, batch in enumerate(tqdm(valid_loader)):
# initialize data
img_occ = batch['img_occ'].cuda()
img_unocc = batch['img_unocc'].cuda()
gt_points = batch['gt_points'].cuda()
batch_size = img_occ.shape[0]
obj_class = batch['class'][0]
# model prediction
verts = self.encoder(img_occ, img_unocc, batch)
# losses
loss = utils.chamfer_distance(verts, self.adj_info['faces'], gt_points, num=self.num_samples)
loss = self.args.loss_coeff * loss.mean() * batch_size
# logs
num_examples += float(batch_size)
class_loss += loss
print_loss = (class_loss / num_examples)
message = f'Valid || Epoch: {self.epoch}, class: {obj_class}, f1: {print_loss:.2f}'
tqdm.write(message)
total_loss += (print_loss / float(len(self.classes)))
print('*******************************************************')
print(f'Validation Accuracy: {total_loss}')
print('*******************************************************')
writer.add_scalars('valid_ptp', {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
def save(self, label):
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
torch.save(self.encoder.state_dict(), self.checkpoint_dir + '/encoder_vision' + label)
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + '/optim_vision' + label)
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss -self.current_loss
self.best_loss = self.current_loss
print(f'Saving Model with a {improvement} improvement')
self.save('')
self.last_improvement = 0
else:
self.last_improvement += 1
if self.last_improvement == self.args.patience:
                print(f'Over {self.args.patience} steps since last improvement')
print('Exiting now')
exit()
if self.epoch % 10 == 0:
print(f'Saving Model at epoch {self.epoch}')
self.save(f'_recent')
print('*******************************************************')
def load(self, label):
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + '/encoder_vision' + label))
self.optimizer.load_state_dict(torch.load(self.checkpoint_dir + '/optim_vision' + label))
def load_pretrained(self):
pretrained_location = 'experiments/checkpoint/pretrained/' + self.args.pretrained
self.encoder.load_state_dict(torch.load(pretrained_location))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='Setting for the random seed.')
    parser.add_argument('--GEOmetrics', type=int, default=0, help='use GEOmetrics setup instead')
parser.add_argument('--lr', type=float, default=0.0003, help='Initial learning rate.')
parser.add_argument('--eval', action='store_true', default=False, help='Evaluate the trained model on the test set.')
parser.add_argument('--batch_size', type=int, default=16, help='Size of the batch.')
parser.add_argument('--exp_id', type=str, default='Eval', help='The experiment name')
parser.add_argument('--exp_type', type=str, default='Test', help='The experiment group')
parser.add_argument('--use_occluded', action='store_true', default=False, help='To use the occluded image.')
parser.add_argument('--use_unoccluded', action='store_true', default=False, help='To use the unoccluded image.')
parser.add_argument('--use_touch', action='store_true', default=False, help='To use the touch information.')
    parser.add_argument('--patience', type=int, default=30, help='How many epochs without improvement before training stops.')
parser.add_argument('--loss_coeff', type=float, default=9000., help='Coefficient for loss term.')
    parser.add_argument('--num_img_blocks', type=int, default=6, help='Number of image blocks in the image encoder.')
    parser.add_argument('--num_img_layers', type=int, default=3, help='Number of image layers in each block in the image encoder.')
    parser.add_argument('--size_img_ker', type=int, default=5, help='Size of the image kernel in each image encoder layer.')
    parser.add_argument('--num_gcn_layers', type=int, default=20, help='Number of GCN layers in the mesh deformation network.')
parser.add_argument('--hidden_gcn_layers', type=int, default=300, help='Size of the feature vector for each GCN layer in the mesh deformation network.')
parser.add_argument('--num_grasps', type=int, default=1, help='Number of grasps in each instance to train with')
parser.add_argument('--pretrained', type=str, default='no', help='String indicating which pretrained model to use.',
choices=['no', 'empty', 'touch', 'touch_unoccluded', 'touch_occluded', 'unoccluded', 'occluded'])
args = parser.parse_args()
# update args for pretrained models
args = utils.pretrained_args(args)
trainer = Engine(args)
trainer()
|
3D-Vision-and-Touch-main
|
vision_charts/recon.py
|
# coding=UTF8
import json
import re
import sys
import fileinput
control_chars = ''.join(map(unichr, range(0,32) + range(127,160)))
control_char_re = re.compile('[%s]' % re.escape(control_chars))
def remove_control_chars(s):
return control_char_re.sub('', s)
def convert():
# try:
# fin = open(FILE)
# except:
# print 'FAILED TO OPEN FILE ' + FILE
# return
#patterns = PATTERNS.get(domain)
#if not patterns:
# print 'NO SCRAPER FOUND FOR ' + domain
# return
#print 'PROCESSING ' + domain
#if not os.path.exists(OUTPUT_DIR):
# os.makedirs(OUTPUT_DIR)
#f_fields = open('%s/%s_fields.tsv' % (OUTPUT_DIR, domain), 'w', 1000 * 1000 * 100)
#f_content = open('%s/%s_content.tsv' % (OUTPUT_DIR, domain), 'w', 1000 * 1000 * 100)
for line in fileinput.input():
id, url, status, headers, flags, body, imported, imported_pyht, timestamp = line.split('\t')
# if domain in VERSIONS:
# for indicator, scraper in VERSIONS[domain]:
# if indicator in body:
# patterns = PATTERNS[scraper]
# break
# if domain in SPECIAL_CHARSETS:
# body = body.decode(SPECIAL_CHARSETS[domain], 'ignore').encode('utf-8')
fields = {}
fields['url'] = url
fields['content'] = body
#content = ''
# for key, reg in patterns.iteritems():
# # remove control chars.
# # otherwise HTML_PARSER may fail (UnicodeEncodingError).
# val = remove_control_chars(val.strip().decode('utf-8', 'ignore'))
sys.stdout.write('\t'.join([id, json.dumps(fields)]).encode('utf-8') + '\n')
#f_fields.write('\t'.join([domain, id, url, json.dumps(fields)]).encode('utf-8') + '\n')
#f_content.write('\t'.join(["%s:%s" % (domain, id), content]).encode('utf-8') + '\n')
#f_fields.close()
#f_content.close()
# DOMAINS = ['escortsincollege']
if __name__ == '__main__':
convert()
# if NUM_PROCESSES > 1 and DOMAINS > 1:
# p = Pool(NUM_PROCESSES)
# p.map(scrape, DOMAINS)
# else:
# for d in DOMAINS:
# scrape(d)
|
elementary-master
|
test/generate_json.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elementary.settings.dev")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
elementary-master
|
django/manage.py
|
from django.conf import settings
from django.db import models
from pymongo import MongoClient
# placeholder for refreshing elasticsearch when documents
# are updated. currently, documents are inserted into elasticsearch when
# dd pipeline is run.
class ElasticMixin(models.Model):
def update_elastic_data(self, data):
pass
def delete_elastic_data(self, data):
pass
class Meta:
abstract = True
class MongoMixin(models.Model):
@property
def mongo_data(self):
if not self.id:
return None
if hasattr(self, '_mongo_data_cache'):
return self._mongo_data_cache
data = self._exec_mongo_request('find_one')
self._mongo_data_cache = data
return data
def update_mongo_data(self, data):
self._exec_mongo_request('update_one', [{'$set': data}, True]) # True: upsert
def delete_mongo_data(self):
self._exec_mongo_request('delete_one')
@classmethod
def get_mongo_collection(cls):
client = MongoClient(**settings.MONGODB_CONNECTION_PARAMS)
db = client['model_data']
model_name = cls.__name__
collection = db[model_name]
return collection
def _exec_mongo_request(self, command, args=None):
if hasattr(self.__class__, '_mongo_collection'):
collection = self.__class__._mongo_collection
else:
collection = self.__class__.get_mongo_collection()
self.__class__._mongo_collection = collection
if command not in ['find_one', 'update_one', 'delete_one']:
raise ValueError('Bad command: %s' % command)
if not self.pk:
raise ValueError('No PK value found. Has the object been saved?')
spec = {'_id': self.pk}
func = getattr(collection, command)
# if there is a connection failure, this operation would fail,
# but pymongo would reconnect in the background -- hopefully in time for next operation.
if not args:
args = []
return func(spec, *args)
class Meta:
abstract = True
|
elementary-master
|
django/resources/model_mixins.py
|
from __future__ import absolute_import
from collections import defaultdict
import logging
import os
from celery import shared_task
from django.conf import settings
from django.db import IntegrityError
from django.db.models import F
from django.utils.timezone import now
from pymongo import MongoClient, UpdateOne
from elasticsearch import Elasticsearch
from ddctrl import ddctrl
from .dataio import download_file, open_docs_file, validate_docs
from .models import Document, DocSource, Repository
logger = logging.getLogger(__name__)
PROCESSING_BATCH_SIZE = 100 * 1000
INGESTION_BATCH_SIZE_BYTES = 100 * 1000 * 1000
es = Elasticsearch()
@shared_task
def process_docs():
pending_repos = []
for r in Repository.objects.all():
if r.docs.filter(processed__isnull=True).exists():
pending_repos.append(r)
if not pending_repos:
logger.info('== No pending docs to be processed.')
return
logger.info('====== Have docs from %d repos to be processed...' % len(pending_repos))
for r in pending_repos:
logger.info('==== Processing pending docs in repo %s' % r)
process_docs_for_repo(r)
@shared_task
def ingest_sources():
pending_sources = list(DocSource.objects.filter(processed__isnull=True).order_by('created'))
if not pending_sources:
logger.info('== No pending doc sources to be ingested.')
return
logger.info('====== Have %d doc sources to be ingested...' % len(pending_sources))
for s in pending_sources:
logger.info('==== Ingesting pending doc source %s' % s)
ingest_source(s)
def process_docs_in_batch(repo, doc_ids):
logger.info('processing %d docs' % len(doc_ids))
task_id = process_docs.request.id
collection = Document.get_mongo_collection()
docs = list(collection.find({'_id': {'$in': tuple(doc_ids)}}))
# ddctrl updates docs in place
error = None
try:
ddctrl.run_pipeline(task_id, repo, docs)
except Exception as e:
logger.exception('Error processing %d docs' % len(docs))
error = str(e)
# add to elasticsearch
bulk_data = []
INDEX_NAME = 'elem'
for d in docs:
id = d['_id']
content = d['content']
if error is not None:
d['processing_error'] = error
result = d.get('result', {})
op_dict = {
"index": {
"_index": INDEX_NAME,
"_type": 'docs',
"_id": id
}
}
data_dict = {
"id": id,
"content": content,
"repo": repo.full_name,
"result": result
}
bulk_data.append(op_dict)
bulk_data.append(data_dict)
    es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
# update mongo
ops = []
for d in docs:
id = d['_id']
del d['content']
if error is not None:
d['processing_error'] = error
ops.append(UpdateOne({'_id': id}, {
'$set': d
}))
collection.bulk_write(ops, ordered=False)
collection.database.client.close()
# update stats on DocSource
source_map = {}
for d in repo.docs.filter(id__in=doc_ids):
if d.source is not None:
key = d.source.crawlid
source_map[key] = source_map.get(key, 0) + 1
for s, c in source_map.items():
ds = DocSource.objects.get(crawlid=s)
ds.processed_docs = F('processed_docs') + c
ds.save()
# update processed timestamp
    repo.docs.filter(id__in=doc_ids).update(processed=now(), processing_error=error)
logger.info('done')
def process_docs_for_repo(repo):
logger.info('pipeline: %s' % repo.pipeline)
logger.info('num docs: %d' % repo.docs.count())
doc_ids = repo.docs.filter(processed__isnull=True).order_by('created')\
.values_list('id', flat=True)[:PROCESSING_BATCH_SIZE]
while doc_ids:
process_docs_in_batch(repo, doc_ids)
doc_ids = repo.docs.filter(processed__isnull=True).order_by('created')\
.values_list('id', flat=True)[:PROCESSING_BATCH_SIZE]
logger.info('DONE!')
def ingest_source(source):
data_dir = os.path.join(settings.ELEMENTARY_DATA_DIR, 'sources')
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
path = os.path.join(data_dir, '%s' % source.id)
start_time = now()
error = None
stats = defaultdict(int)
def complete():
logs = []
end_time = now()
msg = error or stats.get('error')
if msg:
logs.append('ERROR: ' + msg)
logs.append('Started: %s; Finished: %s.' % (start_time, end_time))
logs.append('VALID = %d; INVALID = %d; BYTES = %d.' %
(stats['valid'], stats['invalid'], stats['bytes']))
source.processed = end_time
source.ingestion_log = '\n'.join(logs)
source.total_docs = stats['valid'] + stats['invalid']
source.ingested_docs = stats['ingested']
source.invalid_docs = stats['invalid']
source.processed_docs = 0
source.save()
if source.ingested_docs > 0:
process_docs_queue.fill()
logger.info('%s\n%s' % (source, source.ingestion_log))
logger.info('ingested %d docs' % source.ingested_docs)
logger.info('done')
try:
error = download_file(source.url, path)
if error:
return complete()
file_obj, error = open_docs_file(path)
if error:
return complete()
docs = []
batch_bytes_start = 0
for doc in validate_docs(file_obj, stats):
docs.append(doc)
if stats['bytes'] - batch_bytes_start >= INGESTION_BATCH_SIZE_BYTES:
num_ingested = ingest_docs_batch(source, docs)
stats['ingested'] += num_ingested
docs = []
batch_bytes_start = stats['bytes']
if docs:
num_ingested = ingest_docs_batch(source, docs)
stats['ingested'] += num_ingested
except Exception as e:
error = 'Unexpected error: %s' % e
return complete()
def ingest_docs_batch(source, docs, max_retries=2):
docmap = {doc['docid']: doc for doc in docs}
docids = set(docmap.keys())
existing_docids = set(Document.objects.filter(repo=source.repo, docid__in=docids)
.values_list('docid', flat=True))
new_docids = docids - existing_docids
records = []
for docid in new_docids:
records.append(Document(docid=docid,
repo=source.repo,
source=source,
url=docmap[docid].get('url')))
if records:
try:
Document.objects.bulk_create(records)
except IntegrityError:
# other processes may have just taken some keys in new_docids;
# we retry a couple of times before giving up.
if max_retries > 0:
return ingest_docs_batch(source, docs, max_retries - 1)
else:
raise Exception('DB was too busy; record creation kept failing.')
docid_to_id_map = dict(Document.objects.filter(repo=source.repo, docid__in=new_docids)
.values_list('docid', 'id'))
mongo_records = []
ts_created = now()
for docid in new_docids:
id = docid_to_id_map[docid]
doc = docmap[docid]
doc['_id'] = id
doc['created'] = ts_created
mongo_records.append(doc)
logger.info('inserting %d docs into mongo' % len(mongo_records))
collection = Document.get_mongo_collection()
collection.insert_many(mongo_records, ordered=False)
collection.database.client.close()
return len(records)
class SingletonCeleryQueue:
"""Regulator of a celery queue-task pair to make sure there is
at most one task in the queue at any moment. Call fill() to enqueue.
"""
def __init__(self, name, task):
self.name = name
self.task = task
def _get_collection(self):
if hasattr(self, '_collection'):
return self._collection
client = MongoClient(settings.BROKER_URL)
db = client.get_default_database()
collection = db.messages
self._collection = collection
return collection
def fill(self):
collection = self._get_collection()
exists = collection.find_one({'queue': self.name})
if exists:
return False
self.task.apply_async(queue=self.name)
return True
process_docs_queue = SingletonCeleryQueue('docs', process_docs)
ingest_sources_queue = SingletonCeleryQueue('sources', ingest_sources)
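# Illustrative usage sketch of the singleton queues above (assumes the mongo
# broker configured via settings.BROKER_URL, as _get_collection() expects):
#   process_docs_queue.fill()   # enqueues process_docs only if the 'docs' queue is empty
#   process_docs_queue.fill()   # no-op while the previous task is still queued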
|
elementary-master
|
django/resources/tasks.py
|
import io
import json
import logging
from multiprocessing.pool import ThreadPool
import os
import zipfile
from binaryornot.check import is_binary
from django.conf import settings
import requests
logger = logging.getLogger(__name__)
def download_file(url, local_path):
'''Returns None if successful; otherwise error message'''
try:
r = requests.get(url, stream=True, allow_redirects=True, timeout=10)
if r.status_code != 200:
return 'Bad HTTP status: %s' % r.status_code
with open(local_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except requests.exceptions.ConnectionError as e:
return 'Connection Error: %s' % e
except requests.exceptions.HTTPError as e:
return 'HTTP error: %s' % e
except requests.exceptions.Timeout:
return 'Request timeout after 10 sec'
except requests.exceptions.TooManyRedirects:
return 'Too many redirects'
except requests.exceptions.RequestException as e:
return 'HTTP error: %s' % e
except Exception as e:
return 'Error: %s' % e
return None
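# Illustrative usage sketch (hypothetical URL and path):
#   err = download_file('http://example.com/docs.zip', '/tmp/source-1')
#   if err:
#       logger.error('download failed: %s', err)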
def open_docs_file(path):
'''Returns (file, error) where error is None on success'''
if not os.path.isfile(path):
return None, 'No file found'
binary = is_binary(path)
if not binary:
logger.info('%s is deemed a text file' % path)
return io.open(path, 'rbU'), None
else:
logger.info('%s is deemed a binary file' % path)
if not zipfile.is_zipfile(path):
return None, 'Downloaded binary file is not a zip file.'
zfile = zipfile.ZipFile(path)
infolist = zfile.infolist()
if len(infolist) != 1:
zfile.close()
return None, 'Downloaded zip file contains %d items.' % len(infolist)
return zfile.open(infolist[0], 'rU'), None
def validate_docs(file_obj, stats):
'''Yields good doc json records and updates stats object (a map of counters)'''
try:
for line in io.TextIOWrapper(file_obj, encoding='utf-8'):
line = line.strip()
if not line:
continue
try:
data = json.loads(line)
if not isinstance(data, dict):
stats['invalid'] += 1
continue
if 'docid' not in data or 'content' not in data:
stats['invalid'] += 1
continue
stats['valid'] += 1
stats['bytes'] += len(line)
yield data
except:
stats['invalid'] += 1
continue
except ValueError as e:
stats['error'] = 'Failed to decode file using UTF-8'
if hasattr(file_obj, 'close'):
file_obj.close()
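# Illustrative usage sketch (hypothetical file; stats keys mirror the counters
# updated above: 'valid', 'invalid', 'bytes', and 'error' on decode failure):
#   from collections import defaultdict
#   stats = defaultdict(int)
#   with open('/tmp/docs.jsonl', 'rb') as fh:
#       good_docs = list(validate_docs(fh, stats))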
def parse_docs(docs, num_threads=2):
'''
Input: [<docid, content>]
Output: [<docid, parse_blob>] where parse_blob is a TSV with these columns:
<docid, sent_idx, content, words, lemmas, postags, ner, offsets, dep labels, dep parents>
'''
try:
requests.get(settings.ELEMENTARY_PARSER_ENDPOINT)
except:
raise Exception('Failed to reach parsing service at %s' % settings.ELEMENTARY_PARSER_ENDPOINT)
logger.info('Parsing %d docs with %d threads...' % (len(docs), num_threads))
pool = ThreadPool(num_threads)
results = pool.map(parse_a_doc, docs)
return results
def parse_a_doc(doc):
docid, content = doc
if not content:
return docid, ''
content = content.replace('\t', ' ')
content = content.replace('\n', ' ')
r = requests.post(settings.ELEMENTARY_PARSER_ENDPOINT, data=content.encode('utf-8'), timeout=60)
if r.status_code != 200:
logger.warning('Failed to parse doc: %s' % docid)
return docid, ''
result = r.text
if len(result) <= 1:
# either no content or parsing failed
return docid, ''
# prepend each line with the docid
blob = '\n'.join(str(docid) + '\t' + x for x in result.strip().split('\n')) + '\n'
return (docid, blob)
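# Illustrative call sketch (hypothetical docids/content; assumes the parsing
# service at settings.ELEMENTARY_PARSER_ENDPOINT is reachable):
#   parsed = parse_docs([('doc-1', 'Alice met Bob.'), ('doc-2', '')], num_threads=2)
#   # -> [('doc-1', 'doc-1\t0\tAlice met Bob.\t...'), ('doc-2', '')]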
|
elementary-master
|
django/resources/dataio.py
|
from django.db import models
from django.contrib.postgres.fields import ArrayField, HStoreField
from django.utils.http import urlquote
from .model_mixins import MongoMixin, ElasticMixin
import uuid
class Repository(models.Model):
PIPELINE_CHOICES = (
('memex/atf', 'Memex ATF'),
('memex/escort', 'Memex Escort'),
('genomics', 'Genomics')
)
name = models.TextField()
owner = models.ForeignKey('auth.User', related_name='repos')
created = models.DateTimeField(auto_now_add=True)
pipeline = models.TextField(choices=PIPELINE_CHOICES, null=True)
class Meta:
unique_together = ('owner', 'name')
@property
def full_name(self):
return '%s/%s' % (self.owner.username, self.name)
def __unicode__(self):
return self.full_name
class Document(MongoMixin, ElasticMixin, models.Model):
docid = models.TextField()
repo = models.ForeignKey('Repository', related_name='docs')
source = models.ForeignKey('DocSource', null=True, related_name='docs')
# URL of the doc, if any
url = models.TextField(null=True)
    # domain, doc type, etc.; currently unused
properties = HStoreField(null=True)
created = models.DateTimeField(auto_now_add=True)
# will be updated by the processor
processed = models.DateTimeField(null=True)
processing_error = models.TextField(null=True)
# Inherited: mongo_data and update_mongo_data() from MongoMixin
# Processor may call update_mongo_data() to add text sections.
class Meta:
unique_together = ('repo', 'docid')
@property
def content(self):
mdata = self.mongo_data
if not mdata:
return None
return mdata.get('content')
def section_text(self, section):
mdata = self.mongo_data
if not mdata:
return None
return mdata.get(section, None)
@property
def full_name(self):
return '%s/%s' % (self.repo.full_name, self.docid)
@property
def result(self):
mdata = self.mongo_data
if not mdata:
return None
return mdata.get('result')
def __unicode__(self):
return self.full_name
def random_uuid_hex():
return uuid.uuid4().hex
class DocSource(models.Model):
repo = models.ForeignKey('Repository', related_name='sources')
# A source is fully specified in a URL; could be a pre-signed S3 object URL.
# Response must be either of
# 1. a text file
# 2. a zip of one text file
# Each line of the text file must be a JSON object with keys
# docid and content; and optionally keys like 'url' and others.
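    # Example of one valid line in the source file (hypothetical values):
    #   {"docid": "doc-1", "content": "Some page text ...", "url": "http://example.com/1"}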
url = models.URLField(max_length=1000)
crawlid = models.TextField(default=random_uuid_hex)
creator = models.ForeignKey('auth.User')
created = models.DateTimeField(auto_now_add=True)
# fields below will be updated by the ingestor
processed = models.DateTimeField(null=True)
total_docs = models.BigIntegerField(null=True)
ingested_docs = models.BigIntegerField(null=True)
invalid_docs = models.BigIntegerField(null=True)
processed_docs = models.BigIntegerField(null=True)
ingestion_log = models.TextField(null=True)
class Meta:
#unique_together = ('repo', 'url')
unique_together = ('repo', 'crawlid')
def __unicode__(self):
#return '[%s][%s] %s' % (self.id, self.repo.full_name, self.url)
return '[%s][%s] %s' % (self.id, self.repo.full_name, self.crawlid)
class Result(models.Model):
doc = models.ForeignKey('Document', related_name='results')
# denormalized field
repo = models.ForeignKey('Repository', related_name='results')
# record type, to be populated from the DD app output;
# semantics is determined by the DD app.
record_type = models.TextField(null=True)
# each record is an opaque blob; could be a string or a JSON
data = models.TextField()
# class Mention(models.Model):
# doc = models.ForeignKey('Document', related_name='mentions')
# # denormalized field
# repo = models.ForeignKey('Repository', related_name='mentions')
# # mention ID
# mid = models.TextField()
# # sentence ID; mainly for internal use (e.g., DD feedback)
# sid = models.TextField(null=True)
# # raw string content
# content = models.TextField()
# # We sometimes extract different text sections from the doc content (e.g., title and body)
# # and then perform NLP and extraction from the resultant sections. As a result, each
# # mention comes from a section (could be 'content' itself), and the three fields below
# #  locate the mention.
# # There are also mentions without location info (e.g., DOM extractions), hence null=True.
# section = models.TextField(null=True) # see Document.section_text
# offset = models.IntegerField(null=True)
# length = models.IntegerField(null=True)
# # e.g., "title:phone", "body:phone", "dom:author-name", "abstract:gene", "pheno-alias"
# mention_type = models.TextField()
# # e.g., "phone", "person", "gene", "pheno"
# entity_type = models.TextField()
# # e.g., "Bob Dylan", "555-666-7777", "BRAF"
# entity_name = models.TextField()
#
# class Meta:
# index_together = [
# ['entity_type', 'repo'],
# ['entity_type', 'entity_name', 'repo']
# ]
|
elementary-master
|
django/resources/models.py
|
from functools import wraps
from django.contrib.auth.models import User
from django.db import IntegrityError
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from .models import Document, DocSource, Repository
from .tasks import process_docs_queue, ingest_sources_queue
import uuid
def catch_dml_failure(message='Update failed because it violates DB integrity'):
def wrapper(f):
@wraps(f)
def wrapped(self, *f_args, **f_kwargs):
try:
return f(self, *f_args, **f_kwargs)
except IntegrityError as ie:
raise ValidationError(message)
return wrapped
return wrapper
class UserSerializer(serializers.ModelSerializer):
username = serializers.RegexField(r'\w+')
email = serializers.EmailField()
password = serializers.CharField(
write_only=True,
style={'input_type': 'password'}
)
url = serializers.HyperlinkedIdentityField(
view_name='user-detail',
lookup_field='username'
)
class Meta:
model = User
fields = ('url', 'username', 'email', 'password')
@catch_dml_failure('User name already exists')
def create(self, validated_data):
user = User(
email=validated_data['email'],
username=validated_data['username']
)
user.set_password(validated_data['password'])
user.save()
return user
@catch_dml_failure('User name already exists')
def update(self, instance, validated_data):
password = validated_data.pop('password')
super(UserSerializer, self).update(instance, validated_data)
instance.set_password(password)
instance.save()
return instance
class RepositorySerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
name = serializers.RegexField(r'\w+')
pipeline = serializers.ChoiceField(Repository.PIPELINE_CHOICES, allow_blank=True)
url = serializers.HyperlinkedIdentityField(
view_name='repository-detail',
lookup_field='full_name'
)
docs = serializers.SerializerMethodField()
sources = serializers.SerializerMethodField()
class Meta:
model = Repository
fields = ('url', 'docs', 'sources', 'owner', 'name', 'pipeline', 'created')
def get_docs(self, obj):
url = '/docs/%s/' % obj.full_name
return self.context['request'].build_absolute_uri(url)
def get_sources(self, obj):
url = '/sources/%s/' % obj.full_name
return self.context['request'].build_absolute_uri(url)
@catch_dml_failure('Repo name already exists')
def create(self, validated_data):
return super(RepositorySerializer, self).create(validated_data)
@catch_dml_failure('Repo name already exists')
def update(self, instance, validated_data):
return super(RepositorySerializer, self).update(instance, validated_data)
class DocumentSerializer(serializers.ModelSerializer):
repo = serializers.StringRelatedField(source='repo.full_name')
docid = serializers.CharField(max_length=100, trim_whitespace=True)
doc_url = serializers.URLField(max_length=1000, allow_blank=True, source='url')
content = serializers.CharField(trim_whitespace=True)
created = serializers.ReadOnlyField()
processed = serializers.ReadOnlyField()
url = serializers.SerializerMethodField()
result = serializers.SerializerMethodField()
class Meta:
model = Document
fields = ('url', 'repo', 'docid', 'doc_url', 'created', 'processed', 'content', 'result')
def get_url(self, obj):
url = '/docs/%s/' % obj.full_name
return self.context['request'].build_absolute_uri(url)
def get_result(self, obj):
if not obj.processed:
return {'_status': 'NOT_PROCESSED_YET'}
if obj.processed and not obj.result:
if obj.processing_error:
return {
'_status': 'ERROR',
'_error': obj.processing_error
}
return {'_status': 'NO_EXTRACTIONS'}
return obj.result
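        # Illustrative return values (hypothetical docs):
        #   {'_status': 'NOT_PROCESSED_YET'}       -> not yet picked up by the pipeline
        #   {'_status': 'ERROR', '_error': '...'}  -> processing raised for this doc
        #   {'_status': 'NO_EXTRACTIONS'}          -> processed but no result blob
        #   obj.result                             -> the stored result blob otherwise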
@catch_dml_failure('Value of docid conflicts with existing records')
def create(self, validated_data):
instance = super(DocumentSerializer, self).create(validated_data)
instance.update_mongo_data({
'content': validated_data['content'],
'url': instance.url,
'created': instance.created,
})
instance.update_elastic_data({
'content': validated_data['content']
})
process_docs_queue.fill()
return instance
@catch_dml_failure('Value of docid conflicts with existing records')
def update(self, instance, validated_data):
content = validated_data.pop('content')
super(DocumentSerializer, self).update(instance, validated_data)
if instance.content != content:
instance.update_mongo_data({
'content': content
})
instance.update_elastic_data({
'content': content
})
return instance
def random_uuid_hex():
return uuid.uuid4().hex
class DocSourceSerializer(serializers.ModelSerializer):
url = serializers.SerializerMethodField()
crawlid = serializers.CharField(trim_whitespace=True, default=random_uuid_hex)
repo = serializers.StringRelatedField(source='repo.full_name')
source_url = serializers.URLField(max_length=1000, allow_blank=True, source='url')
creator = serializers.StringRelatedField(source='creator.username')
created = serializers.ReadOnlyField()
processed = serializers.ReadOnlyField()
total_docs = serializers.ReadOnlyField()
ingested_docs = serializers.ReadOnlyField()
invalid_docs = serializers.ReadOnlyField()
processed_docs = serializers.ReadOnlyField()
ingestion_log = serializers.ReadOnlyField()
class Meta:
model = DocSource
fields = ('url', 'crawlid', 'repo', 'source_url', 'creator', 'created',
'processed', 'total_docs', 'ingested_docs', 'invalid_docs', 'processed_docs', 'ingestion_log')
def get_url(self, obj):
url = '/sources/%s/%s/' % (obj.repo.full_name, obj.pk)
return self.context['request'].build_absolute_uri(url)
@catch_dml_failure('Value of crawlid conflicts with existing records')
def create(self, validated_data):
instance = super(DocSourceSerializer, self).create(validated_data)
ingest_sources_queue.fill()
return instance
|
elementary-master
|
django/resources/serializers.py
|
elementary-master
|
django/resources/__init__.py
|
|
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Document, Repository
# Create your tests here.
class UserTestCase(TestCase):
def test_create_remove(self):
# Create a user
user1 = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
self.assertIsNotNone(user1)
# Remove user
user2 = User.objects.get_by_natural_key('john')
self.assertEqual(user1, user2)
User.delete(user2)
class RepositoryTestCase(TestCase):
def setUp(self):
user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
def test_create_remove(self):
# Create a repo
repo = Repository.objects.create(name='myrepo', owner=User.objects.get(username='john'))
self.assertIsNotNone(repo)
# Remove repo
Repository.delete(repo)
class DocumentTestCase(TestCase):
def setUp(self):
user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
self.repo = Repository.objects.create(name='myrepo', owner=User.objects.get(username='john'))
def test_add_doc(self):
# Add a document to the repo
doc = Document.objects.create(docid='mydoc', repo=self.repo)
# Query repo
qds = self.repo.docs.filter(docid='mydoc')
# Remove document from repo
Document.delete(qds.first())
|
elementary-master
|
django/resources/tests.py
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from rest_framework import generics, mixins, viewsets
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from .models import Document, DocSource, Repository
from .serializers import (DocumentSerializer, DocSourceSerializer,
RepositorySerializer, UserSerializer)
import json
import urllib2
@login_required
def root(request):
return render(request, 'index.html')
class AuthTokenView(ObtainAuthToken):
def get(self, request):
if not request.user.is_authenticated():
return Response('Please use POST instead or log in from the browser first')
token, created = Token.objects.get_or_create(user=request.user)
return Response({'token': token.key})
class RepoPermission(BasePermission):
def has_permission(self, request, view):
if not request.user or not request.user.is_authenticated():
return False
if request.method == 'POST' and not request.user.is_staff:
return settings.ELEMENTARY_ALLOW_REPO_CREATION
return True
class IsAdminOrSelf(BasePermission):
def has_permission(self, request, view):
if not request.user or not request.user.is_authenticated():
return False
if not request.user.is_staff:
return request.method in ['GET', 'HEAD', 'OPTIONS', 'PUT']
return True
def has_object_permission(self, request, view, obj):
if request.user.is_staff:
return True
return request.user == obj
class IsAdminOrOwner(BasePermission):
def has_permission(self, request, view):
if not request.user or not request.user.is_authenticated():
return False
if request.method in ['GET', 'HEAD', 'OPTIONS']:
return True
        elif 'repo' not in view.kwargs:
return False
else:
repo = _get_repo_from_view(view)
if not repo:
return False
if not request.user.is_staff and request.user != repo.owner:
return False
return True
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAdminOrSelf]
lookup_field = 'username'
def get_queryset(self):
user = self.request.user
if not user.is_staff:
return User.objects.filter(id=user.id)
return User.objects.all()
class RepoViewSet(viewsets.ModelViewSet):
queryset = Repository.objects.select_related('owner')
serializer_class = RepositorySerializer
permission_classes = [RepoPermission]
lookup_field = 'full_name'
lookup_value_regex = r'\w+/\w+'
def get_queryset(self):
user = self.request.user
if not user.is_staff:
return Repository.objects.filter(owner=user)
return Repository.objects.all()
def get_object(self):
queryset = self.get_queryset()
owner_name, name = self.kwargs['full_name'].split('/', 1)
obj = get_object_or_404(queryset, name=name, owner__username=owner_name)
return obj
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class DocViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.CreateModelMixin):
queryset = Document.objects.all()
serializer_class = DocumentSerializer
permission_classes = [IsAdminOrOwner]
lookup_field = 'docid'
def get_queryset(self):
return _get_queryset_owned(Document, self)
def get_object(self):
queryset = self.get_queryset()
docid = self.kwargs['docid']
obj = queryset.filter(docid=docid).first()
return obj
def perform_create(self, serializer):
_create_if_owned(self, serializer)
    def perform_destroy(self, instance):
        # DRF routes HTTP DELETE through destroy()/perform_destroy();
        # the previous delete() override referenced an undefined `serializer`.
        _delete_if_owned(self, instance)
# def update(self, request, *args, **kwargs):
# _update_if_owned(self, serializer)
@list_route()
def search(self, request, user, repo):
q = request.GET.get('q', '')
r_from = int(request.GET.get('from', '0'))
r_size = int(request.GET.get('size', '10'))
data = {
"query": {
"query_string" : { "query" : q }
},
"filter": {
"term" : { "repo" : user + "/" + repo }
},
"from" : r_from,
"size" : r_size
}
url='http://127.0.0.1:9200/elem/docs/_search'
req = urllib2.Request(url, json.dumps(data))
response = urllib2.urlopen(req)
the_page = response.read()
return HttpResponse(the_page)
class DocSourceViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin):
queryset = DocSource.objects.all()
serializer_class = DocSourceSerializer
permission_classes = [IsAdminOrOwner]
def get_queryset(self):
return _get_queryset_owned(DocSource, self)
def perform_create(self, serializer):
_create_if_owned(self, serializer, {'creator': self.request.user})
def _get_queryset_owned(model, view):
queryset = model.objects.select_related('repo__owner')
repo = _get_repo_from_view(view)
user = view.request.user
if not repo or not (user.is_staff or repo.owner == user):
raise Http404
queryset = queryset.filter(repo=repo)
return queryset
def _create_if_owned(view, serializer, kwargs=None):
repo = _get_repo_from_view(view)
user = view.request.user
if not user.is_staff and user != repo.owner:
raise Http404
if not kwargs:
kwargs = {}
serializer.save(repo=repo, **kwargs)
def _delete_if_owned(view, instance):
    repo = _get_repo_from_view(view)
    user = view.request.user
    if not user.is_staff and user != repo.owner:
        raise Http404
    # delete the model instance directly; DRF serializers have no destroy() method
    instance.delete()
#def _update_if_owned(view, serializer, kwargs=None):
# repo = _get_repo_from_view(view)
# user = view.request.user
# if not user.is_staff and user != repo.owner:
# raise Http404
# if not kwargs:
# kwargs = {}
# serializer.update(repo=repo, **kwargs)
def _get_repo_from_view(view):
user_name = view.kwargs.get('user', None)
repo_name = view.kwargs.get('repo', None)
repo = Repository.objects.filter(name=repo_name,
owner__username=user_name).first()
return repo
|
elementary-master
|
django/resources/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
def gen_uuid(apps, schema_editor):
MyModel = apps.get_model('resources', 'DocSource')
for row in MyModel.objects.all():
        row.crawlid = uuid.uuid4().hex  # match the random_uuid_hex default format
row.save()
class Migration(migrations.Migration):
dependencies = [
('resources', '0008_auto_20150707_2308'),
]
operations = [
migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop),
]
|
elementary-master
|
django/resources/migrations/0009_auto_20150707_2322.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('resources', '0003_auto_20150526_1530'),
]
operations = [
migrations.AddField(
model_name='result',
name='record_type',
field=models.TextField(null=True),
),
]
|
elementary-master
|
django/resources/migrations/0004_result_record_type.py
|
elementary-master
|
django/resources/migrations/__init__.py
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('resources', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Mention',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mid', models.TextField()),
('sid', models.TextField(null=True)),
('content', models.TextField()),
('section', models.TextField(null=True)),
('offset', models.IntegerField(null=True)),
('length', models.IntegerField(null=True)),
('mention_type', models.TextField()),
('entity_type', models.TextField()),
('entity_name', models.TextField()),
('doc', models.ForeignKey(related_name='mentions', to='resources.Document')),
],
),
migrations.AddField(
model_name='repository',
name='pipeline',
field=models.TextField(null=True, choices=[(b'memex', b'Memex'), (b'genomics', b'Genomics')]),
),
migrations.AddField(
model_name='mention',
name='repo',
field=models.ForeignKey(related_name='mentions', to='resources.Repository'),
),
migrations.AlterIndexTogether(
name='mention',
index_together=set([('entity_type', 'entity_name', 'repo'), ('entity_type', 'repo')]),
),
]
|
elementary-master
|
django/resources/migrations/0002_auto_20150525_2317.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import resources.models
class Migration(migrations.Migration):
dependencies = [
('resources', '0007_document_processing_error'),
]
operations = [
migrations.AddField(
model_name='docsource',
name='crawlid',
field=models.TextField(default=resources.models.random_uuid_hex, null=True),
),
migrations.AddField(
model_name='docsource',
name='processed_docs',
field=models.BigIntegerField(null=True),
),
]
|
elementary-master
|
django/resources/migrations/0008_auto_20150707_2308.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('resources', '0002_auto_20150525_2317'),
]
operations = [
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', models.TextField()),
('doc', models.ForeignKey(related_name='results', to='resources.Document')),
('repo', models.ForeignKey(related_name='results', to='resources.Repository')),
],
),
migrations.AlterIndexTogether(
name='mention',
index_together=set([]),
),
migrations.RemoveField(
model_name='mention',
name='doc',
),
migrations.RemoveField(
model_name='mention',
name='repo',
),
migrations.DeleteModel(
name='Mention',
),
]
|
elementary-master
|
django/resources/migrations/0003_auto_20150526_1530.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('resources', '0005_add_doc_sources'),
]
operations = [
migrations.AlterField(
model_name='repository',
name='pipeline',
field=models.TextField(null=True, choices=[(b'memex/atf', b'Memex ATF'), (b'memex/escort', b'Memex Escort'), (b'genomics', b'Genomics')]),
),
]
|
elementary-master
|
django/resources/migrations/0006_auto_20150604_0124.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import resources.models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20150707_2322'),
]
operations = [
migrations.AlterField(
model_name='docsource',
name='crawlid',
field=models.TextField(default=resources.models.random_uuid_hex),
),
migrations.AlterUniqueTogether(
name='docsource',
unique_together=set([('repo', 'crawlid')]),
),
]
|
elementary-master
|
django/resources/migrations/0010_auto_20150707_2322.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.postgres.fields.hstore
from django.contrib.postgres.operations import HStoreExtension
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
HStoreExtension(),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('docid', models.TextField()),
('url', models.TextField(null=True)),
('properties', django.contrib.postgres.fields.hstore.HStoreField(null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('processed', models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name='Repository',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(related_name='repos', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='document',
name='repo',
field=models.ForeignKey(related_name='docs', to='resources.Repository'),
),
migrations.AlterUniqueTogether(
name='repository',
unique_together=set([('owner', 'name')]),
),
migrations.AlterUniqueTogether(
name='document',
unique_together=set([('repo', 'docid')]),
),
]
|
elementary-master
|
django/resources/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('resources', '0004_result_record_type'),
]
operations = [
migrations.CreateModel(
name='DocSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(max_length=1000)),
('created', models.DateTimeField(auto_now_add=True)),
('processed', models.DateTimeField(null=True)),
('total_docs', models.BigIntegerField(null=True)),
('ingested_docs', models.BigIntegerField(null=True)),
('invalid_docs', models.BigIntegerField(null=True)),
('ingestion_log', models.TextField(null=True)),
('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('repo', models.ForeignKey(related_name='sources', to='resources.Repository')),
],
),
migrations.AddField(
model_name='document',
name='source',
field=models.ForeignKey(related_name='docs', to='resources.DocSource', null=True),
),
migrations.AlterUniqueTogether(
name='docsource',
unique_together=set([('repo', 'url')]),
),
]
|
elementary-master
|
django/resources/migrations/0005_add_doc_sources.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('resources', '0006_auto_20150604_0124'),
]
operations = [
migrations.AddField(
model_name='document',
name='processing_error',
field=models.TextField(null=True),
),
]
|
elementary-master
|
django/resources/migrations/0007_document_processing_error.py
|
# -*- coding: utf-8 -*-
import codecs
import errno
import io
import json
import logging
import multiprocessing
import os
import shutil
import sys
from subprocess import Popen
from resources.dataio import parse_docs
logger = logging.getLogger(__name__)
# tmp dir with respect to elementary root
TMP_DIR = 'tmp'
NUM_CORES = multiprocessing.cpu_count()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def elementary_root():
return os.path.dirname(os.path.realpath(__file__)) + '/../../..'
def run_pipeline(task_id, repo, docs):
task_dir = elementary_root() + '/' + TMP_DIR + '/' + str(task_id)
# pipelines are currently "memex/atf" and "memex/escort"
pipeline_dir = elementary_root() + '/elementary-' + repo.pipeline
# create task directory
mkdir_p(task_dir)
logger.info(task_dir)
with io.open(task_dir + '/' + 'input.json', 'w', encoding='utf-8') as f:
for d in docs:
data = {
'id': d['_id'],
'url': d.get('url', ''),
'content': d.get('content', '')
}
f.write(unicode(json.dumps(data, encoding='utf-8')) + '\n')
# run scraper
p1 = Popen(pipeline_dir + '/scrape.sh', cwd=task_dir)
p1.communicate() # wait
# run NLP parser
docs_content = []
with codecs.open(os.path.join(task_dir, 'scraped.json'), 'r', 'utf-8') as f:
for line in f:
fields = json.loads(line)
docs_content.append([fields['id'], fields['content']])
docs_parsed = parse_docs(docs_content, num_threads=max(1, NUM_CORES - 1))
# push parsing results into the input json objects
blob_map = {str(x): y for x, y in docs_parsed}
for d in docs:
if str(d['_id']) in blob_map:
d['parse'] = blob_map[str(d['_id'])]
# write parsing results to TSV file
with codecs.open(os.path.join(task_dir, 'parsed.tsv'), 'w', 'utf-8') as f:
for docid, blob in docs_parsed:
f.write(blob)
# run inference
p3 = Popen(pipeline_dir + '/infer.sh', cwd=task_dir)
p3.communicate() # wait
# results are now in task_dir/result.json
# the _ids here are mongo's internal IDs (unique and allow efficient updates)
results = {}
with open(task_dir + '/result.json', 'r') as f:
for line in f:
j = json.loads(line)
_id = j['doc_id']
del j['doc_id']
results[str(_id)] = j
for d in docs:
_id = str(d['_id'])
if _id in results:
d['result'] = results[_id]
shutil.rmtree(task_dir)
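# Illustrative result.json line consumed above (doc_id is the key read by the loop;
# the remaining extraction fields are hypothetical and depend on the DD app):
#   {"doc_id": "507f1f77bcf86cd799439011", "phone": ["555-666-7777"]}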
|
elementary-master
|
django/ddctrl/ddctrl.py
|
elementary-master
|
django/ddctrl/__init__.py
|
|
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
|
elementary-master
|
django/elementary/__init__.py
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'elementary.settings.prod')
from django.conf import settings
app = Celery('elementary')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
elementary-master
|
django/elementary/celery.py
|
from django.conf.urls import include, url
from rest_framework import routers
from resources import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'repos', views.RepoViewSet)
router.register(r'docs/(?P<user>\w+)/(?P<repo>\w+)', views.DocViewSet)
router.register(r'sources/(?P<user>\w+)/(?P<repo>\w+)', views.DocSourceViewSet)
urlpatterns = [
url(r'^$', views.root),
url(r'^', include(router.urls)),
url(r'^', include('rest_framework.urls', namespace='rest_framework')), # login and logout
url(r'^api-token-auth/', views.AuthTokenView.as_view()),
]
|
elementary-master
|
django/elementary/urls.py
|
# Fix kombu incompatibility with pymongo 3.0
# https://github.com/celery/kombu/pull/479/files
from kombu.transport.mongodb import Channel, BroadcastCursor
from pymongo import MongoClient
from pymongo.cursor import CursorType
from kombu.syn import _detect_environment
def _kombu_mongo_open(self, scheme='mongodb://'):
hostname, dbname, options = self._parse_uri(scheme=scheme)
options.pop('auto_start_request', None)
env = _detect_environment()
if env == 'gevent':
from gevent import monkey
monkey.patch_all()
elif env == 'eventlet':
from eventlet import monkey_patch
monkey_patch()
mongoconn = MongoClient(
host=hostname, ssl=options['ssl'],
connectTimeoutMS=options['connectTimeoutMS'],
)
database = mongoconn[dbname]
version = mongoconn.server_info()['version']
if tuple(map(int, version.split('.')[:2])) < (1, 3):
raise NotImplementedError(
'Kombu requires MongoDB version 1.3+ (server is {0})'.format(
version))
self._create_broadcast(database, options)
self._client = database
def _create_broadcast_cursor(self, exchange, routing_key, pattern, queue):
cursor = self.get_broadcast().find(
filter={'queue': exchange},
sort=[('$natural', 1)],
cursor_type=CursorType.TAILABLE
)
ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor)
return ret
Channel._open = _kombu_mongo_open
Channel.create_broadcast_cursor = _create_broadcast_cursor
|
elementary-master
|
django/elementary/hacks.py
|
"""
WSGI config for elementary project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elementary.settings.prod")
application = get_wsgi_application()
|
elementary-master
|
django/elementary/wsgi.py
|
import logging
class ExceptionLoggingMiddleware(object):
def process_exception(self, request, exception):
logging.exception('Exception handling request for ' + request.path)
|
elementary-master
|
django/elementary/middleware/exception_log.py
|
elementary-master
|
django/elementary/middleware/__init__.py
|
|
import copy
import time
from django.conf import settings
from django.utils.timezone import now
from pymongo import MongoClient
REQUEST_META_FIELDS = [
'HTTP_REFERER',
'HTTP_USER_AGENT',
'HTTP_ACCEPT_LANGUAGE',
'REMOTE_ADDR',
'REMOTE_HOST',
'REQUEST_METHOD',
'QUERY_STRING',
]
class RequestLogMiddleware(object):
def get_mongo_collection(self):
if hasattr(self, '_collection'):
return self._collection
params = copy.copy(settings.MONGODB_CONNECTION_PARAMS)
# http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient
# disable write acknowledgement
params['w'] = 0
client = MongoClient(**params)
db = client['logs']
collection = db['requests']
self._collection = collection
return collection
def process_request(self, request):
request._start_epoch = time.time()
request._start_time = now()
def process_response(self, request, response):
user_id, user_name = None, None
if request.user.is_authenticated():
user_id, user_name = request.user.id, request.user.username
request_meta = {k.lower(): request.META.get(k) for k in REQUEST_META_FIELDS}
log_data = {
'user_id': user_id,
'user_name': user_name,
'http_scheme': request.scheme,
'http_host': request.get_host(),
'request_time': request._start_time,
'request_path': request.path,
'request_full_path': request.get_full_path(),
'query_dict': request.GET.dict(),
'is_ajax': request.is_ajax(),
'response_status': response.status_code,
'run_time': time.time() - request._start_epoch,
}
log_data.update(request_meta)
collection = self.get_mongo_collection()
collection.insert_one(log_data)
return response
|
elementary-master
|
django/elementary/middleware/request_log.py
|
elementary-master
|
django/elementary/settings/__init__.py
|
|
from .base import *
DEBUG = True
LOGGING['root']['level'] = 'DEBUG'
try:
    from .dev_local import *
except ImportError:
    pass
|
elementary-master
|
django/elementary/settings/dev.py
|
from .base import *
try:
    from .prod_local import *
except ImportError:
    pass
# we currently don't have an interface that allows an administrator
# to create a repository for another user. Until we have added this
# capability, allow users to create repos.
ELEMENTARY_ALLOW_REPO_CREATION = True
|
elementary-master
|
django/elementary/settings/prod.py
|