python_code | repo_name | file_path
---|---|---|
from setuptools import setup, find_packages
setup(name='observational', version='1.0', packages=find_packages())
|
observational-main
|
setup.py
|
import numpy as np
import torch
import torch.nn as nn
from scipy.stats import norm
from skimage.util.shape import view_as_windows
from sklearn.metrics import (
f1_score,
roc_auc_score,
recall_score,
)
import os
import pickle
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from functools import partial
import pdb
def load_file_markers(
source: str,
split_type: str,
train_scale: float,
val_scale: float,
seed: int = 0,
):
"""
A helper function that fetches image paths and labels
Returns: a list of file markers with (image_path,label) tuples
Args:
source: (str) the dataset name
split_type: (str) whether to return [train, val, train_val, test] split
train_scale: (float) percentage of train set to use (used for sample complexity analysis)
val_scale: (float) the val split percentage
seed: (int) the random seed
"""
file_dir = os.path.join("./file_markers", source)
if split_type in ["train", "val", "train_val"]:
file_markers_dir = os.path.join(file_dir, "trainval_list.pkl")
with open(file_markers_dir, "rb") as fp:
file_markers = pickle.load(fp)
labels = [fm[1] for fm in file_markers]
sss = StratifiedShuffleSplit(n_splits=1, test_size=val_scale, random_state=seed)
for train_ndx, val_ndx in sss.split(np.zeros(len(labels)), labels):
file_markers_train = [file_markers[ndx] for ndx in train_ndx]
file_markers_val = [file_markers[ndx] for ndx in val_ndx]
if train_scale < 1:
# stratified shuffle split
labels = [fm[1] for fm in file_markers_train]
sss2 = StratifiedShuffleSplit(
n_splits=1, test_size=train_scale, random_state=seed
)
for _, test_ndx in sss2.split(np.zeros(len(labels)), labels):
file_markers_train = [file_markers_train[ndx] for ndx in test_ndx]
if split_type == "train":
file_markers = file_markers_train
elif split_type == "train_val":
file_markers = file_markers_train
file_markers.extend(file_markers_val)
else:
file_markers = file_markers_val
elif split_type == "test":
file_markers_dir = os.path.join(file_dir, "test_list.pkl")
with open(file_markers_dir, "rb") as fp:
file_markers = pickle.load(fp)
else:
raise ValueError(f"Split type {split_type} not an option.")
print(f"{len(file_markers)} files in {split_type} split...")
# shuffle file markers
np.random.seed(seed)
np.random.shuffle(file_markers)
return file_markers
def load_weak_labels(source: str, train_scale: float, val_scale: float, seed: int = 0):
"""
A helper function that fetches the weak labels using the Gaze-WS method
Returns: a dictionary of (image_id: weak_label) entries
Args:
source: (str) the dataset name
train_scale: (float) percentage of train set to use (used for sample complexity analysis)
val_scale: (float) the val split percentage
seed: (int) the random seed
"""
predictions, gaze_ids = run_gaze_ws(
source,
train_scale,
val_scale,
seed,
)
weak_dict = {gaze_id: predictions[ndx] for ndx, gaze_id in enumerate(gaze_ids)}
return weak_dict
def load_helper_task_labels(source: str, gaze_mtl_task: str):
"""
A helper function that fetches the helper task labels for Gaze-MTL
Returns: a list of helper task labels
Args:
source: (str) the dataset name
        gaze_mtl_task: (str) the helper task (multiple helper tasks can be joined with "_")
"""
# pull all gaze sequences
seqs, labels, gaze_ids = load_gaze_data(source, "train_val", 1, 0.2, 0)
# create task_labels dict
task_labels = {gaze_id: [] for gaze_id in gaze_ids}
tasks = gaze_mtl_task.split("_")
for task in tasks:
if task == "loc":
grid_size = 3
heatmaps = make_heatmaps(seqs, grid_size).reshape(-1, grid_size * grid_size)
for ndx, gaze_id in enumerate(gaze_ids):
if labels[ndx]:
task_labels[gaze_id].append(np.argmax(heatmaps[ndx, :].T))
else:
task_labels[gaze_id].append(0)
elif task == "time":
lengths = np.array([len(seq) for seq in seqs])
mean_plus_std = np.mean(lengths) + 2 * np.std(lengths)
lengths = np.array([min(len(seq) / mean_plus_std, 1) for seq in seqs])
for ndx, gaze_id in enumerate(gaze_ids):
task_labels[gaze_id].append(lengths[ndx])
elif task == "diffusivity":
grid_size = 10
heatmaps = make_heatmaps(seqs, grid_size).squeeze()
diffuse_all = apply_lf(heatmaps, partial(diffusivity, s1=2, s2=2))
for ndx, gaze_id in enumerate(gaze_ids):
task_labels[gaze_id].append(diffuse_all[ndx])
else:
raise ValueError(f"Helper task {task} not an option.")
return task_labels
def load_gaze_data(
source,
split_type,
train_scale=1,
val_scale=0.2,
seed=0,
return_img_pths=False,
):
"""
Returns: a dictionary of (gaze_id: gaze_seq) for the split type and source
"""
gaze_dict_pth = os.path.join(
"./gaze_data",
source + "_gaze_data.pkl",
)
with open(gaze_dict_pth, "rb") as pkl_f:
gaze_dict_all = pickle.load(pkl_f)
# load file markers for split to know which gaze sequences to return
if split_type == "all":
file_markers = load_file_markers(source, "train", train_scale, val_scale, seed)
file_markers.extend(
load_file_markers(source, "val", train_scale, val_scale, seed)
)
file_markers.extend(
load_file_markers(source, "test", train_scale, val_scale, seed)
)
else:
file_markers = load_file_markers(
source, split_type, train_scale, val_scale, seed
)
gaze_seqs = []
labels = []
gaze_ids = []
img_pths = []
for img_pth, lab in file_markers:
img_pths.append(img_pth)
labels.append(standardize_label(lab, source))
# extract gaze_id from img_pth
gaze_id_base = img_pth.split("/")[-1]
gaze_id = gaze_id_base.split(".")[0]
if source == "cxr":
gaze_id = img_pth
# get gaze seq
if gaze_id in gaze_dict_all:
if source == "mets":
gaze_ids.append(gaze_id_base)
else:
gaze_ids.append(gaze_id)
if gaze_dict_all[gaze_id] == []:
gaze_seqs.append([[0.5, 0.5, 1]])
else:
gaze_seqs.append(gaze_dict_all[gaze_id])
else:
gaze_seqs.append([[0.5, 0.5, 1]])
    gaze_seqs = np.array(gaze_seqs, dtype=object)  # sequences have variable length
labels = np.array(labels)
gaze_ids = np.array(gaze_ids)
print(f"{len(gaze_seqs)} gaze sequences in {split_type} split...")
if return_img_pths:
return gaze_seqs, labels, img_pths
return gaze_seqs, labels, gaze_ids
def standardize_label(label, source):
# standardize labels so that 0 is negative and 1 is positive
if source in ["cxr", "mets", "clevr", "cub", "cxr2"]:
# label is already standardized
standard_label = label
elif source == "poet":
# labels are 1-10, should be 0-9
standard_label = label - 1
else:
raise ValueError(f"undefined source: {source}")
return standard_label
def rle2mask(rle, width, height):
mask = np.zeros(width * height)
array = np.asarray([int(x) for x in rle.split()])
starts = array[0::2]
lengths = array[1::2]
current_position = 0
for index, start in enumerate(starts):
current_position += start
mask[current_position : current_position + lengths[index]] = 1
current_position += lengths[index]
return mask.reshape(width, height)
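# Illustrative sketch (not part of the original pipeline): rle2mask expects a
# relative run-length encoding, i.e. alternating (offset from the end of the
# previous run, run length) pairs, and returns a (width, height) binary mask.
def _demo_rle2mask():
    # "2 3 1 2": skip 2 pixels, fill 3, skip 1 more, fill 2 -> 5 foreground pixels
    mask = rle2mask("2 3 1 2", width=4, height=2)
    assert mask.shape == (4, 2)
    assert mask.sum() == 5
    return mask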
def make_heatmaps(gaze_seqs, num_patches=8, normalize_heatmaps=False):
all_grids = np.zeros(
(len(gaze_seqs), 1, num_patches, num_patches), dtype=np.float32
)
for ndx, gaze_seq in enumerate(gaze_seqs):
# loop through gaze seq and increment # of visits to each patch
for (x, y, t) in gaze_seq:
            # clip x and y into [0, 1) so the patch indices stay within the grid
x, y = np.clip([x, y], 0.0, 0.999)
patch_x, patch_y = int(x * num_patches), int(y * num_patches)
all_grids[ndx, 0, patch_x, patch_y] += t
if normalize_heatmaps:
# Destroy total time information, as a diagnostic
all_grids[ndx] /= np.sum(all_grids[ndx])
return all_grids
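# Illustrative sketch (assumes gaze points are (x, y, t) with x, y in [0, 1]):
# make_heatmaps bins each fixation into a num_patches x num_patches grid and
# accumulates its duration t, giving a (N, 1, P, P) array of dwell-time maps.
def _demo_make_heatmaps():
    toy_seqs = [[(0.1, 0.1, 0.5), (0.9, 0.9, 1.0)]]  # one sequence, two fixations
    grids = make_heatmaps(toy_seqs, num_patches=4)
    assert grids.shape == (1, 1, 4, 4)
    assert np.isclose(grids.sum(), 1.5)  # total dwell time is preserved
    return grids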
def apply_lf(data, lf):
# Apply a labeling function to a bunch of data
return np.array([lf(x) for x in data])
def max_visit(heatmap, pct=0.5):
if np.any(heatmap > np.sum(heatmap) * pct):
return np.max(heatmap)
return 0
def diffusivity(heatmap, s1=5, s2=5, stride=1):
heatmap = heatmap / np.sum(heatmap)
heatmap_windows = view_as_windows(heatmap, (s1, s2), step=stride)
conv_results = np.tensordot(
heatmap_windows, np.ones((s1, s2)), axes=((2, 3), (0, 1))
)
return np.amax(conv_results)
def unique_visits(heatmap):
return np.sum(heatmap > 0)
def total_time(heatmap):
return np.sum(heatmap)
def run_gaze_ws(
source: str,
train_scale: float,
val_scale: float,
seed: int = 0,
):
"""
A helper function that runs the Gaze-WS method
Returns: a list of the weak labels and a list of the image_ids
Args:
source: (str) the dataset name
train_scale: (float) percentage of train set to use (used for sample complexity analysis)
val_scale: (float) the val split percentage
seed: (int) the random seed
"""
# extract the gaze sequences of the train split
seqs, labels, gaze_ids = load_gaze_data(
source, "train", train_scale, val_scale, seed
)
# extract the gaze sequences of the val split, which will be used as "training" data in Gaze-WS
seqs_train, y_train, _ = load_gaze_data(source, "val", train_scale, val_scale, seed)
# Get the corresponding gaze feature hyperparameters depending on the dataset
if source == "mets":
grid_size = 10
s1 = 2
s2 = 2
stride = 1
view_pct = 0.3
    elif source in ["cxr", "cxr2"]:
        grid_size = 25
        s1 = grid_size
        s2 = 12
        stride = 13
        view_pct = 0
    else:
        raise ValueError(f"Gaze-WS hyperparameters not defined for source: {source}")
# compute gaze features for the train split
heatmaps = make_heatmaps(seqs, num_patches=grid_size)
time_all = apply_lf(np.squeeze(heatmaps), total_time)
max_visit_all = apply_lf(np.squeeze(heatmaps), partial(max_visit, pct=view_pct))
unique_all = apply_lf(np.squeeze(heatmaps), unique_visits)
diffusivity_all = apply_lf(
np.squeeze(heatmaps), partial(diffusivity, s1=s1, s2=s2, stride=stride)
)
L = np.vstack((time_all, max_visit_all, unique_all, diffusivity_all)).T
# compute gaze features for the val split
heatmaps_train = make_heatmaps(seqs_train, num_patches=grid_size)
time_train = apply_lf(np.squeeze(heatmaps_train), total_time)
max_visit_all_train = apply_lf(
np.squeeze(heatmaps_train), partial(max_visit, pct=view_pct)
)
unique_all_train = apply_lf(np.squeeze(heatmaps_train), unique_visits)
diffusivity_all_train = apply_lf(
np.squeeze(heatmaps_train), partial(diffusivity, s1=s1, s2=s2, stride=stride)
)
L_train = np.vstack(
(time_train, max_visit_all_train, unique_all_train, diffusivity_all_train)
).T
    # normalize the gaze features
L = L - np.mean(L, 0)
L = L / np.std(L, 0)
L_train = L_train - np.mean(L_train, 0)
L_train = L_train / np.std(L_train, 0)
# run Gaze-WS method
pred_mat_gmm_pos_test = np.zeros_like(L)
pred_mat_gmm_pos_train = np.zeros_like(L_train)
pred_mat_gmm_neg_test = np.zeros_like(L)
pred_mat_gmm_neg_train = np.zeros_like(L_train)
for feat_ind in np.arange(L.shape[1]):
feats_train = L_train[:, feat_ind]
feats_test = L[:, feat_ind]
mean_pos, std_pos = norm.fit(feats_train[y_train == 1])
mean_neg, std_neg = norm.fit(feats_train[y_train == 0])
pred_mat_gmm_pos_test[:, feat_ind] = norm.pdf(feats_test, mean_pos, std_pos)
pred_mat_gmm_pos_train[:, feat_ind] = norm.pdf(feats_train, mean_pos, std_pos)
pred_mat_gmm_neg_test[:, feat_ind] = norm.pdf(feats_test, mean_neg, std_neg)
pred_mat_gmm_neg_train[:, feat_ind] = norm.pdf(feats_train, mean_neg, std_neg)
pred_mat_gmm_pos_test = np.prod(pred_mat_gmm_pos_test, 1) * np.mean(y_train)
pred_mat_gmm_pos_train = np.prod(pred_mat_gmm_pos_train, 1) * np.mean(y_train)
pred_mat_gmm_neg_test = np.prod(pred_mat_gmm_neg_test, 1) * np.mean(
np.abs(1 - y_train)
)
pred_mat_gmm_neg_train = np.prod(pred_mat_gmm_neg_train, 1) * np.mean(
np.abs(1 - y_train)
)
prob_preds = pred_mat_gmm_pos_train / (
pred_mat_gmm_pos_train + pred_mat_gmm_neg_train
)
prob_preds = np.expand_dims(prob_preds, 0)
best_f1 = 0
best_thresh = 0
for thresh in np.arange(0, 1, 0.01):
bin_preds = (prob_preds > thresh) * 1.0
thresh_f1 = f1_score(y_train, np.squeeze(bin_preds))
if thresh_f1 > best_f1:
            best_f1 = thresh_f1
            best_thresh = thresh
all_prob_preds = pred_mat_gmm_pos_test / (
pred_mat_gmm_pos_test + pred_mat_gmm_neg_test
)
all_bin_preds = (all_prob_preds > best_thresh) * 1.0
# print relevant metrics on how Gaze-WS performed
print(
"train size:",
L_train.shape,
"predict size:",
L.shape,
"all roc:",
roc_auc_score(labels, all_prob_preds),
"all f1:",
f1_score(labels, all_bin_preds),
"all recall:",
recall_score(labels, all_bin_preds),
"predicted CB:",
np.mean(all_bin_preds),
)
return all_bin_preds, gaze_ids
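# Toy sketch of the per-feature step used above (illustrative only): each gaze
# feature gets one Gaussian per class fitted on the "val" split, the per-feature
# likelihoods are multiplied naive-Bayes style, and the class priors rescale the
# products before normalising into a weak posterior probability.
def _demo_naive_bayes_posterior():
    y = np.array([0, 0, 1, 1])
    feats = np.array([0.1, 0.2, 0.8, 0.9])  # one feature for four "val" images
    mean_pos, std_pos = norm.fit(feats[y == 1])
    mean_neg, std_neg = norm.fit(feats[y == 0])
    x = 0.85  # feature value of a "train" image to be weakly labeled
    p_pos = norm.pdf(x, mean_pos, std_pos) * np.mean(y)
    p_neg = norm.pdf(x, mean_neg, std_neg) * np.mean(1 - y)
    return p_pos / (p_pos + p_neg)  # close to 1.0 for this toy example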
def write_to_file(path, file_name, value):
if not isinstance(value, str):
value = str(value)
    with open(os.path.join(path, file_name), "w") as fout:
        fout.write(value + "\n")
def add_application_args(parser):
parser.add_argument("--source", default="poet", type=str, help="dataset source")
parser.add_argument(
"--data_dir", default="original", help="Directory where image data is", type=str
)
parser.add_argument(
"--train_scale", default=1, help="scale ratio for train set size", type=float
)
parser.add_argument(
"--val_scale", default=0.1, help="scale ratio for train set size", type=float
)
parser.add_argument(
"--task_weights",
default=[1],
help="weights on the aux tasks when running MTL",
type=float,
nargs="+",
)
parser.add_argument(
"--task",
default="original",
help="defines the task, could be 'original', 'gaze_mtl', or 'unsup_gaze'",
type=str,
)
parser.add_argument(
"--gaze_mtl_task",
default="none",
help="defines the gaze mtl task, could be 'none', 'loc1', 'loc2', 'time', or 'diffusivity', and can concanenate by adding '_' between ",
type=str,
)
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument(
"--evaluate",
action="store_true",
help="Indicates if should evaluate features learnt",
)
parser.add_argument(
"--transfer_learning",
action="store_true",
help="If true, runs tranfer learning experiment on the gaze mtl tasks",
)
parser.add_argument(
"--pretrained", action="store_true", help="for pretrained model weights"
)
parser.add_argument("--load_cnn", type=str, default=None, help="load path")
|
observational-main
|
utils.py
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class SoftCrossEntropyLoss(nn.Module):
"""
Calculate the CrossEntropyLoss with soft targets
:param weight: Weight to assign to each of the classes. Default: None
:type weight: list of float
:param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.
'none': no reduction,
'mean': the mean of the losses,
'sum': the sum of the losses.
:type reduction: str
"""
def __init__(self, weight: List[float] = None, reduction: str = "mean"):
super().__init__()
if weight is None:
self.weight = None
else:
self.register_buffer("weight", torch.tensor(weight))
self.reduction = reduction
def forward(self, input: Tensor, target: Tensor) -> Tensor: # type:ignore
"""
Calculate the loss
:param input: prediction logits
:param target: target probabilities
:return: loss
"""
n, k = input.shape
losses = input.new_zeros(n)
for i in range(k):
cls_idx = input.new_full((n,), i, dtype=torch.long)
loss = F.cross_entropy(input, cls_idx, reduction="none")
if self.weight is not None:
loss = loss * self.weight[i]
losses += target[:, i].float() * loss
if self.reduction == "mean":
losses = losses.mean()
elif self.reduction == "sum":
losses = losses.sum()
elif self.reduction != "none":
raise ValueError(f"Unrecognized reduction: {self.reduction}")
return losses
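# Minimal usage sketch (illustrative only, not part of the original module): with
# one-hot targets SoftCrossEntropyLoss matches F.cross_entropy, while soft targets
# mix the per-class losses by their probabilities.
def _demo_soft_cross_entropy():
    torch.manual_seed(0)
    logits = torch.randn(4, 3)
    hard = torch.tensor([0, 2, 1, 1])
    soft = F.one_hot(hard, num_classes=3).float()
    loss_soft = SoftCrossEntropyLoss()(logits, soft)
    loss_hard = F.cross_entropy(logits, hard)
    assert torch.allclose(loss_soft, loss_hard, atol=1e-6)
    return loss_soft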
|
observational-main
|
end_model/soft_cross_entropy.py
|
observational-main
|
end_model/__init__.py
|
|
#!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: http://kazuto1011.github.io
# Created: 2017-05-26
from collections.abc import Sequence
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
import pdb
class _BaseWrapper(object):
def __init__(self, model):
super(_BaseWrapper, self).__init__()
self.device = next(model.parameters()).device
self.model = model
self.handlers = [] # a set of hook function handlers
def _encode_one_hot(self, ids):
one_hot = torch.zeros_like(self.logits).to(self.device)
one_hot.scatter_(0, ids, 1.0)
return one_hot
def forward(self, image):
self.image_shape = image.shape[2:]
self.logits = self.model(image)
self.probs = F.softmax(self.logits) # , dim=1)
return self.probs.sort(descending=True) # , dim=1) # ordered results
def backward(self, ids):
"""
Class-specific backpropagation
"""
one_hot = self._encode_one_hot(ids)
self.model.zero_grad()
self.logits.backward(gradient=one_hot, retain_graph=True)
def generate(self):
raise NotImplementedError
def remove_hook(self):
"""
Remove all the forward/backward hook functions
"""
for handle in self.handlers:
handle.remove()
class BackPropagation(_BaseWrapper):
def forward(self, image):
self.image = image.requires_grad_()
return super(BackPropagation, self).forward(self.image)
def generate(self):
gradient = self.image.grad.clone()
self.image.grad.zero_()
return gradient
class GuidedBackPropagation(BackPropagation):
"""
"Striving for Simplicity: the All Convolutional Net"
https://arxiv.org/pdf/1412.6806.pdf
Look at Figure 1 on page 8.
"""
def __init__(self, model):
super(GuidedBackPropagation, self).__init__(model)
def backward_hook(module, grad_in, grad_out):
# Cut off negative gradients
if isinstance(module, nn.ReLU):
return (F.relu(grad_in[0]),)
for module in self.model.named_modules():
self.handlers.append(module[1].register_backward_hook(backward_hook))
|
observational-main
|
end_model/grad_cam.py
|
# Convolutional neural network (three convolutional layers)
import torch
import torch.nn as nn
import torchvision
class ConvNet(nn.Module):
def __init__(self, num_classes=10):
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer3 = nn.Sequential(
nn.Conv2d(32, 32, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=0)
self.fc = nn.Linear(32, num_classes)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.reshape(out.size(0), -1)
#out = self.fc(out)
return out
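# Shape-check sketch (illustrative only): since the final fc layer is commented
# out above, forward() returns the pooled feature vector; for a 224x224 RGB input
# the output should be a (batch, 32) embedding.
def _demo_convnet_shapes():
    model = ConvNet(num_classes=10)
    feats = model(torch.zeros(2, 3, 224, 224))
    assert feats.shape == (2, 32)
    return feats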
|
observational-main
|
end_model/cnn.py
|
import os, sys
import numpy as np
import torch
from emmental.data import EmmentalDataset
from PIL import Image
import pydicom
sys.path.append("../")
from utils import (
load_file_markers,
load_helper_task_labels,
load_weak_labels,
standardize_label,
)
import pdb
num_gaze_dims_dict = {
"none": 0,
"loc": 9,
"time": 1,
"diffusivity": 1,
}
num_classes_dict = {"cxr": 2, "mets": 2, "cxr2": 2}
def rle2mask(rle, width, height):
mask = np.zeros(width * height)
array = np.asarray([int(x) for x in rle.split()])
starts = array[0::2]
lengths = array[1::2]
current_position = 0
for index, start in enumerate(starts):
current_position += start
mask[current_position : current_position + lengths[index]] = 1
current_position += lengths[index]
return mask.reshape(width, height)
class ObservationalDataset(EmmentalDataset):
"""
A standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.
"""
def __init__(
self,
source,
task,
gaze_mtl_task,
data_dir,
split_type,
transform,
train_scale=1,
val_scale=0.1,
seed=0,
):
"""
        Store the file markers (image paths and labels) to use.
        Args:
            source: (string) the dataset name
            split_type: (string) whether train, val, or test set
"""
self.split_type = split_type
self.transform = transform
self.data_dir = data_dir
self.source = source
self.task = task
# load appropriate file markers, which are a list of (image_path, label) tuples
self.file_markers = load_file_markers(
source,
split_type,
train_scale,
val_scale,
seed,
)
print(f"{len(self.file_markers)} files in {split_type} split...")
# If Gaze-WS, load weak labels derived from gaze
if task == "weak_gaze" and split_type == "train":
weak_dict = load_weak_labels(source, train_scale, val_scale, seed)
# If Gaze-MTL, load helper task labels derived from gaze
if gaze_mtl_task:
helper_task_labels_dict = load_helper_task_labels(source, gaze_mtl_task)
helper_tasks = gaze_mtl_task.split("_") if gaze_mtl_task else []
self.num_helper_tasks = len(helper_tasks)
self.num_gaze_dims = [num_gaze_dims_dict[task] for task in helper_tasks]
self.num_classes = num_classes_dict[source]
X_dict = {
"img_id": [],
"image": [],
}
Y_dict = {"target": [], "weak": []}
for i in range(self.num_helper_tasks):
Y_dict["helper_task_" + str(i)] = []
for idx in range(len(self.file_markers)):
img_pth, label = self.file_markers[idx]
img_id = img_pth.split("/")[-1]
img_name = img_id.split(".")[0]
if source == "cxr":
img_id = img_pth
img_name = img_pth
if source == "mets":
img_name = img_id
X_dict["img_id"].append(img_id)
# standardize labels so that 0 is negative and 1 is positive
label = standardize_label(label, source)
Y_dict["target"].append(label)
if self.task == "gaze_mtl" and img_name in helper_task_labels_dict:
gaze_labels = helper_task_labels_dict[img_name]
else:
gaze_labels = []
for i in range(self.num_helper_tasks):
if self.num_gaze_dims[i] == 1:
gaze_labels.append(0)
else:
gaze_labels.append([0] * self.num_gaze_dims[i])
for i in range(self.num_helper_tasks):
Y_dict["helper_task_" + str(i)].append(gaze_labels[i])
if split_type == "train" and task == "weak_gaze":
if img_name in weak_dict:
weak_vec = weak_dict[img_name]
else:
weak_vec = [0.5] * self.num_classes
Y_dict["weak"].append(weak_vec)
Y_dict["target"] = torch.from_numpy(np.array(Y_dict["target"]))
Y_dict["weak"] = torch.from_numpy(np.array(Y_dict["weak"]))
for i in range(self.num_helper_tasks):
Y_dict["helper_task_" + str(i)] = torch.from_numpy(
np.array(Y_dict["helper_task_" + str(i)])
)
super().__init__(name=source, X_dict=X_dict, Y_dict=Y_dict, uid="image")
def __len__(self):
return len(self.file_markers)
def __getitem__(self, idx):
"""
Fetch index idx image and label from dataset.
Args:
idx: (int) index in [0, 1, ..., size_of_dataset-1]
Returns:
image: (Tensor) transformed image
label: (int) corresponding label of image
"""
img_id = self.X_dict["img_id"][idx]
true_label = self.Y_dict["target"][idx]
base_dir = os.path.join(self.data_dir, self.source.upper())
if self.source == "mets":
# for mets there is an extra directory in the path for the image case num
img_case_id = img_id.split("_")[1]
img_pth = os.path.join(base_dir, "Mets_" + img_case_id)
img_pth = os.path.join(img_pth, img_id)
elif self.source == "cxr":
img_pth = os.path.join(base_dir, "dicom_images", img_id)
else:
img_pth = os.path.join(base_dir, img_id)
if "cxr" in self.source:
ds = pydicom.dcmread(img_pth)
img = ds.pixel_array
img = Image.fromarray(np.uint8(img))
else:
img = Image.open(img_pth)
img = self.transform(img)
if img.shape[0] == 1:
img = torch.cat([img, img, img])
x_dict = {
"image": img,
"img_id": self.X_dict["img_id"][idx],
"id": self.X_dict["img_id"][idx],
}
# we only want the weak label for the training set
if self.split_type == "train" and self.task == "weak_gaze":
weak_label = self.Y_dict["weak"][idx]
else:
weak_label = true_label
# Return X and Y dictionaries depending on the task
y_dict = {"target": true_label}
if self.task == "weak_gaze":
y_dict = {"target": true_label, "weak": weak_label}
elif self.task == "gaze_mtl":
for i in range(self.num_helper_tasks):
y_dict["helper_task_" + str(i)] = self.Y_dict["helper_task_" + str(i)][
idx
]
return x_dict, y_dict
|
observational-main
|
end_model/dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from open3d import visualization as o3dv
import random
import argparse
import numpy as np
import time
import contactopt.util as util
import contactopt.geometric_eval as geometric_eval
import pprint
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing as mp
import matplotlib.pyplot as plt
import matplotlib as mpl
import sklearn.metrics
import trimesh
import os
SAVE_OBJ_FOLDER = 'eval/saveobj'
def vis_sample(gt_ho, in_ho, out_ho, mje_in=None, mje_out=None):
hand_gt, obj_gt = gt_ho.get_o3d_meshes(hand_contact=True, normalize_pos=True)
hand_in, obj_in = in_ho.get_o3d_meshes(hand_contact=True, normalize_pos=True)
hand_in.translate((0.0, 0.2, 0.0))
obj_in.translate((0.0, 0.2, 0.0))
    if args.split != 'honn':
out_ho.hand_contact = in_ho.hand_contact
out_ho.obj_contact = in_ho.obj_contact
hand_out, obj_out = out_ho.get_o3d_meshes(hand_contact=True, normalize_pos=True)
hand_out.translate((0.0, 0.4, 0.0))
obj_out.translate((0.0, 0.4, 0.0))
geom_list = [hand_gt, obj_gt, hand_out, obj_out, hand_in, obj_in]
geom_list.append(util.text_3d('In', pos=[-0.4, 0.2, 0], font_size=40, density=2))
geom_list.append(util.text_3d('Refined', pos=[-0.4, 0.4, 0], font_size=40, density=2))
geom_list.append(util.text_3d('GT', pos=[-0.4, 0.0, 0], font_size=40, density=2))
if mje_in is not None:
geom_list.append(util.text_3d('MJE in {:.2f}cm out {:.2f}cm'.format(mje_in * 100, mje_out * 100), pos=[-0.4, -0.2, 0], font_size=40, density=2))
o3dv.draw_geometries(geom_list)
def calc_mean_dicts(all_dicts, phase=''):
keys = all_dicts[0].keys()
mean_dict = dict()
stds = ['pen_vol']
for k in keys:
l = list()
for d in all_dicts:
l.append(d[k])
mean_dict[k] = np.array(l).mean()
if k in stds:
mean_dict[k + '_std'] = np.array(l).std()
return mean_dict
def calc_sample(ho_test, ho_gt, idx, phase='nophase'):
stats = geometric_eval.geometric_eval(ho_test, ho_gt)
return stats
def process_sample(sample, idx):
gt_ho, in_ho, out_ho = sample['gt_ho'], sample['in_ho'], sample['out_ho']
in_stats = calc_sample(in_ho, gt_ho, idx, 'before ContactOpt')
out_stats = calc_sample(out_ho, gt_ho, idx, 'after ContactOpt')
return in_stats, out_stats
def run_eval(args):
in_file = 'data/optimized_{}.pkl'.format(args.split)
runs = pickle.load(open(in_file, 'rb'))
print('Loaded {} len {}'.format(in_file, len(runs)))
# if args.vis or args.physics:
# print('Shuffling!!!')
# random.shuffle(runs)
if args.partial > 0:
runs = runs[:args.partial]
do_parallel = not args.vis
if do_parallel:
all_data = Parallel(n_jobs=mp.cpu_count() - 2)(delayed(process_sample)(s, idx) for idx, s in enumerate(tqdm(runs)))
in_all = [item[0] for item in all_data]
out_all = [item[1] for item in all_data]
else:
all_data = [] # Do non-parallel
for idx, s in enumerate(tqdm(runs)):
all_data.append(process_sample(s, idx))
if args.vis:
print('In vs GT\n', pprint.pformat(all_data[-1][0]))
print('Out vs GT\n', pprint.pformat(all_data[-1][1]))
if args.split == 'im_pred_trans':
vis_sample(s['gt_ho'], s['in_ho'], s['out_ho'], mje_in=all_data[-1][0]['objalign_hand_joints'], mje_out=all_data[-1][1]['objalign_hand_joints'])
else:
vis_sample(s['gt_ho'], s['in_ho'], s['out_ho'], mje_in=all_data[-1][0]['unalign_hand_joints'], mje_out=all_data[-1][1]['unalign_hand_joints'])
in_all = [item[0] for item in all_data]
out_all = [item[1] for item in all_data]
mean_in = calc_mean_dicts(in_all, 'In vs GT')
mean_out = calc_mean_dicts(out_all, 'Out vs GT')
print('In vs GT\n', pprint.pformat(mean_in))
print('Out vs GT\n', pprint.pformat(mean_out))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run eval on fitted pkl')
parser.add_argument('--split', default='aug', type=str)
parser.add_argument('--vis', action='store_true')
parser.add_argument('--contact_f1', action='store_true')
parser.add_argument('--pen', action='store_true')
parser.add_argument('--saveobj', action='store_true')
parser.add_argument('--partial', default=-1, type=int, help='Only run for n samples')
args = parser.parse_args()
start_time = time.time()
run_eval(args)
print('Eval time', time.time() - start_time)
|
ContactOpt-main
|
contactopt/run_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from os import path as osp
import numpy as np
import json
import matplotlib.pyplot as plt
import torch
import pytorch3d
from manopth import manolayer
import open3d
from PIL import Image, ImageFont, ImageDraw
from pyquaternion import Quaternion
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
from manopth.manolayer import ManoLayer
import trimesh
SAMPLE_VERTS_NUM = 2048
DEEPCONTACT_BIN_WEIGHTS_FILE = 'data/class_bin_weights.out'
DEEPCONTACT_NUM_BINS = 10
def val_to_class(val):
"""
Converts a contact value [0-1] to a class assignment
:param val: tensor (batch, verts)
:return: class assignment (batch, verts)
"""
expanded = torch.floor(val * DEEPCONTACT_NUM_BINS)
return torch.clamp(expanded, 0, DEEPCONTACT_NUM_BINS - 1).long() # Cut off potential 1.0 inputs?
def class_to_val(raw_scores):
"""
    Finds the highest-scoring class for each vertex and converts it back to a contact value
    :param raw_scores: tensor (batch, verts, classes)
    :return: contact value at the bin centre (batch, verts)
"""
cls = torch.argmax(raw_scores, dim=2)
val = (cls + 0.5) / DEEPCONTACT_NUM_BINS
return val
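# Round-trip sketch (illustrative only): val_to_class quantizes contact values in
# [0, 1] into DEEPCONTACT_NUM_BINS classes, and class_to_val maps a class back to
# its bin centre, so a round trip is accurate to half a bin width.
def _demo_contact_binning():
    vals = torch.tensor([[0.0, 0.34, 0.99, 1.0]])  # (batch=1, verts=4)
    cls = val_to_class(vals)  # classes 0, 3, 9, 9
    scores = torch.nn.functional.one_hot(cls, DEEPCONTACT_NUM_BINS).float()
    recovered = class_to_val(scores)  # bin centres 0.05, 0.35, 0.95, 0.95
    assert torch.all((recovered - vals).abs() <= 0.5 / DEEPCONTACT_NUM_BINS + 1e-6)
    return recovered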
def forward_mano(mano_model, pose, beta, tforms):
"""Forward mano pass, MANO params to mesh"""
device = pose.device
batch_size = pose.shape[0]
verts, joints = mano_model(pose, beta)
verts_homo = torch.cat((verts / 1000, torch.ones(batch_size, verts.shape[1], 1, device=device)), 2)
joints_homo = torch.cat((joints / 1000, torch.ones(batch_size, joints.shape[1], 1, device=device)), 2)
tform_agg = torch.eye(4, device=device).reshape(1, 4, 4).repeat(batch_size, 1, 1)
for tform in tforms:
tform_agg = torch.bmm(tform, tform_agg) # Aggregate all transforms
# Apply aggregated transform to all points, permuting for matmul
verts_homo = torch.bmm(tform_agg, verts_homo.permute(0, 2, 1)).permute(0, 2, 1)
joints_homo = torch.bmm(tform_agg, joints_homo.permute(0, 2, 1)).permute(0, 2, 1)
return verts_homo[:, :, :3], joints_homo[:, :, :3]
def fit_pca_to_axang(mano_pose, mano_beta):
"""
This project uses the MANO model parameterized with 15 PCA components. However, many other approaches use
different parameterizations (15 joints, parameterized with 45 axis-angle parameters). This function
allows converting between the formats. It first runs the MANO model forwards to get the hand vertices of
the initial format. Then an optimization is performed to adjust the 15 PCA parameters of a second MANO model
to match the initial vertices. Perhaps there are better ways to do this, but this ensures highest accuracy.
:param mano_pose: numpy (45) axis angle coordinates
:param mano_beta: numpy (10) beta parameters
:return: numpy (15) PCA parameters of fitted hand
"""
mano_pose = np.array(mano_pose)
full_axang = torch.Tensor(mano_pose).unsqueeze(0)
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=45, side='right', flat_hand_mean=False)
beta_in = torch.Tensor(mano_beta).unsqueeze(0)
mano_model_orig = ManoLayer(mano_root='mano/models', joint_rot_mode="axisang", use_pca=False, center_idx=None, flat_hand_mean=True)
_, target_joints = forward_mano(mano_model_orig, full_axang, beta_in, [])
full_axang[:, 3:] -= mano_model.th_hands_mean
pca_mat = mano_model.th_selected_comps.T
pca_shape = full_axang[:, 3:].mm(pca_mat) # Since the HO gt is in full 45 dim axang coords, convert back to PCA shape
new_pca_shape = np.zeros(18)
new_pca_shape[:3] = mano_pose[:3] # set axang
new_pca_shape[3:] = pca_shape[0, :15] # set pca pose
# Do optimization
pca_in = torch.Tensor(new_pca_shape).unsqueeze(0)
pca_in.requires_grad = True
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=15, side='right', flat_hand_mean=False)
optimizer = torch.optim.Adam([pca_in], lr=0.03, amsgrad=True) # AMSgrad helps
loss_criterion = torch.nn.L1Loss()
for it in range(200):
optimizer.zero_grad()
hand_verts, hand_joints = forward_mano(mano_model, pca_in, beta_in, []) # 2.2ms
# vis_pointcloud(hand_joints, target_joints)
loss = loss_criterion(hand_joints, target_joints)
# print('Opt loss', loss.detach())
loss.backward()
optimizer.step()
return pca_in.detach().squeeze(0).numpy()
def hand_color():
return np.asarray([224.0, 172.0, 105.0]) / 255
def obj_color():
return np.asarray([100.0, 100.0, 100.0]) / 255
def save_trimesh(obj_mesh, output_path):
obj_raw = trimesh.exchange.obj.export_obj(obj_mesh, include_texture=False)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "w") as obj_file:
obj_file.write(obj_raw)
def verts_to_name(num_verts):
"""Hacky function allowing finding the name of an object by the number of vertices.
Each object happens to have a different number."""
num_verts_dict = {100597: 'mouse', 29537: 'binoculars', 100150: 'bowl', 120611: 'camera', 64874: 'cell_phone',
177582: 'cup', 22316: 'eyeglasses', 46334: 'flashlight', 35949: 'hammer', 93324: 'headphones',
19962: 'knife', 169964: 'mug', 57938: 'pan', 95822: 'ps_controller', 57824: 'scissors',
144605: 'stapler', 19708: 'toothbrush', 42394: 'toothpaste', 126627: 'utah_teapot', 90926: 'water_bottle',
104201: 'wine_glass', 108248: 'door_knob', 71188: 'light_bulb', 42232: 'banana', 93361: 'apple',
8300: 'HO_sugar', 8251: 'HO_soap', 16763: 'HO_mug', 10983: 'HO_mustard', 9174: 'HO_drill',
8291: 'HO_cheezits', 8342: 'HO_spam', 10710: 'HO_banana', 8628: 'HO_scissors',
148245: 'train_exclude'}
if num_verts in num_verts_dict:
return num_verts_dict[num_verts]
return 'DIDNT FIND {}'.format(num_verts)
def mesh_is_thin(num_verts):
"""For thin meshes, the interpenetration loss doesn't do anything, since they're thinner than 2*2mm.
For thin objects, we set this margin to zero mm."""
thins = [19708, 19962, 22316, 16763, 8628] # Toothbrush, Knife, Eyeglasses, HO_mug, HO_scissors
is_thin = torch.zeros_like(num_verts)
for t in thins:
is_thin[num_verts == t] = 1
return is_thin
def upscale_contact(obj_mesh, obj_sampled_idx, contact_obj):
"""
When we run objects through our network, they always have a fixed number of vertices.
We need to up/downscale the contact from this to the original number of vertices
:param obj_mesh: Pytorch3d Meshes object
:param obj_sampled_idx: (batch, 2048)
:param contact_obj: (batch, 2048)
:return:
"""
obj_verts = obj_mesh.verts_padded()
_, closest_idx, _ = pytorch3d.ops.knn_points(obj_verts, batched_index_select(obj_verts, 1, obj_sampled_idx), K=1)
upscaled = batched_index_select(contact_obj, 1, closest_idx.squeeze(2))
return upscaled.detach()
def hack_filedesciptor():
"""
Sometimes needed if reading datasets very quickly? Fixes:
RuntimeError: received 0 items of ancdata
https://github.com/pytorch/pytorch/issues/973
"""
torch.multiprocessing.set_sharing_strategy('file_system')
def apply_tform(tform, verts):
"""
Applies a 4x4 rigid transform to a list of points
:param tform: tensor (batch, 4, 4)
:param verts: tensor (batch, N, 3)
:return:
"""
verts_homo = torch.cat((verts, torch.ones(verts.shape[0], verts.shape[1], 1, device=verts.device)), 2)
new_verts = torch.bmm(tform, verts_homo.permute(0, 2, 1)).permute(0, 2, 1)
return new_verts[:, :, :3]
def apply_rot(rot, verts, around_centroid=False):
"""
Applies a 3x3 rotation matrix to a list of points
:param rot: tensor (batch, 3, 3)
:param verts: tensor (batch, N, 3)
:return:
"""
if around_centroid:
centroid = verts.mean(dim=1)
verts = verts - centroid
new_verts = torch.bmm(rot, verts.permute(0, 2, 1)).permute(0, 2, 1)
if around_centroid:
new_verts = new_verts + centroid
return new_verts
def translation_to_tform(translation):
"""
(batch, 3) to (batch, 4, 4)
"""
tform_out = pytorch3d.ops.eyes(4, translation.shape[0], device=translation.device)
tform_out[:, :3, 3] = translation
return tform_out
def sharpen_contact(c, slope=10, thresh=0.6):
"""
Apply filter to input, makes into a "soft binary"
"""
out = slope * (c - thresh) + thresh
return torch.clamp(out, 0.0, 1.0)
def fit_sigmoid(colors, a=0.05):
"""Fits a sigmoid to raw contact temperature readings from the ContactPose dataset. This function is copied from that repo"""
idx = colors > 0
ci = colors[idx]
x1 = min(ci) # Find two points
y1 = a
x2 = max(ci)
y2 = 1-a
lna = np.log((1 - y1) / y1)
lnb = np.log((1 - y2) / y2)
k = (lnb - lna) / (x1 - x2)
mu = (x2*lna - x1*lnb) / (lna - lnb)
ci = np.exp(k * (ci-mu)) / (1 + np.exp(k * (ci-mu))) # Apply the sigmoid
colors[idx] = ci
return colors
def subdivide_verts(edges, verts):
"""
Takes a list of edges and vertices, and subdivides each edge and puts a vert in the middle. May not work with variable-size meshes
:param edges: (batch, E, 2)
:param verts: (batch, V, 3)
:return: new_verts (batch, E+V, 3)
"""
selected_verts = edges.view(edges.shape[0], -1) # Flatten into (batch, E*2)
new_verts = batched_index_select(verts, 1, selected_verts)
new_verts = new_verts.view(edges.shape[0], edges.shape[1], 2, 3)
new_verts = new_verts.mean(dim=2)
new_verts = torch.cat([verts, new_verts], dim=1) # (sum(V_n)+sum(E_n), 3)
return new_verts
def calc_l2_err(a, b, axis=2):
if torch.is_tensor(a):
mse = torch.sum(torch.square(a - b), dim=axis)
l2_err = torch.sqrt(mse)
return torch.mean(l2_err, 1)
else:
mse = np.linalg.norm(a - b, 2, axis=axis)
return mse.mean()
def batched_index_select(t, dim, inds):
"""
    Helper function to extract batch-varying indices along an array
:param t: array to select from
:param dim: dimension to select along
    :param inds: batch-varying indices
:return:
"""
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out
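# Shape sketch (illustrative only): batched_index_select gathers, per batch
# element, a different set of rows from a (batch, N, F) tensor using a (batch, K)
# index tensor, returning (batch, K, F).
def _demo_batched_index_select():
    t = torch.arange(2 * 5 * 3, dtype=torch.float32).view(2, 5, 3)  # (batch, N, F)
    inds = torch.tensor([[0, 4], [1, 1]])  # (batch, K)
    out = batched_index_select(t, 1, inds)
    assert out.shape == (2, 2, 3)
    assert torch.equal(out[0, 1], t[0, 4]) and torch.equal(out[1, 0], t[1, 1])
    return out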
def mesh_set_color(color, mesh, colormap=plt.cm.inferno):
"""
Applies colormap to object
:param color: Tensor or numpy array, (N, 1)
:param mesh: Open3D TriangleMesh
:return:
"""
# vertex_colors = np.tile(color.squeeze(), (3, 1)).T
# mesh.vertex_colors = o3du.Vector3dVector(vertex_colors)
# geometry.apply_colormap(mesh, apply_sigmoid=False)
colors = np.asarray(color).squeeze()
if len(colors.shape) > 1:
colors = colors[:, 0]
colors[colors < 0.1] = 0.1 # TODO hack to make brighter
colors = colormap(colors)[:, :3]
colors = o3du.Vector3dVector(colors)
mesh.vertex_colors = colors
def aggregate_tforms(tforms):
"""Aggregates a list of 4x4 rigid transformation matricies"""
device = tforms[0].device
batch_size = tforms[0].shape[0]
tform_agg = pytorch3d.ops.eyes(4, batch_size, device=device)
for tform in tforms:
tform_agg = torch.bmm(tform, tform_agg) # Aggregate all transforms
return tform_agg
def axisEqual3D(ax):
"""Sets a matplotlib 3D plot to have equal-scale axes"""
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def vis_pointcloud(object_points, hand_points, idx=None, show=True):
if show:
plt.switch_backend('TkAgg')
else:
plt.switch_backend('agg')
if idx is None:
idx = int(np.random.randint(0, hand_points.shape[0])) # Select random sample from batch
object_points = object_points[idx, :, :].detach().cpu().numpy()
hand_points = hand_points[idx, :, :].detach().cpu().numpy()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(object_points[:, 0], object_points[:, 1], object_points[:, 2])
ax.scatter(hand_points[:, 0], hand_points[:, 1], hand_points[:, 2]) #, c=np.arange(hand_points.shape[0]))
if show:
axisEqual3D(ax)
# plt.axis('off')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
return fig
def get_mano_closed_faces():
"""The default MANO mesh is "open" at the wrist. By adding additional faces, the hand mesh is closed,
which looks much better.
https://github.com/hassony2/handobjectconsist/blob/master/meshreg/models/manoutils.py"""
mano_layer = manolayer.ManoLayer(
joint_rot_mode="axisang", use_pca=False, mano_root='mano/models', center_idx=None, flat_hand_mean=True
)
close_faces = torch.Tensor(
[
[92, 38, 122],
[234, 92, 122],
[239, 234, 122],
[279, 239, 122],
[215, 279, 122],
[215, 122, 118],
[215, 118, 117],
[215, 117, 119],
[215, 119, 120],
[215, 120, 108],
[215, 108, 79],
[215, 79, 78],
[215, 78, 121],
[214, 215, 121],
]
)
closed_faces = torch.cat([mano_layer.th_faces, close_faces.long()])
# Indices of faces added during closing --> should be ignored as they match the wrist
# part of the hand, which is not an external surface of the human
# Valid because added closed faces are at the end
hand_ignore_faces = [1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551]
return closed_faces.detach().cpu().numpy() #, hand_ignore_faces
def text_3d(text, pos, direction=None, degree=-90.0, density=10, font='/usr/share/fonts/truetype/freefont/FreeMono.ttf', font_size=10):
"""
    Generate an Open3D text point cloud used for visualization.
https://github.com/intel-isl/Open3D/issues/2
:param text: content of the text
:param pos: 3D xyz position of the text upper left corner
:param direction: 3D normalized direction of where the text faces
:param degree: in plane rotation of text
:param font: Name of the font - change it according to your system
:param font_size: size of the font
    :return: o3d.geometry.PointCloud object
"""
if direction is None:
direction = (0., 0., 1.)
# font_obj = ImageFont.truetype(font, font_size)
font_obj = ImageFont.truetype(font, font_size * density)
font_dim = font_obj.getsize(text)
img = Image.new('RGB', font_dim, color=(255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, font=font_obj, fill=(0, 0, 0))
img = np.asarray(img)
img_mask = img[:, :, 0] < 128
indices = np.indices([*img.shape[0:2], 1])[:, img_mask, 0].reshape(3, -1).T
pcd = open3d.geometry.PointCloud()
pcd.colors = open3d.utility.Vector3dVector(img[img_mask, :].astype(float) / 255.0)
# pcd.points = o3d.utility.Vector3dVector(indices / 100.0)
pcd.points = open3d.utility.Vector3dVector(indices / 1000 / density)
raxis = np.cross([0.0, 0.0, 1.0], direction)
if np.linalg.norm(raxis) < 1e-6:
raxis = (0.0, 0.0, 1.0)
trans = (Quaternion(axis=raxis, radians=np.arccos(direction[2])) *
Quaternion(axis=direction, degrees=degree)).transformation_matrix
trans[0:3, 3] = np.asarray(pos)
pcd.transform(trans)
return pcd
def to_cpu_numpy(obj):
"""Convert torch cuda tensors to cpu, numpy tensors"""
if torch.is_tensor(obj):
return obj.detach().cpu().numpy()
elif isinstance(obj, dict):
res = {}
for k, v in obj.items():
res[k] = to_cpu_numpy(v)
return res
elif isinstance(obj, list):
res = []
for v in obj:
res.append(to_cpu_numpy(v))
return res
else:
raise TypeError("Invalid type for move_to")
def dict_to_device(data, device):
"""Move dict of tensors to device"""
out = dict()
for k in data.keys():
out[k] = data[k].to(device)
return out
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
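# Usage sketch (illustrative only): AverageMeter keeps a running, count-weighted
# mean, e.g. of a per-batch loss where n is the batch size.
def _demo_average_meter():
    meter = AverageMeter('loss', ':.4f')
    meter.update(1.0, n=2)  # batch of 2 with mean loss 1.0
    meter.update(0.5, n=2)  # batch of 2 with mean loss 0.5
    assert abs(meter.avg - 0.75) < 1e-9
    return str(meter)  # "loss 0.5000 (0.7500)"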
|
ContactOpt-main
|
contactopt/util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
def parse_dataset(args):
""" Converts the --split argument into a dataset file """
if args.split == 'aug':
args.train_dataset = 'data/perturbed_contactpose_train.pkl'
args.test_dataset = 'data/perturbed_contactpose_test.pkl'
elif args.split == 'fine':
args.test_dataset = 'data/contactpose_test.pkl'
elif args.split == 'im':
args.test_dataset = 'data/ho3d_image.pkl'
elif args.split == 'demo':
args.test_dataset = 'data/ho3d_image_demo.pkl'
else:
raise ValueError('Unknown dataset')
def run_contactopt_parse_args():
parser = argparse.ArgumentParser(description='Alignment networks training')
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--split', default='aug', type=str)
parser.add_argument('--lr', type=float)
parser.add_argument('--n_iter', type=int)
parser.add_argument('--partial', default=-1, type=int, help='Only run for n batches')
parser.add_argument('--w_cont_hand', type=float, help='Weight of the hand contact in optimization')
parser.add_argument('--sharpen_thresh', type=float)
parser.add_argument('--ncomps', type=int)
parser.add_argument('--w_cont_asym', type=float)
parser.add_argument('--w_opt_trans', type=float)
parser.add_argument('--w_opt_rot', type=float)
parser.add_argument('--w_opt_pose', type=float)
parser.add_argument('--caps_rad', type=float)
parser.add_argument('--caps_hand', action='store_true')
parser.add_argument('--cont_method', type=int)
parser.add_argument('--caps_top', type=float)
parser.add_argument('--caps_bot', type=float)
parser.add_argument('--w_pen_cost', type=float)
parser.add_argument('--pen_it', type=float)
parser.add_argument('--w_obj_rot', type=float)
parser.add_argument('--rand_re', type=int)
parser.add_argument('--rand_re_trans', type=float)
parser.add_argument('--rand_re_rot', type=float)
parser.add_argument('--vis_method', type=int)
parser.add_argument('--vis', action='store_true')
parser.add_argument('--video', action='store_true')
    parser.add_argument('--min_cont', default=1, type=int, help='Discard grasps with fewer than this many points of initial contact')
args = parser.parse_args()
parse_dataset(args)
if args.vis:
args.batch_size = 1
return args
def train_network_parse_args():
parser = argparse.ArgumentParser(description='Alignment networks training')
parser.add_argument('--lr', default=0.01, type=float)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--optimizer', default='adam', type=str)
parser.add_argument('--split', default='aug', type=str)
# parser.add_argument('--loss_pose', default=0, type=float)
parser.add_argument('--loss_c_obj', default=1, type=float)
parser.add_argument('--loss_c_hand', default=1, type=float)
# parser.add_argument('--loss_3d', default=0, type=float)
parser.add_argument('--epochs', default=101, type=int)
parser.add_argument('--checkpoint', default='', type=str)
parser.add_argument('--desc', default='', type=str)
parser.add_argument('--vis', action='store_true')
args = parser.parse_args()
if args.desc == '':
args.desc = str(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
all_str = ''
for key, val in vars(args).items():
all_str += '--{}={} '.format(key, val)
print(all_str) # Convert to dict and print
args.all_str = all_str
parse_dataset(args)
return args
|
ContactOpt-main
|
contactopt/arguments.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
ContactOpt-main
|
contactopt/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contactopt.loader import *
import contactopt.util as util
from contactopt.hand_object import HandObject
import time
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
def show_optimization(data, opt_state, hand_contact_target=None, obj_contact_target=None, is_video=False, label=None, vis_method=1, delay=0.001):
"""Displays video/still frame of optimization process
Contact visualization options:
0 GT contact on opt
1 Predicted contact on opt
2 Live contact on opt hand
3 Live contact on both
4 No contact on any
5 No hand contact, predicted obj contact
"""
gt_ho = HandObject()
opt_ho = HandObject()
gt_ho.load_from_batch(data['hand_beta_gt'], data['hand_pose_gt'], data['hand_mTc_gt'], data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_gt'])
opt_ho.load_from_batch(data['hand_beta_gt'], data['hand_pose_gt'], data['hand_mTc_gt'], data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_aug'], obj_rot=opt_state[-1]['obj_rot'])
hand_mesh_gt, obj_mesh_gt = gt_ho.get_o3d_meshes()
hand_mesh_opt, obj_mesh_opt = opt_ho.get_o3d_meshes()
geom_list = [hand_mesh_gt, obj_mesh_gt, obj_mesh_opt, hand_mesh_opt]
if vis_method == 1 or vis_method == 5:
util.mesh_set_color(hand_contact_target, hand_mesh_opt)
if obj_contact_target.shape[1] == util.SAMPLE_VERTS_NUM:
obj_contact_target = upscale_contact(data['mesh_aug'], data['obj_sampled_idx'], obj_contact_target)
util.mesh_set_color(obj_contact_target, obj_mesh_opt)
if vis_method == 2 or vis_method == 3:
util.mesh_set_color(opt_state[-1]['contact_hand'].squeeze(), hand_mesh_opt)
if opt_state[-1]['contact_obj'].shape[1] == util.SAMPLE_VERTS_NUM:
c = upscale_contact(data['mesh_aug'], data['obj_sampled_idx'], opt_state[-1]['contact_obj'])
util.mesh_set_color(c, obj_mesh_opt)
else:
util.mesh_set_color(opt_state[-1]['contact_obj'].squeeze(), obj_mesh_opt)
if vis_method == 4 or vis_method == 5:
hand_mesh_gt.paint_uniform_color(np.asarray([150.0, 250.0, 150.0]) / 255) # Green
hand_mesh_opt.paint_uniform_color(np.asarray([250.0, 150.0, 150.0]) / 255) # Red
if vis_method == 4:
obj_mesh_gt.paint_uniform_color(np.asarray([100.0, 100.0, 100.0]) / 255) # Gray
obj_mesh_opt.paint_uniform_color(np.asarray([100.0, 100.0, 100.0]) / 255) # Gray
if label is not None:
lbl_verts = util.text_3d(label, pos=[0, 0.1, 0], font_size=20, density=2)
geom_list.append(lbl_verts)
hand_mesh_opt.vertices = o3du.Vector3dVector(opt_state[-1]['hand_verts'].squeeze())
hand_mesh_opt.compute_vertex_normals()
hand_mesh_gt.translate((0, 0.2, 0))
obj_mesh_gt.translate((0, 0.2, 0))
if not is_video:
o3dv.draw_geometries(geom_list)
else:
vis = o3dv.VisualizerWithKeyCallback()
vis.create_window()
for g in geom_list:
vis.add_geometry(g)
for i in range(len(opt_state) * 2):
out_dict = opt_state[i % len(opt_state)]
if out_dict['obj_rot'][0, 0, 0] < 1:
obj_verts = util.apply_rot(out_dict['obj_rot'], data['mesh_aug'].verts_padded(), around_centroid=True).squeeze()
obj_mesh_opt.vertices = o3du.Vector3dVector(obj_verts)
hand_mesh_opt.vertices = o3du.Vector3dVector(out_dict['hand_verts'].squeeze())
if vis_method == 2 or vis_method == 3:
util.mesh_set_color(out_dict['contact_hand'].squeeze(), hand_mesh_opt)
if vis_method == 3:
if out_dict['contact_obj'].shape[1] == util.SAMPLE_VERTS_NUM:
c = util.upscale_contact(data['mesh_aug'], data['obj_sampled_idx'], out_dict['contact_obj'])
util.mesh_set_color(c, obj_mesh_opt)
else:
util.mesh_set_color(out_dict['contact_obj'].squeeze(), obj_mesh_opt)
vis.update_geometry(hand_mesh_opt)
vis.update_geometry(obj_mesh_opt)
vis.poll_events()
vis.update_renderer()
if i % len(opt_state) == 0:
time.sleep(2)
# time.sleep(delay)
vis.destroy_window()
|
ContactOpt-main
|
contactopt/visualize.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import pytorch3d.ops
from contactopt.util import *
from pytorch3d.structures import Meshes
def capsule_sdf(mesh_verts, mesh_normals, query_points, query_normals, caps_rad, caps_top, caps_bot, foreach_on_mesh):
"""
Find the SDF of query points to mesh verts
Capsule SDF formulation from https://iquilezles.org/www/articles/distfunctions/distfunctions.htm
:param mesh_verts: (batch, V, 3)
:param mesh_normals: (batch, V, 3)
:param query_points: (batch, Q, 3)
:param caps_rad: scalar, radius of capsules
:param caps_top: scalar, distance from mesh to top of capsule
:param caps_bot: scalar, distance from mesh to bottom of capsule
:param foreach_on_mesh: boolean, foreach point on mesh find closest query (V), or foreach query find closest mesh (Q)
:return: normalized sdf + 1 (batch, V or Q)
"""
# TODO implement normal check?
if foreach_on_mesh: # Foreach mesh vert, find closest query point
knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(mesh_verts, query_points, K=1, return_nn=True) # TODO should attract capsule middle?
capsule_tops = mesh_verts + mesh_normals * caps_top
capsule_bots = mesh_verts + mesh_normals * caps_bot
delta_top = nearest_pos[:, :, 0, :] - capsule_tops
normal_dot = torch.sum(mesh_normals * batched_index_select(query_normals, 1, nearest_idx.squeeze(2)), dim=2)
else: # Foreach query vert, find closest mesh point
knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(query_points, mesh_verts, K=1, return_nn=True) # TODO should attract capsule middle?
closest_mesh_verts = batched_index_select(mesh_verts, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
closest_mesh_normals = batched_index_select(mesh_normals, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
capsule_tops = closest_mesh_verts + closest_mesh_normals * caps_top # Coordinates of the top focii of the capsules (batch, V, 3)
capsule_bots = closest_mesh_verts + closest_mesh_normals * caps_bot
delta_top = query_points - capsule_tops
normal_dot = torch.sum(query_normals * closest_mesh_normals, dim=2)
bot_to_top = capsule_bots - capsule_tops # Vector from capsule bottom to top
along_axis = torch.sum(delta_top * bot_to_top, dim=2) # Dot product
top_to_bot_square = torch.sum(bot_to_top * bot_to_top, dim=2)
h = torch.clamp(along_axis / top_to_bot_square, 0, 1) # Could avoid NaNs with offset in division here
dist_to_axis = torch.norm(delta_top - bot_to_top * h.unsqueeze(2), dim=2) # Distance to capsule centerline
return dist_to_axis / caps_rad, normal_dot # (Normalized SDF)+1 0 on endpoint, 1 on edge of capsule
def sdf_to_contact(sdf, dot_normal, method=0):
"""
Transform normalized SDF into some contact value
:param sdf: NORMALIZED SDF, 1 is surface of object
:param method: select method
:return: contact (batch, S, 1)
"""
if method == 0:
c = 1 / (sdf + 0.0001) # Exponential dropoff
elif method == 1:
c = -sdf + 2 # Linear dropoff
elif method == 2:
c = 1 / (sdf + 0.0001) # Exponential dropoff
c = torch.pow(c, 2)
elif method == 3:
c = torch.sigmoid(-sdf + 2.5)
elif method == 4:
c = (-dot_normal/2+0.5) / (sdf + 0.0001) # Exponential dropoff with sharp normal
elif method == 5:
c = 1 / (sdf + 0.0001) # Proxy for other stuff
return torch.clamp(c, 0.0, 1.0)
def calculate_contact_capsule(hand_verts, hand_normals, object_verts, object_normals,
caps_top=0.0005, caps_bot=-0.0015, caps_rad=0.001, caps_on_hand=False, contact_norm_method=0):
"""
Calculates contact maps on object and hand.
:param hand_verts: (batch, V, 3)
:param hand_normals: (batch, V, 3)
:param object_verts: (batch, O, 3)
:param object_normals: (batch, O, 3)
:param caps_top: ctop, distance to top capsule center
:param caps_bot: cbot, distance to bottom capsule center
:param caps_rad: crad, radius of the contact capsule
:param caps_on_hand: are contact capsules placed on hand or object vertices
:param contact_norm_method: select a distance-to-contact function
:return: object contact (batch, O, 1), hand contact (batch, V, 1)
"""
if caps_on_hand:
sdf_obj, dot_obj = capsule_sdf(hand_verts, hand_normals, object_verts, object_normals, caps_rad, caps_top, caps_bot, False)
sdf_hand, dot_hand = capsule_sdf(hand_verts, hand_normals, object_verts, object_normals, caps_rad, caps_top, caps_bot, True)
else:
sdf_obj, dot_obj = capsule_sdf(object_verts, object_normals, hand_verts, hand_normals, caps_rad, caps_top, caps_bot, True)
sdf_hand, dot_hand = capsule_sdf(object_verts, object_normals, hand_verts, hand_normals, caps_rad, caps_top, caps_bot, False)
obj_contact = sdf_to_contact(sdf_obj, dot_obj, method=contact_norm_method)# * (dot_obj/2+0.5) # TODO dotting contact normal
hand_contact = sdf_to_contact(sdf_hand, dot_hand, method=contact_norm_method)# * (dot_hand/2+0.5)
# print('oshape, nshape', obj_contact.shape, (dot_obj/2+0.5).shape)##
return obj_contact.unsqueeze(2), hand_contact.unsqueeze(2)
def calculate_penetration_cost(hand_verts, hand_normals, object_verts, object_normals, is_thin, contact_norm_method, allowable_pen=0.002):
"""
Calculates an increasing cost for hands heavily intersecting with objects.
Foreach hand vertex, find the nearest object point, dot with object normal.
Include "allowable-pen" buffer margin to account for hand deformation.
"""
allowable_pen = (torch.zeros_like(is_thin) + allowable_pen) * (1 - is_thin)
allowable_pen = allowable_pen.unsqueeze(1)
if contact_norm_method == 5:
hand_verts_offset = hand_verts + hand_normals * -0.004
else:
hand_verts_offset = hand_verts
knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(hand_verts_offset, object_verts, K=1, return_nn=True) # Foreach hand vert, find closest obj vert
closest_obj_verts = batched_index_select(object_verts, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
closest_obj_normals = batched_index_select(object_normals, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
# print('nearest shape', nearest_pos.shape, closest_obj_verts.shape)
delta_pos = hand_verts - closest_obj_verts
dist_along_normal = torch.sum(delta_pos * closest_obj_normals, dim=2) # Dot product. Negative means backward along normal
# print('d along normal', dist_along_normal.shape)
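    # dist_along_normal < 0 means the hand vertex lies behind the object surface (i.e. penetrating);
    # only penetration deeper than allowable_pen incurs a cost, growing linearly with depth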
pen_score = torch.nn.functional.relu(-dist_along_normal - allowable_pen)
# print('pen score', pen_score)
return pen_score
if __name__ == '__main__':
# Plot all sdf_to_contact mappings
import matplotlib.pyplot as plt
    for m in range(4):
        d = torch.linspace(0, 3, 1000)
        c = sdf_to_contact(d, torch.zeros_like(d), method=m)  # dot_normal placeholder; only method 4 uses it
        plt.plot(d.numpy(), c.numpy(), label=str(m))
plt.ylabel('Contact value')
plt.xlabel('Normalized SDF from center')
plt.legend()
plt.show()
|
ContactOpt-main
|
contactopt/diffcontact.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
import json
import contactopt.util as util
import contactopt.arguments as arguments
from contactopt.hand_object import HandObject
from contactopt.run_contactopt import run_contactopt
def create_demo_dataset():
obj_mesh = trimesh.load('data/demo_obj.obj') # Load object mesh
with open('data/demo_mano.json') as json_file: # Load mano parameters
mano_params = json.load(json_file)
# Initialize the HandObject class with the given mano parameters and object mesh.
# Note that pose must be represented using the 15-dimensional PCA space
ho_pred = HandObject()
ho_pred.load_from_mano_params(hand_beta=mano_params['beta'], hand_pose=mano_params['pose'], hand_trans=mano_params['trans'],
obj_faces=obj_mesh.faces, obj_verts=obj_mesh.vertices)
# To make the dataloader happy, we need a "ground truth" H/O set.
# However, since this isn't used for this demo, just copy the ho_pred object.
ho_gt = HandObject()
ho_gt.load_from_ho(ho_pred)
new_sample = dict()
new_sample['ho_aug'] = ho_pred
new_sample['ho_gt'] = ho_gt
# Select the random object vertices which will be sampled
new_sample['obj_sampled_idx'] = np.random.randint(0, len(ho_gt.obj_verts), util.SAMPLE_VERTS_NUM)
# Calculate hand and object features. The network uses these for improved performance.
new_sample['hand_feats_aug'], new_sample['obj_feats_aug'] = ho_pred.generate_pointnet_features(new_sample['obj_sampled_idx'])
return [new_sample] # Return a dataset of length 1
if __name__ == '__main__':
dataset = create_demo_dataset()
args = arguments.run_contactopt_parse_args()
defaults = {'lr': 0.01,
'n_iter': 250,
'w_cont_hand': 2.5,
'sharpen_thresh': -1,
'ncomps': 15,
'w_cont_asym': 2,
'w_opt_trans': 0.3,
'w_opt_rot': 1,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 0,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 320,
'pen_it': 0,
'rand_re': 8,
'rand_re_trans': 0.02,
'rand_re_rot': 5,
'w_obj_rot': 0,
'vis_method': 1}
for k in defaults.keys():
if vars(args)[k] is None:
vars(args)[k] = defaults[k]
args.test_dataset = dataset
args.split = 'user'
run_contactopt(args)
|
ContactOpt-main
|
contactopt/run_user_demo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import pytorch3d
import time
from contactopt.loader import *
from manopth.manolayer import ManoLayer
from manopth import rodrigues_layer
import contactopt.diffcontact as calculate_contact
import contactopt.util as util
from contactopt.hand_object import HandObject
from contactopt.visualize import show_optimization
def optimize_pose(data, hand_contact_target, obj_contact_target, n_iter=250, lr=0.01, w_cont_hand=2, w_cont_obj=1,
save_history=False, ncomps=15, w_cont_asym=2, w_opt_trans=0.3, w_opt_pose=1, w_opt_rot=1,
caps_top=0.0005, caps_bot=-0.001, caps_rad=0.001, caps_on_hand=False,
contact_norm_method=0, w_pen_cost=600, w_obj_rot=0, pen_it=0):
"""Runs differentiable optimization to align the hand with the target contact map.
Minimizes the loss between ground truth contact and contact calculated with DiffContact"""
batch_size = data['hand_pose_aug'].shape[0]
device = data['hand_pose_aug'].device
opt_vector = torch.zeros((batch_size, ncomps + 6 + 3), device=device) # 3 hand rot, 3 hand trans, 3 obj rot
opt_vector.requires_grad = True
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=ncomps, side='right', flat_hand_mean=False).to(device)
if data['obj_sampled_idx'].numel() > 1:
obj_normals_sampled = util.batched_index_select(data['obj_normals_aug'], 1, data['obj_sampled_idx'])
else: # If we're optimizing over all verts
obj_normals_sampled = data['obj_normals_aug']
optimizer = torch.optim.Adam([opt_vector], lr=lr, amsgrad=True) # AMSgrad helps
loss_criterion = torch.nn.L1Loss(reduction='none') # Benchmarked, L1 performs best vs MSE/SmoothL1
opt_state = []
is_thin = mesh_is_thin(data['mesh_aug'].num_verts_per_mesh())
# print('is thin', is_thin, data['mesh_aug'].num_verts_per_mesh())
for it in range(n_iter):
optimizer.zero_grad()
mano_pose_out = torch.cat([opt_vector[:, 0:3] * w_opt_rot, opt_vector[:, 3:ncomps+3] * w_opt_pose], dim=1)
mano_pose_out[:, :18] += data['hand_pose_aug']
tform_out = util.translation_to_tform(opt_vector[:, ncomps+3:ncomps+6] * w_opt_trans)
hand_verts, hand_joints = util.forward_mano(mano_model, mano_pose_out, data['hand_beta_aug'], [data['hand_mTc_aug'], tform_out]) # 2.2ms
if contact_norm_method != 0 and not caps_on_hand:
with torch.no_grad(): # We need to calculate hand normals if using more complicated methods
mano_mesh = Meshes(verts=hand_verts, faces=mano_model.th_faces.repeat(batch_size, 1, 1))
hand_normals = mano_mesh.verts_normals_padded()
else:
hand_normals = torch.zeros(hand_verts.shape, device=device)
obj_verts = data['obj_sampled_verts_aug']
obj_normals = obj_normals_sampled
obj_rot_mat = rodrigues_layer.batch_rodrigues(opt_vector[:, ncomps+6:])
obj_rot_mat = obj_rot_mat.view(batch_size, 3, 3)
if w_obj_rot > 0:
obj_verts = util.apply_rot(obj_rot_mat, obj_verts, around_centroid=True)
obj_normals = util.apply_rot(obj_rot_mat, obj_normals)
contact_obj, contact_hand = calculate_contact.calculate_contact_capsule(hand_verts, hand_normals, obj_verts, obj_normals,
caps_top=caps_top, caps_bot=caps_bot, caps_rad=caps_rad, caps_on_hand=caps_on_hand, contact_norm_method=contact_norm_method)
contact_obj_sub = obj_contact_target - contact_obj
contact_obj_weighted = contact_obj_sub + torch.nn.functional.relu(contact_obj_sub) * w_cont_asym # Loss for 'missing' contact higher
loss_contact_obj = loss_criterion(contact_obj_weighted, torch.zeros_like(contact_obj_weighted)).mean(dim=(1, 2))
contact_hand_sub = hand_contact_target - contact_hand
contact_hand_weighted = contact_hand_sub + torch.nn.functional.relu(contact_hand_sub) * w_cont_asym # Loss for 'missing' contact higher
loss_contact_hand = loss_criterion(contact_hand_weighted, torch.zeros_like(contact_hand_weighted)).mean(dim=(1, 2))
loss = loss_contact_obj * w_cont_obj + loss_contact_hand * w_cont_hand
if w_pen_cost > 0 and it >= pen_it:
pen_cost = calculate_contact.calculate_penetration_cost(hand_verts, hand_normals, data['obj_sampled_verts_aug'], obj_normals_sampled, is_thin, contact_norm_method)
loss += pen_cost.mean(dim=1) * w_pen_cost
out_dict = {'loss': loss.detach().cpu()}
if save_history:
out_dict['hand_verts'] = hand_verts.detach().cpu()#.numpy()
out_dict['hand_joints'] = hand_joints.detach().cpu()#.numpy()
out_dict['contact_obj'] = contact_obj.detach().cpu()#.numpy()
out_dict['contact_hand'] = contact_hand.detach().cpu()#.numpy()
out_dict['obj_rot'] = obj_rot_mat.detach().cpu()#.numpy()
opt_state.append(out_dict)
loss.mean().backward()
optimizer.step()
tform_full_out = util.aggregate_tforms([data['hand_mTc_aug'], tform_out])
return mano_pose_out, tform_full_out, obj_rot_mat, opt_state
def show_optimization_video(data, device):
"""Displays video of optimization process of hand converging"""
data_gpu = util.dict_to_device(data, device)
contact_obj_pred = util.batched_index_select(data_gpu['obj_contact_gt'], 1, data_gpu['obj_sampled_idx'])
out_pose, out_tform, obj_rot_mat, opt_state = optimize_pose(data_gpu, data_gpu['hand_contact_gt'], contact_obj_pred, save_history=True)
show_optimization(data, opt_state, hand_contact_target=data['hand_contact_gt'], obj_contact_target=contact_obj_pred.detach().cpu(), is_video=True, vis_method=1)
if __name__ == '__main__':
"""Show a video optimization from perturbed pose"""
test_dataset = ContactDBDataset('data/perturbed_contactpose_test.pkl')
dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True, collate_fn=ContactDBDataset.collate_fn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for idx, data in enumerate(dataloader):
show_optimization_video(data, device) # do optimization and show video
if idx >= 10:
break
|
ContactOpt-main
|
contactopt/optimize_pose.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset
from contactopt.util import *
import torch
import numpy as np
from pytorch3d.structures import Meshes
import pytorch3d
from torch.utils.data import DataLoader
import time
from tqdm import tqdm
import pickle
class ContactDBDataset(Dataset):
"""PyTorch Dataset object which allows batched fetching of hand/object pairs from a dataset.
PyTorch3D Meshes are used to handle batches of variable-size meshes"""
def __init__(self, data, train=False, min_num_cont=1):
start_time = time.time()
self.train = train
self.aug_vert_jitter = 0.0005
if isinstance(data, str):
self.dataset = pickle.load(open(data, 'rb')) # Load pickle, can take many seconds
else:
self.dataset = data
if 'num_verts_in_contact' in self.dataset[0]:
            print('Cutting samples with fewer than {} points in contact. Was size {}'.format(min_num_cont, len(self.dataset)))
self.dataset = [s for s in self.dataset if s['num_verts_in_contact'] >= min_num_cont]
print('Dataset loaded in {:.2f} sec, {} samples'.format(time.time() - start_time, len(self.dataset)))
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
sample = self.dataset[idx]
out = dict()
out['obj_faces'] = torch.Tensor(sample['ho_gt'].obj_faces)
out['obj_sampled_idx'] = torch.Tensor(sample['obj_sampled_idx']).long()
out['obj_verts_gt'] = torch.Tensor(sample['ho_gt'].obj_verts)
out['obj_sampled_verts_gt'] = out['obj_verts_gt'][out['obj_sampled_idx'], :]
out['obj_contact_gt'] = torch.Tensor(sample['ho_gt'].obj_contact)
out['hand_contact_gt'] = torch.Tensor(sample['ho_gt'].hand_contact)
out['hand_pose_gt'] = torch.Tensor(sample['ho_gt'].hand_pose)
out['hand_beta_gt'] = torch.Tensor(sample['ho_gt'].hand_beta)
out['hand_mTc_gt'] = torch.Tensor(sample['ho_gt'].hand_mTc)
out['hand_verts_gt'] = torch.Tensor(sample['ho_gt'].hand_verts)
out['obj_verts_aug'] = torch.Tensor(sample['ho_aug'].obj_verts)
out['obj_sampled_verts_aug'] = out['obj_verts_aug'][out['obj_sampled_idx'], :]
out['hand_pose_aug'] = torch.Tensor(sample['ho_aug'].hand_pose)
out['hand_beta_aug'] = torch.Tensor(sample['ho_aug'].hand_beta)
out['hand_mTc_aug'] = torch.Tensor(sample['ho_aug'].hand_mTc)
out['hand_verts_aug'] = torch.Tensor(sample['ho_aug'].hand_verts)
out['hand_feats_aug'] = torch.Tensor(sample['hand_feats_aug'])
out['obj_feats_aug'] = torch.Tensor(sample['obj_feats_aug'])
out['obj_normals_aug'] = torch.Tensor(sample['ho_aug'].obj_normals)
if self.train:
out['obj_sampled_verts_aug'] += torch.randn(out['obj_sampled_verts_aug'].shape) * self.aug_vert_jitter
return out
@staticmethod
def collate_fn(batch):
out = dict()
batch_keys = batch[0].keys()
skip_keys = ['obj_faces', 'obj_verts_gt', 'obj_contact_gt', 'obj_normals_aug', 'obj_verts_aug'] # These will be manually collated
# For each not in skip_keys, use default torch collator
for key in [k for k in batch_keys if k not in skip_keys]:
out[key] = torch.utils.data._utils.collate.default_collate([d[key] for d in batch])
verts_gt_all = [sample['obj_verts_gt'] for sample in batch]
verts_aug_all = [sample['obj_verts_aug'] for sample in batch]
faces_all = [sample['obj_faces'] for sample in batch]
contact_all = [sample['obj_contact_gt'] for sample in batch]
obj_normals_aug_all = [sample['obj_normals_aug'] for sample in batch]
out['obj_contact_gt'] = pytorch3d.structures.utils.list_to_padded(contact_all, pad_value=-1)
out['obj_normals_aug'] = pytorch3d.structures.utils.list_to_padded(obj_normals_aug_all, pad_value=-1)
# out['obj_verts_gt'] = pytorch3d.structures.utils.list_to_padded(verts_gt_all, pad_value=-1)
# out['obj_verts_aug'] = pytorch3d.structures.utils.list_to_padded(verts_aug_all, pad_value=-1)
# out['obj_faces'] = pytorch3d.structures.utils.list_to_padded(faces_all, pad_value=-1)
out['mesh_gt'] = Meshes(verts=verts_gt_all, faces=faces_all) # This is slower than the above, but probably fast enough
out['mesh_aug'] = Meshes(verts=verts_aug_all, faces=faces_all)
return out
if __name__ == '__main__':
# Test the speed of the dataloader by going through the entire perturbed-contactpose train set
dataset = ContactDBDataset('data/perturbed_contactpose_train.pkl')
dataloader = DataLoader(dataset, batch_size=16, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
start_time = time.time()
print('start', len(dataloader))
for idx, sample in enumerate(tqdm(dataloader)):
pass
print('Epoch dataload time: ', time.time() - start_time)
|
ContactOpt-main
|
contactopt/loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pickle
from contactopt.hand_object import HandObject
import open3d
from tqdm import tqdm
from scipy.spatial.transform import Rotation as R
import random
from contactopt.util import SAMPLE_VERTS_NUM
def process_image_pkl(input_file, output_file):
"""
Reads pre-generated pkl file containing pose estimates and ground truth poses,
Generates a dataset pkl file and does preprocessing for the PyTorch dataloader
:param input_file: path of input pkl
:param output_file: path of output pkl
"""
input_pkl = pickle.load(open(input_file, 'rb'))
random.shuffle(input_pkl)
all_data = []
for idx, sample_dict in enumerate(tqdm(input_pkl)):
ho_gt = HandObject()
# Apply the extrinsic matrix to the pose axis-angle values
cam_extr = sample_dict['hand_extr_gt']
rot_pose = R.from_rotvec(sample_dict['hand_pose_gt'][:3])
rot_extr = R.from_matrix(cam_extr[:3, :3])
rot_new = rot_extr * rot_pose
sample_dict['hand_pose_gt'][:3] = rot_new.as_rotvec() # Overwrite the original axang rotation with new one
ho_gt.load_from_image(sample_dict['hand_beta_gt'], sample_dict['hand_pose_gt'], sample_dict['obj_faces'], sample_dict['obj_verts_gt'], hand_verts=sample_dict['hand_verts_gt'])
ho_gt.calc_dist_contact(hand=True, obj=True)
num_verts_in_contact = np.sum(ho_gt.hand_contact >= 0.9)
ho_gt.hand_contact *= 0
ho_gt.obj_contact *= 0
obj_verts = sample_dict['obj_verts_gt']
ho_pred = HandObject()
ho_pred.load_from_image(sample_dict['hand_beta_pred'], sample_dict['hand_pose_pred'], sample_dict['obj_faces'], obj_verts, hand_verts=sample_dict['hand_verts_pred'])
new_sample = dict()
new_sample['ho_aug'] = ho_pred
new_sample['ho_gt'] = ho_gt
new_sample['obj_sampled_idx'] = np.random.randint(0, len(ho_gt.obj_verts), SAMPLE_VERTS_NUM)
new_sample['hand_feats_aug'], new_sample['obj_feats_aug'] = ho_pred.generate_pointnet_features(new_sample['obj_sampled_idx'])
new_sample['num_verts_in_contact'] = num_verts_in_contact
all_data.append(new_sample)
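        # NOTE: the break below stops after ~10 samples (a debugging limit); remove it to convert the full pkl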
if len(all_data) > 10:
print('Cutting short!')
break
pickle.dump(all_data, open(output_file, 'wb'))
if __name__ == '__main__':
IN_PKL = 'data/pose_estimates.pkl'
OUT_PKL = 'data/ho3d_image.pkl'
process_image_pkl(IN_PKL, OUT_PKL)
|
ContactOpt-main
|
contactopt/create_dataset_im.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import contactopt.pointnet as pointnet
import torch.nn.functional as F
from pytorch3d import ops, transforms
import contactopt.util as util
class DeepContactNet(nn.Module):
def __init__(self, normalize_pts=True):
super(DeepContactNet, self).__init__()
self.pointnet = pointnet.Net()
self.normalize_pts = normalize_pts
pointnet_total_params = sum(p.numel() for p in self.pointnet.parameters() if p.requires_grad)
print('Backbone params: {}'.format(pointnet_total_params))
def forward(self, hand_verts, hand_feats, obj_verts, obj_feats):
device = hand_verts.device
batch_size = hand_verts.shape[0]
out = dict()
if self.normalize_pts:
tform = self.get_normalizing_tform(hand_verts, obj_verts)
hand_verts = util.apply_tform(tform, hand_verts)
obj_verts = util.apply_tform(tform, obj_verts)
# util.vis_pointcloud(obj_verts, hand_verts) # View pointnet input
x, pos, batch = self.verts_to_pointcloud(hand_verts, hand_feats, obj_verts, obj_feats)
contact_batched = self.pointnet(x, pos, batch)
contact = contact_batched.view(batch_size, hand_verts.shape[1] + obj_verts.shape[1], 10)
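        # The trailing 10 is the number of discretized contact bins (NUM_CLASSES in contactopt/pointnet.py)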
out['contact_hand'] = contact[:, :hand_verts.shape[1], :]
out['contact_obj'] = contact[:, hand_verts.shape[1]:, :]
return out
@staticmethod
def get_normalizing_tform(hand_verts, obj_verts, random_rot=True):
"""
Find a 4x4 rigid transform to normalize the pointcloud. We choose the object center of mass to be the origin,
the hand center of mass to be along the +X direction, and the rotation around this axis to be random.
:param hand_verts: (batch, 778, 3)
:param obj_verts: (batch, 2048, 3)
:return: tform: (batch, 4, 4)
"""
with torch.no_grad():
obj_centroid = torch.mean(obj_verts, dim=1) # (batch, 3)
hand_centroid = torch.mean(hand_verts, dim=1)
x_vec = F.normalize(hand_centroid - obj_centroid, dim=1) # From object to hand
if random_rot:
rand_vec = transforms.random_rotations(hand_verts.shape[0], device=hand_verts.device) # Generate random rot matrix
y_vec = F.normalize(torch.cross(x_vec, rand_vec[:, :3, 0]), dim=1) # Make orthogonal
else:
ref_pt = hand_verts[:, 80, :]
y_vec = F.normalize(torch.cross(x_vec, ref_pt - obj_centroid), dim=1) # From object to hand ref point
z_vec = F.normalize(torch.cross(x_vec, y_vec), dim=1) # Z axis
tform = ops.eyes(4, hand_verts.shape[0], device=hand_verts.device)
tform[:, :3, 0] = x_vec
tform[:, :3, 1] = y_vec
tform[:, :3, 2] = z_vec
tform[:, :3, 3] = obj_centroid
return torch.inverse(tform)
@staticmethod
def verts_to_pointcloud(hand_verts, hand_feats, obj_verts, obj_feats):
"""
Convert hand and object vertices and features from Pytorch3D padded format (batch, vertices, N)
to Pytorch-Geometric packed format (all_vertices, N)
"""
batch_size = hand_verts.shape[0]
device = hand_verts.device
ptcloud_pos = torch.cat((hand_verts, obj_verts), dim=1)
ptcloud_x = torch.cat((hand_feats, obj_feats), dim=1)
_, N, _ = ptcloud_pos.shape # (batch_size, num_points, 3)
pos = ptcloud_pos.view(batch_size * N, -1)
batch = torch.zeros((batch_size, N), device=device, dtype=torch.long)
for i in range(batch_size):
batch[i, :] = i
batch = batch.view(-1)
x = ptcloud_x.view(-1, hand_feats.shape[2])
# print('x', x.shape, pos.shape, batch.shape)
return x, pos, batch
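# Hedged sanity-check sketch (not called anywhere): after applying the normalizing
# transform returned by get_normalizing_tform, the object centroid should land at
# the origin and the hand centroid should lie along the +X axis. The inputs are
# illustrative placeholders only.
def _check_normalizing_tform(hand_verts, obj_verts):
    tform = DeepContactNet.get_normalizing_tform(hand_verts, obj_verts)
    hand_n = util.apply_tform(tform, hand_verts)
    obj_n = util.apply_tform(tform, obj_verts)
    print('obj centroid (expect ~0):', obj_n.mean(dim=1))
    print('hand centroid (expect ~[+x, 0, 0]):', hand_n.mean(dim=1))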
|
ContactOpt-main
|
contactopt/deepcontact_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contactopt.loader import ContactDBDataset
from contactopt.deepcontact_net import DeepContactNet
import glob
import argparse
from contactopt.optimize_pose import optimize_pose
from contactopt.visualize import show_optimization
import pickle
from contactopt.hand_object import HandObject
import contactopt.util as util
from tqdm import tqdm
import contactopt.arguments as arguments
import time
import torch
import os
from torch.utils.data import DataLoader
import pytorch3d
import numpy as np
def get_newest_checkpoint():
"""
Finds the newest model checkpoint file, sorted by the date of the file
:return: Model with loaded weights
"""
list_of_files = glob.glob('checkpoints/*.pt')
latest_file = max(list_of_files, key=os.path.getctime)
print('Loading checkpoint file:', latest_file)
model = DeepContactNet()
model.load_state_dict(torch.load(latest_file))
return model
def run_contactopt(args):
"""
Actually run ContactOpt approach. Estimates target contact with DeepContact,
then optimizes it. Performs random restarts if selected.
Saves results to a pkl file.
:param args: input settings
"""
print('Running split', args.split)
dataset = ContactDBDataset(args.test_dataset, min_num_cont=args.min_cont)
shuffle = args.vis or args.partial > 0
print('Shuffle:', shuffle)
test_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = get_newest_checkpoint()
model.to(device)
model.eval()
all_data = list()
for idx, data in enumerate(tqdm(test_loader)):
data_gpu = util.dict_to_device(data, device)
batch_size = data['obj_sampled_idx'].shape[0]
if args.split != 'fine':
with torch.no_grad():
network_out = model(data_gpu['hand_verts_aug'], data_gpu['hand_feats_aug'], data_gpu['obj_sampled_verts_aug'], data_gpu['obj_feats_aug'])
hand_contact_target = util.class_to_val(network_out['contact_hand']).unsqueeze(2)
obj_contact_target = util.class_to_val(network_out['contact_obj']).unsqueeze(2)
else:
hand_contact_target = data_gpu['hand_contact_gt']
obj_contact_target = util.batched_index_select(data_gpu['obj_contact_gt'], 1, data_gpu['obj_sampled_idx'])
if args.sharpen_thresh > 0: # If flag, sharpen contact
print('Sharpening')
obj_contact_target = util.sharpen_contact(obj_contact_target, slope=2, thresh=args.sharpen_thresh)
hand_contact_target = util.sharpen_contact(hand_contact_target, slope=2, thresh=args.sharpen_thresh)
if args.rand_re > 1: # If we desire random restarts
mtc_orig = data_gpu['hand_mTc_aug'].detach().clone()
print('Doing random optimization restarts')
best_loss = torch.ones(batch_size) * 100000
for re_it in range(args.rand_re):
# Add noise to hand translation and rotation
data_gpu['hand_mTc_aug'] = mtc_orig.detach().clone()
random_rot_mat = pytorch3d.transforms.euler_angles_to_matrix(torch.randn((batch_size, 3), device=device) * args.rand_re_rot / 180 * np.pi, 'ZYX')
data_gpu['hand_mTc_aug'][:, :3, :3] = torch.bmm(random_rot_mat, data_gpu['hand_mTc_aug'][:, :3, :3])
data_gpu['hand_mTc_aug'][:, :3, 3] += torch.randn((batch_size, 3), device=device) * args.rand_re_trans
cur_result = optimize_pose(data_gpu, hand_contact_target, obj_contact_target, n_iter=args.n_iter, lr=args.lr,
w_cont_hand=args.w_cont_hand, w_cont_obj=1, save_history=args.vis, ncomps=args.ncomps,
w_cont_asym=args.w_cont_asym, w_opt_trans=args.w_opt_trans, w_opt_pose=args.w_opt_pose,
w_opt_rot=args.w_opt_rot,
caps_top=args.caps_top, caps_bot=args.caps_bot, caps_rad=args.caps_rad,
caps_on_hand=args.caps_hand,
contact_norm_method=args.cont_method, w_pen_cost=args.w_pen_cost,
w_obj_rot=args.w_obj_rot, pen_it=args.pen_it)
if re_it == 0:
out_pose = torch.zeros_like(cur_result[0])
out_mTc = torch.zeros_like(cur_result[1])
obj_rot = torch.zeros_like(cur_result[2])
opt_state = cur_result[3]
loss_val = cur_result[3][-1]['loss']
for b in range(batch_size):
if loss_val[b] < best_loss[b]:
best_loss[b] = loss_val[b]
out_pose[b, :] = cur_result[0][b, :]
out_mTc[b, :, :] = cur_result[1][b, :, :]
obj_rot[b, :, :] = cur_result[2][b, :, :]
# print('Loss, re', re_it, loss_val)
# print('Best loss', best_loss)
else:
result = optimize_pose(data_gpu, hand_contact_target, obj_contact_target, n_iter=args.n_iter, lr=args.lr,
w_cont_hand=args.w_cont_hand, w_cont_obj=1, save_history=args.vis, ncomps=args.ncomps,
w_cont_asym=args.w_cont_asym, w_opt_trans=args.w_opt_trans, w_opt_pose=args.w_opt_pose,
w_opt_rot=args.w_opt_rot,
caps_top=args.caps_top, caps_bot=args.caps_bot, caps_rad=args.caps_rad,
caps_on_hand=args.caps_hand,
contact_norm_method=args.cont_method, w_pen_cost=args.w_pen_cost,
w_obj_rot=args.w_obj_rot, pen_it=args.pen_it)
out_pose, out_mTc, obj_rot, opt_state = result
obj_contact_upscale = util.upscale_contact(data_gpu['mesh_aug'], data_gpu['obj_sampled_idx'], obj_contact_target)
for b in range(obj_contact_upscale.shape[0]): # Loop over batch
gt_ho = HandObject()
in_ho = HandObject()
out_ho = HandObject()
gt_ho.load_from_batch(data['hand_beta_gt'], data['hand_pose_gt'], data['hand_mTc_gt'], data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_gt'], b)
in_ho.load_from_batch(data['hand_beta_aug'], data['hand_pose_aug'], data['hand_mTc_aug'], hand_contact_target, obj_contact_upscale, data['mesh_aug'], b)
out_ho.load_from_batch(data['hand_beta_aug'], out_pose, out_mTc, data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_aug'], b, obj_rot=obj_rot)
# out_ho.calc_dist_contact(hand=True, obj=True)
all_data.append({'gt_ho': gt_ho, 'in_ho': in_ho, 'out_ho': out_ho})
if args.vis:
show_optimization(data, opt_state, hand_contact_target.detach().cpu().numpy(), obj_contact_upscale.detach().cpu().numpy(),
is_video=args.video, vis_method=args.vis_method)
if idx >= args.partial > 0: # Speed up for eval
break
out_file = 'data/optimized_{}.pkl'.format(args.split)
print('Saving to {}. Len {}'.format(out_file, len(all_data)))
pickle.dump(all_data, open(out_file, 'wb'))
if __name__ == '__main__':
util.hack_filedesciptor()
args = arguments.run_contactopt_parse_args()
if args.split == 'aug': # Settings defaults for Perturbed ContactPose
defaults = {'lr': 0.01,
'n_iter': 250,
'w_cont_hand': 2.0,
'sharpen_thresh': -1,
'ncomps': 15,
'w_cont_asym': 2,
'w_opt_trans': 0.3,
'w_opt_rot': 1.0,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 0,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 600,
'pen_it': 0,
'rand_re': 8,
'rand_re_trans': 0.04,
'rand_re_rot': 5,
'w_obj_rot': 0,
'vis_method': 1}
elif args.split == 'im' or args.split == 'demo': # Settings defaults for image-based pose estimates
defaults = {'lr': 0.01,
'n_iter': 250,
'w_cont_hand': 2.5,
'sharpen_thresh': -1,
'ncomps': 15,
'w_cont_asym': 2,
'w_opt_trans': 0.3,
'w_opt_rot': 1,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 0,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 320,
'pen_it': 0,
'rand_re': 8,
'rand_re_trans': 0.02,
'rand_re_rot': 5,
'w_obj_rot': 0,
'vis_method': 1}
elif args.split == 'fine': # Settings defaults for small-scale refinement
defaults = {'lr': 0.003,
'n_iter': 250,
'w_cont_hand': 0,
'sharpen_thresh': 0.3,
'ncomps': 15,
'w_cont_asym': 4,
'w_opt_trans': 0.03,
'w_opt_rot': 1.0,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 5,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 600,
'pen_it': 0,
'rand_re': 1,
'rand_re_trans': 0.00,
'rand_re_rot': 0,
'w_obj_rot': 0,
'vis_method': 5}
for k in defaults.keys(): # Override arguments that have not been manually set with defaults
if vars(args)[k] is None:
vars(args)[k] = defaults[k]
print(args)
start_time = time.time()
run_contactopt(args)
print('Elapsed time:', time.time() - start_time)
|
ContactOpt-main
|
contactopt/run_contactopt.py
|
"""Pytorch-Geometric implementation of Pointnet++
Original source available at https://github.com/rusty1s/pytorch_geometric"""
import torch
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN
from torch_geometric.datasets import ModelNet
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.nn import PointConv, fps, radius, global_max_pool, knn_interpolate
class SAModule(torch.nn.Module):
def __init__(self, ratio, r, nn):
super(SAModule, self).__init__()
self.ratio = ratio
self.r = r
self.conv = PointConv(nn)
def forward(self, x, pos, batch):
idx = fps(pos, batch, ratio=self.ratio)
row, col = radius(pos, pos[idx], self.r, batch, batch[idx],
max_num_neighbors=64)
edge_index = torch.stack([col, row], dim=0)
x = self.conv(x, (pos, pos[idx]), edge_index)
pos, batch = pos[idx], batch[idx]
return x, pos, batch
class GlobalSAModule(torch.nn.Module):
def __init__(self, nn):
super(GlobalSAModule, self).__init__()
self.nn = nn
def forward(self, x, pos, batch):
x = self.nn(torch.cat([x, pos], dim=1))
x = global_max_pool(x, batch)
pos = pos.new_zeros((x.size(0), 3))
batch = torch.arange(x.size(0), device=batch.device)
return x, pos, batch
def MLP(channels):
return Seq(*[
Seq(Lin(channels[i - 1], channels[i]), ReLU(), BN(channels[i]))
for i in range(1, len(channels))
])
class FPModule(torch.nn.Module):
def __init__(self, k, nn):
super(FPModule, self).__init__()
self.k = k
self.nn = nn
def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):
x = knn_interpolate(x, pos, pos_skip, batch, batch_skip, k=self.k)
if x_skip is not None:
x = torch.cat([x, x_skip], dim=1)
x = self.nn(x)
return x, pos_skip, batch_skip
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
NUM_FEATS = 25
NUM_CLASSES = 10
self.sa1_module = SAModule(0.2, 0.1, MLP([3 + NUM_FEATS, 64, 64, 128])) # TODO, reduce PN params
self.sa2_module = SAModule(0.25, 0.2, MLP([128 + 3, 128, 128, 256]))
self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))
self.fp3_module = FPModule(1, MLP([1024 + 256, 256, 256]))
self.fp2_module = FPModule(3, MLP([256 + 128, 256, 128]))
self.fp1_module = FPModule(3, MLP([128 + NUM_FEATS, 128, 128, 128]))
self.lin1 = torch.nn.Linear(128, 128)
self.lin2 = torch.nn.Linear(128, 128)
self.lin3 = torch.nn.Linear(128, NUM_CLASSES)
def forward(self, x, pos, batch):
sa0_out = (x, pos, batch)
sa1_out = self.sa1_module(*sa0_out)
sa2_out = self.sa2_module(*sa1_out)
sa3_out = self.sa3_module(*sa2_out)
fp3_out = self.fp3_module(*sa3_out, *sa2_out)
fp2_out = self.fp2_module(*fp3_out, *sa1_out)
x, _, _ = self.fp1_module(*fp2_out, *sa0_out)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin2(x)
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin3(x)
# return x
# return F.sigmoid(x) # big hyperparam, Bound to 0-1
# print('pre softmax shape', x.shape)
return F.log_softmax(x, dim=-1)
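# Note: the log-softmax output above pairs with the bin-weighted NLLLoss used in
# contactopt/train_deepcontact.py; at inference time run_contactopt.py converts the
# predicted bin distribution back to a scalar contact value via util.class_to_val.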
|
ContactOpt-main
|
contactopt/pointnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from os import path
import sys
import numpy as np
import pickle
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing
from contactopt.hand_object import HandObject
from contactopt.util import *
sys.path.append('../ContactPose') # Change this path to point to the ContactPose repo
from utilities.dataset import get_object_names, ContactPose
object_cut_list = []
# object_cut_list = ['eyeglasses']
def get_all_contactpose_samples():
"""
Gets all participants and objects from ContactPose
Cuts out grasps with two hands or grasps using left hand
:return: list of (participant_num, intent, object_name, ContactPose_object)
"""
samples = []
print('Reading ContactPose dataset')
for participant_id in tqdm(range(1, 51)):
for intent in ['handoff', 'use']:
for object_name in get_object_names(participant_id, intent):
cp = ContactPose(participant_id, intent, object_name, load_mano=False)
if cp._valid_hands != [1]: # If anything else than just the right hand, remove
continue
samples.append((participant_id, intent, object_name, cp))
print('Valid ContactPose samples:', len(samples))
return samples
def generate_contactpose_dataset(dataset, output_file, low_p, high_p, num_pert=1, aug_trans=0.02, aug_rot=0.05, aug_pca=0.3):
"""
Generates a dataset pkl file and does preprocessing for the PyTorch dataloader
:param dataset: List of ContactPose objects
:param output_file: path to output pkl file
:param low_p: Lower split location of the dataset, [0-1)
:param high_p: Upper split location of the dataset, [0-1)
:param num_pert: Number of random perturbations which are computed for every true dataset sample
:param aug_trans: Std deviation of hand translation noise added to the datasets, meters
:param aug_rot: Std deviation of hand rotation noise, axis-angle radians
:param aug_pca: Std deviation of hand pose noise, PCA units
"""
low_split = int(len(dataset) * low_p)
high_split = int(len(dataset) * high_p)
dataset = dataset[low_split:high_split]
if len(object_cut_list) > 0:
dataset = [s for s in dataset if s[2] not in object_cut_list]
print('Some objects are being removed', object_cut_list)
def process_sample(s, idx):
ho_gt = HandObject()
ho_gt.load_from_contactpose(s[3])
sample_list = []
# print('Processing', idx)
for i in range(num_pert):
# Since we're only saving pointers to the data, it's memory efficient
sample_data = dict()
ho_aug = HandObject()
aug_t = np.random.randn(3) * aug_trans
aug_p = np.concatenate((np.random.randn(3) * aug_rot, np.random.randn(15) * aug_pca)).astype(np.float32)
ho_aug.load_from_ho(ho_gt, aug_p, aug_t)
sample_data['ho_gt'] = ho_gt
sample_data['ho_aug'] = ho_aug
sample_data['obj_sampled_idx'] = np.random.randint(0, len(ho_gt.obj_verts), SAMPLE_VERTS_NUM)
sample_data['hand_feats_aug'], sample_data['obj_feats_aug'] = ho_aug.generate_pointnet_features(sample_data['obj_sampled_idx'])
sample_list.append(sample_data)
return sample_list
parallel = True
if parallel:
num_cores = multiprocessing.cpu_count()
print('Running on {} cores'.format(num_cores))
all_data_2d = Parallel(n_jobs=num_cores)(delayed(process_sample)(s, idx) for idx, s in enumerate(tqdm(dataset)))
all_data = [item for sublist in all_data_2d for item in sublist] # flatten 2d list
else:
all_data = [] # Do non-parallel
for idx, s in enumerate(tqdm(dataset)):
all_data.extend(process_sample(s, idx))
print('Writing pickle file, often slow and freezes computer')
pickle.dump(all_data, open(output_file, 'wb'))
if __name__ == '__main__':
train_file = 'data/perturbed_contactpose_train.pkl'
test_file = 'data/perturbed_contactpose_test.pkl'
fine_file = 'data/contactpose_test.pkl'
aug_trans = 0.05
aug_rot = 0.1
aug_pca = 0.5
contactpose_dataset = get_all_contactpose_samples()
# Generate Perturbed ContactPose
generate_contactpose_dataset(contactpose_dataset, train_file, 0.0, 0.8, num_pert=16, aug_trans=aug_trans, aug_rot=aug_rot, aug_pca=aug_pca)
generate_contactpose_dataset(contactpose_dataset, test_file, 0.8, 1.0, num_pert=4, aug_trans=aug_trans, aug_rot=aug_rot, aug_pca=aug_pca)
# Generate "Small Refinements" dataset for optimizing ground-truth thermal contact
generate_contactpose_dataset(contactpose_dataset, fine_file, 0.0, 1.0, num_pert=1, aug_trans=0, aug_rot=0, aug_pca=0)
|
ContactOpt-main
|
contactopt/create_dataset_contactpose.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import hand_object
import os
import util
from scipy.linalg import orthogonal_procrustes
from scipy.spatial.transform import Rotation as R
import trimesh
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
import matplotlib.pyplot as plt
import torch
def np_apply_tform(points, tform):
"""
The non-batched numpy version
:param points: (N, 3)
:param tform: (4, 4)
:return:
"""
points_homo = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)
points_out = np.matmul(tform, points_homo.T).T
return points_out[:, :3]
def get_hand_align_tform(hand_joints):
"""
Find a 4x4 rigid transform to align the joints of a hand to a 'cardinal rotation'
:param hand_joints: (21, 3)
:return: tform: (4, 4)
"""
center_joint = 0
x_joint = 2
y_joint = 17
trans = hand_joints[center_joint, :]
x_vec = hand_joints[x_joint, :] - hand_joints[center_joint, :]
x_vec = x_vec / np.linalg.norm(x_vec)
y_vec = hand_joints[y_joint, :] - hand_joints[center_joint, :]
y_vec = np.cross(x_vec, y_vec)
y_vec = y_vec / np.linalg.norm(y_vec)
z_vec = np.cross(x_vec, y_vec)
z_vec = z_vec / np.linalg.norm(z_vec)
tform = np.eye(4)
tform[:3, 0] = x_vec
tform[:3, 1] = y_vec
tform[:3, 2] = z_vec
tform[:3, 3] = trans
return np.linalg.inv(tform)
def calc_procrustes(points1, points2, return_tform=False):
""" Align the predicted entity in some optimality sense with the ground truth.
Does NOT align scale
https://github.com/shreyashampali/ho3d/blob/master/eval.py """
t1 = points1.mean(0) # Find centroid
t2 = points2.mean(0)
points1_t = points1 - t1 # Zero mean
points2_t = points2 - t2
R, s = orthogonal_procrustes(points1_t, points2_t) # Run procrustes alignment, returns rotation matrix and scale
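    # scipy's orthogonal_procrustes(A, B) returns R minimizing ||A @ R - B||, so applying R.T maps points2 back onto points1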
points2_t = np.dot(points2_t, R.T) # Apply tform to second pointcloud
points2_t = points2_t + t1
if return_tform:
return R, t1 - t2
else:
return points2_t
def align_by_tform(mtx, tform):
t2 = mtx.mean(0)
mtx_t = mtx - t2
R, t1 = tform
return np.dot(mtx_t, R.T) + t1 + t2
def get_trans_rot_err(points1, points2):
"""
Given two pointclouds, find the error in centroid and rotation
:param points1: numpy (V, 3)
:param points2: numpy (V, 3)
:return: translation error (meters), rotation error (degrees)
"""
tform = calc_procrustes(points1, points2, return_tform=True)
translation_error = np.linalg.norm(tform[1], 2)
r = R.from_matrix(tform[0])
rotation_error = r.magnitude() * 180 / np.pi
return translation_error, rotation_error
def geometric_eval(ho_test, ho_gt):
"""
Computes many statistics about ground truth and HO
Note that official HO-3D metrics are available here, but they only consider the hand, and I think they do too much alignment
https://github.com/shreyashampali/ho3d/blob/master/eval.py
:param ho_test: hand-object under test
:param ho_gt: ground-truth hand-object
:return: dictionary of stats
"""
stats = dict()
stats['unalign_hand_verts'] = util.calc_l2_err(ho_gt.hand_verts, ho_test.hand_verts, axis=1)
stats['unalign_hand_joints'] = util.calc_l2_err(ho_gt.hand_joints, ho_test.hand_joints, axis=1)
stats['unalign_obj_verts'] = util.calc_l2_err(ho_gt.obj_verts, ho_test.obj_verts, axis=1)
root_test = ho_test.hand_joints[0, :]
root_gt = ho_gt.hand_joints[0, :]
stats['rootalign_hand_joints'] = util.calc_l2_err(ho_gt.hand_joints - root_gt, ho_test.hand_joints - root_test, axis=1)
stats['rootalign_obj_verts'] = util.calc_l2_err(ho_gt.obj_verts - root_gt, ho_test.obj_verts - root_test, axis=1)
obj_cent_gt = ho_gt.obj_verts.mean(0)
obj_cent_test = ho_test.obj_verts.mean(0)
stats['objalign_hand_joints'] = util.calc_l2_err(ho_gt.hand_joints - obj_cent_gt, ho_test.hand_joints - obj_cent_test, axis=1)
hand_joints_align_gt = np_apply_tform(ho_gt.hand_joints, get_hand_align_tform(ho_gt.hand_joints))
hand_joints_align_test = np_apply_tform(ho_test.hand_joints, get_hand_align_tform(ho_test.hand_joints))
hand_verts_align_gt = np_apply_tform(ho_gt.hand_verts, get_hand_align_tform(ho_gt.hand_joints))
hand_verts_align_test = np_apply_tform(ho_test.hand_verts, get_hand_align_tform(ho_test.hand_joints))
stats['handalign_hand_joints'] = util.calc_l2_err(hand_joints_align_gt, hand_joints_align_test, axis=1)
stats['handalign_hand_verts'] = util.calc_l2_err(hand_verts_align_gt, hand_verts_align_test, axis=1)
stats['verts'] = ho_gt.obj_verts.shape[0]
return stats
|
ContactOpt-main
|
contactopt/geometric_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from os import path as osp
import numpy as np
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
import json
import transforms3d.quaternions as txq
import torch
import pytorch3d
from pytorch3d.structures import Meshes
import contactopt.util as util
from manopth.manolayer import ManoLayer
from contactopt.diffcontact import calculate_contact_capsule
import matplotlib.pyplot as plt
def mano_get_faces():
return util.get_mano_closed_faces()
class HandObject:
"""
Universal data structure to handle hand, object, and contact data.
This class has many data elements, not all of them are always populated.
Has many loader functions to take data from multiple sources.
"""
closed_faces = util.get_mano_closed_faces()
def __init__(self):
self.is_left = None
self.hand_beta = None
self.hand_pose = None
self.hand_mTc = None
self.hand_contact = None
self.hand_verts = None
self.hand_joints = None
self.obj_verts = None
self.obj_faces = None
self.obj_contact = None
self.path = None
self.obj_normals = None
def load_from_verts(self, hand_verts, obj_faces, obj_verts):
"""Load from hand/object vertices alone"""
self.obj_verts = obj_verts
self.obj_faces = obj_faces
self.hand_verts = hand_verts
self.calc_dist_contact(hand=True, obj=True)
def load_from_image(self, hand_beta, hand_pose, obj_faces, obj_verts, hand_verts=None):
"""Load from image-based results pkl file. Mano root translation is not known, but hand vertices are"""
self.hand_beta = hand_beta
self.hand_pose = hand_pose
self.hand_mTc = np.eye(4)
self.obj_verts = obj_verts
self.obj_faces = obj_faces
self.run_mano() # Run mano model forwards
if hand_verts is not None:
displ = hand_verts[0, :] - self.hand_verts[0, :] # Find translation by comparing vertices of aligned hands
self.hand_mTc[:3, 3] = displ
self.run_mano() # Rerun mano model to account for translation
mean_err = np.linalg.norm(self.hand_verts - hand_verts, 2, 1)
if mean_err.mean() > 1e-6: # Check if there's much error in reconstruction
print('Mean verts error', mean_err.mean())
print('Mano reconstruction failure')
# self.calc_dist_contact(hand=True, obj=True)
self.hand_contact = np.zeros((self.hand_verts.shape[0], 1)) # Set to zero since we don't know the ground truth
self.obj_contact = np.zeros((self.obj_verts.shape[0], 1))
def load_from_batch(self, hand_beta, hand_pose, hand_mTc, hand_contact, obj_contact, obj_mesh, idx=0, obj_rot=None):
"""Generate HO object from a torch dataloader batch"""
obj_verts = obj_mesh.verts_list()[idx]
if obj_rot is not None:
obj_verts = util.apply_rot(obj_rot[idx, :, :].unsqueeze(0).detach().cpu(), obj_verts.unsqueeze(0), around_centroid=True).squeeze(0)
self.hand_beta = hand_beta[idx, :].detach().cpu().numpy()
self.hand_pose = hand_pose[idx, :].detach().cpu().numpy()
self.hand_mTc = hand_mTc[idx, :, :].detach().cpu().numpy()
self.hand_contact = hand_contact[idx, :, :].detach().cpu().numpy()
self.obj_verts = obj_verts.detach().cpu().numpy()
self.obj_faces = obj_mesh.faces_list()[idx].detach().cpu().numpy()
self.obj_contact = obj_contact[idx, :self.obj_verts.shape[0], :].detach().cpu().numpy() # Since we're using a padded array, need to cut off some
self.run_mano()
def load_from_contactpose(self, cp_obj):
"""Load HO object from ContactPose dataset"""
if not osp.isfile(cp_obj.contactmap_filename):
raise FileNotFoundError('Could not find {}'.format(cp_obj.contactmap_filename))
obj_mesh = o3dio.read_triangle_mesh(cp_obj.contactmap_filename) # Includes object mesh and contact map embedded as vertex colors
vertex_colors = np.array(obj_mesh.vertex_colors, dtype=np.float32)
self.obj_contact = np.expand_dims(util.fit_sigmoid(vertex_colors[:, 0]), axis=1) # Normalize with sigmoid, shape (V, 1)
self.obj_verts = np.array(obj_mesh.vertices, dtype=np.float32) # Keep as floats since torch uses floats
self.obj_faces = np.array(obj_mesh.triangles)
for idx, mp in enumerate(cp_obj.mano_params):
if mp is None:
continue
self.is_left = idx == 0 # Left then right
self.hand_beta = np.array(mp['betas']) # 10 shape PCA parameters
self.hand_pose = np.array(mp['pose']) # 18 dim length, first 3 ax-angle, 15 PCA pose
mTc = mp['hTm']
# mTc = np.linalg.inv(mTc) # World to object
self.hand_mTc = mTc
if self.is_left:
                raise ValueError("Pipeline currently can't handle left hands")
self.run_mano()
self.calc_dist_contact(hand=True, obj=False)
def load_from_ho(self, ho, aug_pose=None, aug_trans=None):
"""Load from another HandObject obj, potentially with augmentation"""
self.hand_beta = np.array(ho.hand_beta)
self.hand_pose = np.array(ho.hand_pose)
self.hand_mTc = np.array(ho.hand_mTc)
self.obj_verts = ho.obj_verts
self.obj_faces = ho.obj_faces
self.obj_contact = ho.obj_contact
if aug_pose is not None:
self.hand_pose += aug_pose
if aug_trans is not None:
self.hand_mTc[:3, 3] += aug_trans
self.run_mano()
# self.calc_dist_contact(hand=True, obj=False) # DONT calculate hand contact, since it's not ground truth
def load_from_mano_params(self, hand_beta, hand_pose, hand_trans, obj_faces, obj_verts):
"""Load from mano parameters and object mesh"""
self.hand_beta = np.array(hand_beta)
self.hand_pose = np.array(hand_pose)
self.hand_mTc = np.eye(4)
self.hand_mTc[:3, 3] = hand_trans
self.obj_verts = np.array(obj_verts)
self.obj_faces = np.array(obj_faces)
self.run_mano()
self.hand_contact = np.zeros((self.hand_verts.shape[0], 1)) # Set to zero since we don't know the ground truth
self.obj_contact = np.zeros((self.obj_verts.shape[0], 1))
def calc_dist_contact(self, hand=True, obj=False, special_contact=False):
"""Set hand and object contact maps based on DiffContact method.
This is sometimes used when ground truth contact is not known"""
object_mesh = Meshes(verts=[torch.Tensor(self.obj_verts)], faces=[torch.Tensor(self.obj_faces)])
hand_mesh = Meshes(verts=torch.Tensor(self.hand_verts).unsqueeze(0), faces=torch.Tensor(self.closed_faces).unsqueeze(0))
hand_verts = torch.Tensor(self.hand_verts).unsqueeze(0)
if not special_contact:
obj_contact, hand_contact = calculate_contact_capsule(hand_verts, hand_mesh.verts_normals_padded(), object_mesh.verts_padded(), object_mesh.verts_normals_padded())
else:
# hand_verts_subdivided = util.subdivide_verts(hand_mesh.edges_packed().unsqueeze(0), hand_verts)
# hand_normals_subdivided = util.subdivide_verts(hand_mesh.edges_packed().unsqueeze(0), hand_mesh.verts_normals_padded())
hand_verts_subdivided = hand_verts
hand_normals_subdivided = hand_mesh.verts_normals_padded()
obj_contact, hand_contact = calculate_contact_capsule(hand_verts_subdivided, hand_normals_subdivided, object_mesh.verts_padded(),
object_mesh.verts_normals_padded(), caps_rad=0.003) # needed for paper vis?
if hand:
self.hand_contact = hand_contact.squeeze(0).detach().cpu().numpy()
if obj:
self.obj_contact = obj_contact.squeeze(0).detach().cpu().numpy()
def run_mano(self):
"""Runs forward_mano, computing the hand vertices and joints based on pose/beta parameters.
Handles numpy-pytorch-numpy conversion"""
if self.hand_pose.shape[0] == 48: # Special case when we're loading GT honnotate
mano_model = ManoLayer(mano_root='mano/models', joint_rot_mode="axisang", use_pca=False, center_idx=None, flat_hand_mean=True)
else: # Everything else
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=15, side='right', flat_hand_mean=False)
pose_tensor = torch.Tensor(self.hand_pose).unsqueeze(0)
beta_tensor = torch.Tensor(self.hand_beta).unsqueeze(0)
tform_tensor = torch.Tensor(self.hand_mTc).unsqueeze(0)
mano_verts, mano_joints = util.forward_mano(mano_model, pose_tensor, beta_tensor, [tform_tensor])
self.hand_verts = mano_verts.squeeze().detach().numpy()
self.hand_joints = mano_joints.squeeze().detach().numpy()
def generate_pointnet_features(self, obj_sampled_idx):
"""Calculates per-point features for pointnet. DeepContact uses these features"""
obj_mesh = Meshes(verts=[torch.Tensor(self.obj_verts)], faces=[torch.Tensor(self.obj_faces)])
hand_mesh = Meshes(verts=[torch.Tensor(self.hand_verts)], faces=[torch.Tensor(util.get_mano_closed_faces())])
obj_sampled_verts_tensor = obj_mesh.verts_padded()[:, obj_sampled_idx, :]
_, _, obj_nearest = pytorch3d.ops.knn_points(obj_sampled_verts_tensor, hand_mesh.verts_padded(), K=1, return_nn=True) # Calculate on object
_, _, hand_nearest = pytorch3d.ops.knn_points(hand_mesh.verts_padded(), obj_sampled_verts_tensor, K=1, return_nn=True) # Calculate on hand
obj_normals = obj_mesh.verts_normals_padded()
        obj_normals = torch.nn.functional.normalize(obj_normals, dim=2, eps=1e-12)  # Re-normalize; PyTorch3D can return slightly unnormalized vertex normals
norms = torch.sum(obj_normals * obj_normals, dim=2) # Dot product
obj_normals[norms < 0.8] = 0.6 # TODO hacky get-around when normal finding fails completely
self.obj_normals = obj_normals.detach().squeeze().numpy()
obj_sampled_verts = self.obj_verts[obj_sampled_idx, :]
obj_sampled_normals = obj_normals[0, obj_sampled_idx, :].detach().numpy()
hand_normals = hand_mesh.verts_normals_padded()[0, :, :].detach().numpy()
hand_centroid = np.mean(self.hand_verts, axis=0)
obj_centroid = np.mean(self.obj_verts, axis=0)
# Hand features
hand_one_hot = np.ones((self.hand_verts.shape[0], 1))
hand_vec_to_closest = hand_nearest.squeeze().numpy() - self.hand_verts
hand_dist_to_closest = np.expand_dims(np.linalg.norm(hand_vec_to_closest, 2, 1), axis=1)
hand_dist_along_normal = np.expand_dims(np.sum(hand_vec_to_closest * hand_normals, axis=1), axis=1)
hand_dist_to_joint = np.expand_dims(self.hand_verts, axis=1) - np.expand_dims(self.hand_joints, axis=0) # Expand for broadcasting
hand_dist_to_joint = np.linalg.norm(hand_dist_to_joint, 2, 2)
hand_dot_to_centroid = np.expand_dims(np.sum((self.hand_verts - obj_centroid) * hand_normals, axis=1), axis=1)
# Object features
obj_one_hot = np.zeros((obj_sampled_verts.shape[0], 1))
obj_vec_to_closest = obj_nearest.squeeze().numpy() - obj_sampled_verts
obj_dist_to_closest = np.expand_dims(np.linalg.norm(obj_vec_to_closest, 2, 1), axis=1)
obj_dist_along_normal = np.expand_dims(np.sum(obj_vec_to_closest * obj_sampled_normals, axis=1), axis=1)
obj_dist_to_joint = np.expand_dims(obj_sampled_verts, axis=1) - np.expand_dims(self.hand_joints, axis=0) # Expand for broadcasting
obj_dist_to_joint = np.linalg.norm(obj_dist_to_joint, 2, 2)
obj_dot_to_centroid = np.expand_dims(np.sum((obj_sampled_verts - hand_centroid) * obj_sampled_normals, axis=1), axis=1)
# hand_feats = np.concatenate((hand_one_hot, hand_normals, hand_vec_to_closest, hand_dist_to_closest, hand_dist_along_normal, hand_dist_to_joint), axis=1)
# obj_feats = np.concatenate((obj_one_hot, obj_sampled_normals, obj_vec_to_closest, obj_dist_to_closest, obj_dist_along_normal, obj_dist_to_joint), axis=1)
hand_feats = np.concatenate((hand_one_hot, hand_dot_to_centroid, hand_dist_to_closest, hand_dist_along_normal, hand_dist_to_joint), axis=1)
obj_feats = np.concatenate((obj_one_hot, obj_dot_to_centroid, obj_dist_to_closest, obj_dist_along_normal, obj_dist_to_joint), axis=1)
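        # Per-point feature size: 1 (hand/object one-hot) + 1 (dot to centroid) + 1 (dist to closest)
        # + 1 (dist along normal) + 21 (dist to each hand joint) = 25, matching NUM_FEATS in contactopt/pointnet.py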
return hand_feats, obj_feats
def get_o3d_meshes(self, hand_contact=False, normalize_pos=False):
"""Returns Open3D meshes for visualization
Draw with: o3dv.draw_geometries([hand_mesh, obj_mesh])"""
hand_color = np.asarray([224.0, 172.0, 105.0]) / 255
obj_color = np.asarray([100.0, 100.0, 100.0]) / 255
obj_centroid = self.obj_verts.mean(0)
if not normalize_pos:
obj_centroid *= 0
hand_mesh = o3dg.TriangleMesh()
hand_mesh.vertices = o3du.Vector3dVector(self.hand_verts - obj_centroid)
hand_mesh.triangles = o3du.Vector3iVector(HandObject.closed_faces)
hand_mesh.compute_vertex_normals()
if hand_contact and self.hand_contact.mean() != 0:
util.mesh_set_color(self.hand_contact, hand_mesh)
else:
hand_mesh.paint_uniform_color(hand_color)
obj_mesh = o3dg.TriangleMesh()
obj_mesh.vertices = o3du.Vector3dVector(self.obj_verts - obj_centroid)
obj_mesh.triangles = o3du.Vector3iVector(self.obj_faces)
obj_mesh.compute_vertex_normals()
if self.obj_contact.mean() != 0:
util.mesh_set_color(self.obj_contact, obj_mesh)
else:
obj_mesh.paint_uniform_color(obj_color)
return hand_mesh, obj_mesh
def vis_hand_object(self):
"""Runs Open3D visualizer for the current data"""
hand_mesh, obj_mesh = self.get_o3d_meshes(hand_contact=True)
o3dv.draw_geometries([hand_mesh, obj_mesh])
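# Hedged usage sketch (not called anywhere): build a HandObject straight from vertex
# arrays and visualize it. The arguments are illustrative placeholders; load_from_verts
# also estimates contact maps with DiffContact since no ground truth is available.
def _example_hand_object_from_verts(hand_verts, obj_faces, obj_verts):
    ho = HandObject()
    ho.load_from_verts(hand_verts, obj_faces, obj_verts)
    ho.vis_hand_object()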
|
ContactOpt-main
|
contactopt/hand_object.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import contactopt.arguments as arguments
from contactopt.deepcontact_net import DeepContactNet
from tqdm import tqdm
import contactopt.util as util
from contactopt.loader import ContactDBDataset
def calc_losses(network_out, contact_obj_gt, contact_hand_gt, sampled_verts_idx):
losses = dict()
batch_size = contact_obj_gt.shape[0]
batch = torch.zeros(sampled_verts_idx.shape, device=device, dtype=torch.long)
for i in range(batch_size):
batch[i, :] = i
batch = batch.view(-1)
contact_obj_gt = contact_obj_gt[batch, sampled_verts_idx.view(-1), :] # Select sampled verts
contact_obj_gt = contact_obj_gt.reshape(batch_size, sampled_verts_idx.shape[1], 1) # Reshape into network's shape
class_hand_gt = util.val_to_class(contact_hand_gt).squeeze(2)
class_obj_gt = util.val_to_class(contact_obj_gt).squeeze(2)
# print('class obj gt', class_obj_gt.shape, network_out['contact_obj'], class_obj_gt)
losses['contact_obj'] = criterion(network_out['contact_obj'].permute(0, 2, 1), class_obj_gt)
losses['contact_hand'] = criterion(network_out['contact_hand'].permute(0, 2, 1), class_hand_gt)
return losses
def train_epoch(epoch):
model.train()
scheduler.step()
loss_meter = util.AverageMeter('Loss', ':.2f')
for idx, data in enumerate(tqdm(train_loader)):
data = util.dict_to_device(data, device)
batch_size = data['hand_pose_gt'].shape[0]
optimizer.zero_grad()
out = model(data['hand_verts_aug'], data['hand_feats_aug'], data['obj_sampled_verts_aug'], data['obj_feats_aug'])
losses = calc_losses(out, data['obj_contact_gt'], data['hand_contact_gt'], data['obj_sampled_idx'])
loss = losses['contact_obj'] * args.loss_c_obj + losses['contact_hand'] * args.loss_c_hand
loss_meter.update(loss.item(), batch_size) # TODO better loss monitoring
loss.backward()
optimizer.step()
if idx % 10 == 0:
print('{} / {}'.format(idx, len(train_loader)), loss_meter)
global_iter = epoch * len(train_loader) + idx
writer.add_scalar('training/loss_contact_obj', losses['contact_obj'], global_iter)
writer.add_scalar('training/loss_contact_hand', losses['contact_hand'], global_iter)
writer.add_scalar('training/lr', scheduler.get_lr(), global_iter)
print('Train epoch: {}. Avg loss {:.4f} --------------------'.format(epoch, loss_meter.avg))
def test():
model.eval()
for idx, data in enumerate(test_loader):
data = util.dict_to_device(data, device)
with torch.no_grad():
out = model(data['hand_verts_aug'], data['hand_feats_aug'], data['obj_sampled_verts_aug'], data['obj_feats_aug'])
losses = calc_losses(out, data['obj_contact_gt'], data['hand_contact_gt'], data['obj_sampled_idx'])
global_iter = epoch * len(train_loader)
writer.add_scalar('testing/loss_contact_obj', losses['contact_obj'], global_iter)
writer.add_scalar('testing/loss_contact_hand', losses['contact_hand'], global_iter)
# print('Test epoch: Mean joint err {:.2f} cm --------------------'.format(joint_err_meter.avg))
if __name__ == '__main__':
util.hack_filedesciptor()
args = arguments.train_network_parse_args()
train_dataset = ContactDBDataset(args.train_dataset, train=True)
test_dataset = ContactDBDataset(args.test_dataset)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DeepContactNet().to(device)
if args.checkpoint != '':
print('Attempting to load checkpoint file:', args.checkpoint)
pretrained_dict = torch.load(args.checkpoint)
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and 'mano' not in k}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
bin_weights = torch.Tensor(np.loadtxt(util.DEEPCONTACT_BIN_WEIGHTS_FILE)).to(device)
# criterion = torch.nn.CrossEntropyLoss(weight=bin_weights)
criterion = torch.nn.NLLLoss(weight=bin_weights)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10], gamma=0.1) # TODO automatic?
writer = SummaryWriter(logdir='runs/' + args.desc)
writer.add_text('Hyperparams', args.all_str, 0)
for epoch in range(1, args.epochs):
train_epoch(epoch)
test()
torch.save(model.state_dict(), 'checkpoints/{}.pt'.format(args.desc))
print('\n')
|
ContactOpt-main
|
contactopt/train_deepcontact.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
parser = argparse.ArgumentParser(description='Generate Data')
parser.add_argument('--env-name', default='InvertedPendulum-v1',
help='environment to train on (default: InvertedPendulum-v1)')
parser.add_argument('--N', type=int, default=1000000)
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--out', type=str, default='/data/ddr')
parser.add_argument('--num-processes', type=int, default=40,
help='how many training processes to use (default: 40)')
parser.add_argument('--rollout', type=int, default=20, help="rollout for goal")
parser.add_argument('--method', type=str, default='random',
help='["random", "pixel_control"]')
parser.add_argument('--render', action='store_true')
parser.add_argument('--reset', action='store_true')
parser.add_argument('--from-policy', type=str, default=None,
help="use reward module as policy")
parser.add_argument('--framework', default='gym',
help='framework of env (default: gym)')
parser.add_argument('--maze-id', type=int, default=0)
parser.add_argument('--maze-length', type=int, default=1)
parser.add_argument('--single-env', action='store_true')
parser.add_argument('--random-start', action='store_true')
parser.add_argument('-v', action='store_true', help='verbose logging')
parser.add_argument('--max-episode-length', type=int, default=500,
help='maximum length of an episode (default: 500)')
parser.add_argument('--file-path', type=str, default=None,
help='path to XML file for mujoco')
def generate_data(rank, args, start, end):
    from envs import create_env, set_seed, get_obs
    from model import R_Module
    # get_action and Variable are needed below when sampling from a loaded policy (--from-policy);
    # with the 'spawn' start method the imports in the __main__ block are not visible here
    from common import get_action
    from torch.autograd import Variable
    import torch
print(rank, "started")
env = create_env(args.env_name, framework=args.framework, args=args)
env = set_seed(args.seed + rank, env, args.framework)
state = get_obs(env, args.framework)
if args.from_policy is not None:
model_state, r_args = torch.load(args.from_policy)
policy = R_Module(env.action_space.shape[0],
r_args.dim,
discrete=r_args.discrete, baseline=r_args.baseline,
state_space=env.observation_space.shape[0])
policy.load_state_dict(model_state)
policy.eval()
states = []
actions = []
i = start
done = False
while i < end:
if i % 100 == 0:
print(rank, i)
ep_states = []
ep_actions = []
if args.from_policy is not None:
cx_p = Variable(torch.zeros(1, r_args.dim))
hx_p = Variable(torch.zeros(1, r_args.dim))
for j in range(args.rollout):
if args.from_policy is not None:
value, logit, (hx_p, cx_p) = policy(
state.unsqueeze(0), (hx_p, cx_p))
a, _, _ = get_action(logit, r_args.discrete)
else:
a = env.action_space.sample()
ep_actions.append(a)
state = get_obs(env, args.framework)
env.step(a)
if args.render:
env.render()
ep_states.append(state)
final_state = get_obs(env, args.framework)
ep_states.append(final_state)
states.append(ep_states)
actions.append(ep_actions)
i += 1
# reset the environment here
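        # `done` is never updated from env.step() above, so without --reset the
        # environment keeps rolling forward between recorded trajectories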
if done or args.reset:
env.reset()
done = False
torch.save((states, actions), os.path.join(
args.out_dir, 'states_actions_%s_%s.pt' % (start, end)))
if __name__ == '__main__':
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn')
from torch.autograd import Variable
from envs import create_env, set_seed, get_obs
from model import R_Module
os.environ['OMP_NUM_THREADS'] = '1'
args = parser.parse_args()
env_name = args.env_name
env_name += '_rollout%s' % args.rollout
if args.env_name.endswith('MazeEnv'):
env_name += 'mazeid%slength%s' % (args.maze_id, args.maze_length)
if args.single_env and args.maze_id == -1:
env = create_env(args.env_name, framework=args.framework, args=args)
env_name += '_single_env'
args.maze_structure = env._env.MAZE_STRUCTURE
if args.random_start:
env_name += '_randomstart'
if args.file_path is not None:
env_name += '_transfer'
if args.framework == 'mazebase':
env_name += '_rollout_%s_length_%s' % (args.rollout, args.maze_length)
args.out_dir = os.path.join(args.out, env_name)
print(args)
print(args.out_dir)
os.makedirs(args.out_dir, exist_ok=True)
processes = []
block = int(args.N / args.num_processes)
for rank in range(0, args.num_processes):
start = rank * block
end = (rank + 1) * block
p = mp.Process(target=generate_data, args=(rank, args, start, end))
p.start()
processes.append(p)
torch.save(args, os.path.join(args.out_dir, 'args.pt'))
# exit cleanly
for p in processes:
p.join()
|
ddr-master
|
generate_dynamics_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
from itertools import chain
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from envs import *
from model import Encoder, Decoder, D_Module, R_Module
from train_dynamics_module import D_Module, get_dynamics_losses
from common import *
from tensorboardX import SummaryWriter
def ensure_shared_grads(model, shared_model):
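    # A3C/hogwild-style update: copy this worker's gradients into the shared model's
    # parameters, unless the shared gradients have already been populated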
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def train_online(rank, args, shared_model, optimizer=None, writer_dir=None):
"""
Arguments:
    - writer_dir: directory for the tensorboard summaries (the SummaryWriter object itself could not be passed in directly, so it is created inside this function)
"""
# create writer here itself
writer = None
if writer_dir is not None:
writer = SummaryWriter(log_dir=writer_dir)
shared_enc, shared_dec, shared_d_module, shared_r_module = shared_model
running_t, running_reward, running_value_loss, running_policy_loss, \
running_reward_loss = 0, 0, 0, 0, 0
torch.manual_seed(args.seed + rank)
env = create_env(args.env_name, framework=args.framework, args=args)
set_seed(args.seed + rank, env, args.framework)
enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
d_module = D_Module(env.action_space.shape[0], args.dim, args.discrete)
r_module = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=False,
state_space=env.observation_space.shape[0])
all_params = chain(enc.parameters(), dec.parameters(),
d_module.parameters(),
r_module.parameters())
# no shared adam ?
if optimizer is None:
optimizer = optim.Adam(all_params, lr=args.lr)
enc.train()
dec.train()
d_module.train()
r_module.train()
results_dict = {
'enc': None,
'dec': None,
'd_module': None,
'args': args,
'reward': [],
'policy_loss': [],
'value_loss': [],
'mean_entropy': [],
'mean_predicted_value': [],
'dec_losses': [],
'forward_losses': [],
'inverse_losses': [],
'total_losses': [],
}
episode_length = 0
i_episode, total_episode = 0, 0
done = True
start = time.time()
while total_episode < args.num_episodes:
# Sync with the shared model
r_module.load_state_dict(shared_r_module.state_dict())
d_module.load_state_dict(shared_d_module.state_dict())
enc.load_state_dict(shared_enc.state_dict())
dec.load_state_dict(shared_dec.state_dict())
if done:
cx_p = Variable(torch.zeros(1, args.dim))
hx_p = Variable(torch.zeros(1, args.dim))
cx_d = Variable(torch.zeros(1, args.dim))
hx_d = Variable(torch.zeros(1, args.dim))
i_episode += 1
episode_length = 0
total_episode = args.num_processes * (i_episode - 1) + rank
start = time.time()
last_episode_length = episode_length
if not args.single_env and args.env_name.endswith('MazeEnv'): # generate new maze
env = create_env(
args.env_name, framework=args.framework, args=args)
s = env.reset()
s = Variable(torch.from_numpy(s).float())
else:
cx_p = Variable(cx_p.data)
hx_p = Variable(hx_p.data)
cx_d = Variable(cx_d.data)
hx_d = Variable(hx_d.data)
s = Variable(s.data)
z = enc(s).unsqueeze(0)
s_hat = dec(z)
values = []
rhats = []
log_probs = []
rewards = []
entropies = []
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
for step in range(args.num_steps):
episode_length += 1
value, rhat, logit, (hx_p, cx_p) = r_module((
z.detach(), (hx_p, cx_p)))
action, entropy, log_prob = get_action(logit, discrete=args.discrete)
vlog("Action: %s\t Bounds: %s" % (str(action), str((env.action_space.low, env.action_space.high))), args.v)
entropies.append(entropy)
s_prime, reward, done, _ = env.step(action.data.numpy())
s_prime = Variable(torch.from_numpy(s_prime).float())
done = done or episode_length >= args.max_episode_length
z_prime = enc(s_prime)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, action, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
action)
values.append(value)
rhats.append(rhat)
log_probs.append(log_prob)
rewards.append(reward)
dec_loss += d_loss
inv_loss += i_loss
model_loss += m_loss
recon_loss += r_loss
forward_loss += f_loss
z = z_prime_hat
s = s_prime
s_hat = s_prime_hat
if done:
break
R = torch.zeros(1, 1)
if not done:
value, _, _, _ = r_module((z, (hx_p, cx_p)))
R = value.data
values.append(Variable(R))
policy_loss = 0
value_loss = 0
rew_loss = 0
pred_reward_loss = 0
R = Variable(R)
gae = torch.zeros(1, 1)
vlog("values: %s" % str([v.data[0,0] for v in values]), args.v)
vlog("rhats: %s" % str(rhats), args.v)
for i in reversed(range(len(rewards))):
R = args.gamma * R + rewards[i]
advantage = R - values[i]
value_loss += 0.5 * advantage.pow(2)
# reward loss
rew_loss += F.mse_loss(rhats[i], Variable(torch.from_numpy(
np.array([rewards[i]])).float()))
# Generalized Advantage Estimation
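            # delta_t = r_t + gamma * V(s_{t+1}) - V(s_t);  gae_t = delta_t + gamma * tau * gae_{t+1}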
delta_t = rewards[i] + args.gamma * values[i + 1].data \
- values[i].data
gae = gae * args.gamma * args.tau + delta_t
if args.discrete:
policy_loss = policy_loss - log_probs[i] * Variable(gae) \
- args.entropy_coef * entropies[i]
else:
policy_loss = policy_loss - (log_probs[i] * Variable(gae).expand_as(
log_probs[i])).sum() - (args.entropy_coef * entropies[i]).sum()
optimizer.zero_grad()
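        # exponentially-weighted running averages over roughly the last 100 episodes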
U = 1. / min(i_episode, 100)
running_reward = running_reward * (1 - U) + sum(rewards) * U
running_t = running_t * (1 - U) + episode_length * U
running_policy_loss = running_policy_loss * (1 - U) + policy_loss.data[0] * U
running_value_loss = running_value_loss * (1 - U) + \
args.value_loss_coef * value_loss.data[0, 0] * U
running_reward_loss = running_reward_loss * (1 - U) + \
args.rew_loss_coef * rew_loss.data[0] * U
mean_entropy = np.mean([e.sum().data[0] for e in entropies])
mean_predicted_value = np.mean([v.sum().data[0] for v in values])
loss = policy_loss + args.value_loss_coef * value_loss + \
args.rew_loss_coef * rew_loss + args.inv_loss_coef * inv_loss + \
args.dec_loss_coef * dec_loss + forward_loss
if total_episode % args.log_interval == 0 and done:
if not args.discrete:
sample_logits = (list(logit[0].data[0].numpy()),
list(logit[1].data[0].numpy()))
else:
sample_logits = list(logit.data[0].numpy())
log(
'Episode {}\t'.format(total_episode) + \
'Avg reward: {:.2f}\tAverage length: {:.2f}\t'.format(
running_reward, running_t) + \
'Entropy: {:.2f}\tTime: {:.2f}\tRank: {}\t'.format(
mean_entropy, time.time() - start, rank) + \
'Policy Loss: {:.2f}\t'.format(running_policy_loss) + \
'Reward Loss: {:.2f}\t'.format(running_reward_loss) + \
'Weighted Value Loss: {:.2f}\t'.format(running_value_loss) + \
'Sample Action: %s\t' % str(list(action.data.numpy())) + \
'Logits: %s\t' % str(sample_logits) + \
'Decoder Loss: {:.2f}\t'.format(dec_loss.data[0]) + \
'Forward Loss: {:.2f}\t'.format(forward_loss.data[0]) + \
'Inverse Loss: {:.2f}\t'.format(inv_loss.data[0]) + \
'Loss: {:.2f}\t'.format(loss.data[0, 0]))
# write summaries here
if writer_dir is not None and done:
log('writing to tensorboard')
# running losses
writer.add_scalar('reward/running_reward', running_reward, i_episode)
writer.add_scalar('reward/running_policy_loss', running_policy_loss, i_episode)
writer.add_scalar('reward/running_value_loss', running_value_loss, i_episode)
# current episode stats
writer.add_scalar('reward/episode_reward', sum(rewards), i_episode)
writer.add_scalar('reward/episode_policy_loss', policy_loss.data[0], i_episode)
writer.add_scalar('reward/episode_value_loss', value_loss.data[0,0], i_episode)
writer.add_scalar('reward/mean_entropy', mean_entropy, i_episode)
writer.add_scalar('reward/mean_predicted_value', mean_predicted_value, i_episode)
writer.add_scalar('dynamics/total_loss', loss.data[0], i_episode)
writer.add_scalar('dynamics/decoder', dec_loss.data[0], i_episode)
writer.add_scalar('dynamics/reconstruction_loss', recon_loss.data[0], i_episode)
writer.add_scalar('dynamics/next_state_prediction_loss', model_loss.data[0], i_episode)
writer.add_scalar('dynamics/inv_loss', inv_loss.data[0], i_episode)
writer.add_scalar('dynamics/forward_loss', forward_loss.data[0], i_episode)
results_dict['reward'].append(sum(rewards))
results_dict['policy_loss'].append(policy_loss.data[0])
results_dict['value_loss'].append(value_loss.data[0,0])
results_dict['mean_entropy'].append(mean_entropy)
results_dict['mean_predicted_value'].append(mean_predicted_value)
results_dict['dec_losses'].append(dec_loss.data[0])
results_dict['forward_losses'].append(forward_loss.data[0])
results_dict['inverse_losses'].append(inv_loss.data[0])
results_dict['total_losses'].append(loss.data[0])
loss.backward()
torch.nn.utils.clip_grad_norm(all_params, args.max_grad_norm)
ensure_shared_grads(r_module, shared_r_module)
ensure_shared_grads(d_module, shared_d_module)
ensure_shared_grads(enc, shared_enc)
ensure_shared_grads(dec, shared_dec)
optimizer.step()
if total_episode % args.checkpoint_interval == 0:
args.curr_iter = total_episode
args.dynamics_module = os.path.join(
args.out, 'dynamics_module%s.pt' % total_episode)
torch.save((shared_r_module.state_dict(), args), os.path.join(
args.out, 'reward_module%s.pt' % total_episode))
results_dict['enc'] = shared_enc.state_dict()
results_dict['dec'] = shared_dec.state_dict()
results_dict['d_module'] = shared_d_module.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module%s.pt' % total_episode))
log("Saved model %d" % total_episode)
if writer_dir is not None and i_episode % \
(args.checkpoint_interval // args.num_processes) == 0:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
print(os.path.join(args.out, 'results_dict.pt'))
if writer_dir is not None:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
print(os.path.join(args.out, 'results_dict.pt'))
|
ddr-master
|
train_online.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import gym
from gym.spaces.box import Box
from rllab.envs.mujoco.swimmer_env import SwimmerEnv
from rllab.envs.mujoco.ant_env import AntEnv
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.envs.mujoco.hopper_env import HopperEnv
from rllab.envs.mujoco.humanoid_env import HumanoidEnv
from rllab.envs.mujoco.simple_humanoid_env import SimpleHumanoidEnv
from rllab.envs.mujoco.maze.point_maze_env import PointMazeEnv
from rllab.envs.mujoco.maze.swimmer_maze_env import SwimmerMazeEnv
from rllab.envs.mujoco.maze.ant_maze_env import AntMazeEnv
from rllab.envs.mujoco.inverted_double_pendulum_env import InvertedDoublePendulumEnv
from rllab.misc import ext
from rllab.envs.normalized_env import normalize
from common import *
def create_env(env_str, framework='gym', args=None, eval_flag=False, norm=True,
rank=0):
if framework == 'gym':
env = gym.make(env_str)
if norm:
env = NormalizedEnv(env)
elif framework == 'rllab':
if not hasattr(args, 'file_path'):
args.file_path = None
if env_str.endswith('MazeEnv'):
if not hasattr(args, 'coef_inner_rew'):
args.coef_inner_rew = 0.
if not hasattr(args, 'maze_structure'):
args.maze_structure = None
if not hasattr(args, 'random_start'):
args.random_start = False
if not hasattr(args, 'difficulty'):
args.difficulty = -1
difficulty = args.difficulty
if args.difficulty > 1 and not eval_flag:
if args.difficulty <= 5:
difficulty = np.random.choice(range(
args.difficulty - 1, args.difficulty + 1))
elif args.difficulty == -1:
difficulty = np.random.choice([1, 2, 3, 4, 5, -1])
env = eval(env_str)(maze_id=args.maze_id, length=args.maze_length,
coef_inner_rew=args.coef_inner_rew,
structure=args.maze_structure,
file_path=args.file_path,
random_start=args.random_start,
difficulty=difficulty)
env.horizon = args.max_episode_length
vlog(args.maze_structure, args.v)
else:
env = eval(env_str)(file_path=args.file_path)
if norm:
env = normalize(env)
else:
raise("framework not supported")
env.reset()
set_seed(args.seed + rank, env, framework)
return env
def wrapper(env):
def _wrap():
return env
return _wrap
def get_obs(env, framework):
if framework == 'gym':
state = env.unwrapped._get_obs()
elif framework == 'rllab':
state = env.get_current_obs()
else:
raise("framework not supported")
return state
def set_seed(seed, env, framework):
if framework == 'gym':
env.unwrapped.seed(seed)
elif framework == 'rllab':
ext.set_seed(seed)
else:
raise("framework not supported")
return env
def reset_env(env, args):
"""Reset env. Can differ based on env. e.g. in maze maybe we want to randomly
deposit the agent in different locations?"""
env.reset()
return get_obs(env, args.framework)
class NormalizedEnv(gym.ObservationWrapper):
def __init__(self, env=None):
super(NormalizedEnv, self).__init__(env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def _observation(self, observation):
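        # track running mean/std of observations with an exponential moving average
        # (alpha close to 1) and apply Adam-style bias correction before whitening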
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + \
observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + \
observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
return (observation - unbiased_mean) / (unbiased_std + 1e-8)
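# Minimal usage sketch (assumed gym API) for the observation whitening above:
#   env = NormalizedEnv(gym.make('InvertedPendulum-v1'))
#   obs = env.reset()  # observations are normalized with the running statistics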
|
ddr-master
|
envs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from envs import *
from model import R_Module
from common import *
from tensorboardX import SummaryWriter
def ensure_shared_grads(model, shared_model):
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def train_rewards(rank, args, shared_model, enc, optimizer=None, writer_dir=None,
d_module=None):
"""
Arguments:
    - writer_dir: directory for the tensorboard summaries (the SummaryWriter object itself could not be passed in directly, so it is created inside this function)
"""
# create writer here itself
writer = None
if writer_dir is not None:
writer = SummaryWriter(log_dir=writer_dir)
results_dict = {
'reward': [],
'policy_loss': [],
'value_loss': [],
'mean_entropy': [],
'mean_predicted_value': []
}
running_t, running_reward, running_value_loss, running_policy_loss, \
running_reward_loss = 0, 0, 0, 0, 0
torch.manual_seed(args.seed + rank)
env = create_env(args.env_name, framework=args.framework, args=args)
set_seed(args.seed + rank, env, args.framework)
model = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=args.baseline,
state_space=env.observation_space.shape[0])
max_rollout = 0
if args.planning:
max_rollout = args.rollout
if args.from_checkpoint is not None:
model_state, _ = torch.load(args.from_checkpoint, map_location=lambda storage, loc: storage)
model.load_state_dict(model_state)
# no shared adam ?
if optimizer is None:
optimizer = optim.Adam(shared_model.parameters(), lr=args.lr, eps=args.eps)
model.train()
done = True
episode_length = 0
i_episode, total_episode = 0, 0
start = time.time()
while total_episode < args.num_episodes:
# Sync with the shared model
model.load_state_dict(shared_model.state_dict())
if done:
cx_p = Variable(torch.zeros(1, args.dim))
hx_p = Variable(torch.zeros(1, args.dim))
cx_d = Variable(torch.zeros(1, args.dim))
hx_d = Variable(torch.zeros(1, args.dim))
i_episode += 1
episode_length = 0
total_episode = args.num_steps * (i_episode - 1) + rank
start = time.time()
last_episode_length = episode_length
if not args.single_env and args.env_name.endswith('MazeEnv'): # generate new maze
env = create_env(
args.env_name, framework=args.framework, args=args)
state = env.reset()
state = Variable(torch.from_numpy(state).float())
if not args.baseline:
state = enc(state)
else:
cx_p = Variable(cx_p.data)
hx_p = Variable(hx_p.data)
cx_d = Variable(cx_d.data)
hx_d = Variable(hx_d.data)
values = []
value_preds = []
log_probs = []
rewards = []
total_actions = []
entropies = []
obses = []
hx_ps = []
cx_ps = []
step = 0
while step < args.num_steps:
episode_length += 1
if args.planning:
_, actions, (hx_p, cx_p), (hx_d, cx_d), values, es, \
lps = mcts(
env, state, model, d_module, enc, (hx_p, cx_p), (hx_d, cx_d),
args, discrete=args.discrete)
log_probs += lps
entropies += es
actions = actions[:1]
else:
obses.append(state.unsqueeze(0))
hx_ps.append(hx_p)
cx_ps.append(cx_p)
value, logit, (hx_p, cx_p) = model((
state.unsqueeze(0), (hx_p, cx_p)))
action, entropy, log_prob = get_action(
logit, discrete=args.discrete)
vlog("Action: %s\t Bounds: %s" % (str(action), str(
(env.action_space.low, env.action_space.high))), args.v)
entropies.append(entropy.mean().data)
actions = [action]
values.append(value)
log_probs.append(log_prob)
for action in actions:
state, reward, done, _ = env.step(action.data.numpy())
if args.neg_reward:
reward = -reward
state = Variable(torch.from_numpy(state).float())
if args.clip_reward:
reward = max(min(reward, 1), -1)
if not args.baseline:
state = enc(state)
rewards.append(reward)
total_actions.append(action)
step += 1
if done:
break
if done:
break
R = torch.zeros(1, 1)
if not done:
value, _, _ = model((state.unsqueeze(0), (hx_p, cx_p)))
R = value.data
done = True
values.append(Variable(R))
policy_loss = 0
value_loss = 0
advantages = np.zeros_like(rewards, dtype=float)
R = Variable(R)
gae = torch.zeros(1, 1)
Rs = np.zeros_like(rewards, dtype=float)
vlog("values: %s" % str([v.data[0,0] for v in values]), args.v)
for i in reversed(range(len(rewards))):
R = args.gamma * R + rewards[i]
Rs[i] = R
advantage = R - values[i]
advantages[i] = advantage
if args.algo == 'a3c':
value_loss += 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
if args.gae:
delta_t = rewards[i] + args.gamma * values[i + 1].data \
- values[i].data
gae = gae * args.gamma * args.tau + delta_t
policy_loss -= (log_probs[i] * Variable(gae).expand_as(
log_probs[i])).mean()
else:
policy_loss -= advantage * (log_probs[i].mean())
if args.algo == 'a3c':
optimizer.zero_grad()
(policy_loss + args.value_loss_coef * value_loss - \
args.entropy_coef * np.mean(entropies)).backward()
torch.nn.utils.clip_grad_norm(model.parameters(), args.max_grad_norm)
ensure_shared_grads(model, shared_model)
optimizer.step()
########Bookkeeping and logging#############
U = 1. / min(i_episode, 100)
running_reward = running_reward * (1 - U) + sum(rewards) * U
running_t = running_t * (1 - U) + episode_length * U
running_policy_loss = running_policy_loss * (1 - U) + policy_loss.squeeze().data[0] * U
running_value_loss = running_value_loss * (1 - U) + \
args.value_loss_coef * value_loss.squeeze().data[0] * U
mean_entropy = np.mean([e.mean().data[0] for e in entropies])
mean_predicted_value = np.mean([v.sum().data[0] for v in values])
if total_episode % args.log_interval == 0 and done:
if not args.discrete:
sample_logits = (list(logit[0].data[0].numpy()),
list(logit[1].data[0].numpy()))
else:
sample_logits = list(logit.data[0].numpy())
log(
'Frames {}\t'.format(total_episode) + \
'Avg reward: {:.2f}\tAverage length: {:.2f}\t'.format(
running_reward, running_t) + \
'Entropy: {:.2f}\tTime: {:.2f}\tRank: {}\t'.format(
mean_entropy, time.time() - start, rank) + \
'Policy Loss: {:.2f}\t'.format(running_policy_loss) + \
# 'Reward Loss: {:.2f}\t'.format(running_reward_loss) + \
'Weighted Value Loss: {:.2f}\t'.format(running_value_loss))
vlog('Sample Action: %s\t' % str(list(action.data.numpy())) + \
'Logits: %s\t' % str(sample_logits), args.v)
# write summaries here
if writer_dir is not None and done:
log('writing to tensorboard')
# running losses
writer.add_scalar('reward/running_reward', running_reward, i_episode)
writer.add_scalar('reward/running_policy_loss', running_policy_loss, i_episode)
writer.add_scalar('reward/running_value_loss', running_value_loss, i_episode)
# current episode stats
writer.add_scalar('reward/episode_reward', sum(rewards), i_episode)
writer.add_scalar('reward/episode_policy_loss', policy_loss.squeeze().data[0], i_episode)
writer.add_scalar('reward/episode_value_loss', value_loss.squeeze().data[0], i_episode)
writer.add_scalar('reward/mean_entropy', mean_entropy, i_episode)
writer.add_scalar('reward/mean_predicted_value', mean_predicted_value, i_episode)
results_dict['reward'].append(sum(rewards))
results_dict['policy_loss'].append(policy_loss.squeeze().data[0])
results_dict['value_loss'].append(value_loss.squeeze().data[0])
results_dict['mean_entropy'].append(mean_entropy)
results_dict['mean_predicted_value'].append(mean_predicted_value)
if total_episode % args.checkpoint_interval == 0:
args.curr_iter = total_episode
args.optimizer = optimizer
torch.save((shared_model.state_dict(), args), os.path.join(
args.out, args.model_name + '%s.pt' % total_episode))
log("Saved model %d rank %s" % (total_episode, rank))
log(os.path.join(
args.out, args.model_name + '%s.pt' % total_episode))
if writer_dir is not None and i_episode % \
(args.checkpoint_interval // args.num_processes) == 0:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
log(os.path.join(args.out, 'results_dict.pt'))
if writer_dir is not None:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
log(os.path.join(args.out, 'results_dict.pt'))
|
ddr-master
|
train_reward_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
from itertools import chain
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from torch.autograd import Variable
from model import Encoder, Decoder, D_Module
from common import *
def get_dynamics_losses(s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat,
a_hat, curr_actions, discrete=False):
# reconstruction loss
recon_loss = F.mse_loss(s_hat, s)
# next state prediction loss
model_loss = F.mse_loss(s_prime_hat, s_prime)
# net decoder loss
dec_loss = (F.mse_loss(s_hat, s) + F.mse_loss(s_prime_hat, s_prime))
# action reconstruction loss
if discrete:
a_hat = F.log_softmax(a_hat)
inv_loss = F.mse_loss(a_hat, curr_actions)
# representation space constraint
forward_loss = F.mse_loss(z_prime_hat, z_prime.detach())
return recon_loss, model_loss, dec_loss, inv_loss, forward_loss
def get_maze_dynamics_losses(s, s_hat_logits,
s_prime, s_prime_hat_logits,
z_prime, z_prime_hat,
a_hat_logits, curr_actions, discrete=True,
dec_mask=None):
"""
    dec_mask: optional per-element weights used to up-weight the reconstruction
        loss at the agent and goal locations
"""
# reconstruction loss
if dec_mask is not None:
recon_loss = F.cross_entropy(s_hat_logits.view(-1, 2), s.view(-1).long(), reduce=False)
recon_loss = (recon_loss * dec_mask).mean()
else:
recon_loss = F.cross_entropy(s_hat_logits.view(-1, 2), s.view(-1).long())
# next state prediction loss
if dec_mask is not None:
model_loss = F.cross_entropy(s_prime_hat_logits.view(-1, 2), s_prime.view(-1).long(), reduce=False)
model_loss = (model_loss * dec_mask).mean()
else:
model_loss = F.cross_entropy(s_prime_hat_logits.view(-1, 2), s_prime.view(-1).long())
# net decoder loss
dec_loss = recon_loss + model_loss
# action reconstruction loss
inv_loss = F.cross_entropy(a_hat_logits, curr_actions.view(-1).long())
# representation space constraint
forward_loss = F.mse_loss(z_prime_hat, z_prime.detach())
return recon_loss, model_loss, dec_loss, inv_loss, forward_loss
class DynamicsDataset(data.Dataset):
def __init__(self, root, size, batch, rollout):
self.size = size
self.root = root
self.actions = []
self.states = []
start = 0
while len(self.actions) < size:
end = start + batch
states, actions = torch.load(
os.path.join(self.root, 'states_actions_%s_%s.pt' % (start, end)))
self.states += states
self.actions += actions
start = end
rollout = len(actions[0])
self.actions = torch.Tensor(self.actions[:size]).view(
self.size, rollout, -1)
self.states = torch.Tensor(self.states[:size]).view(
self.size, rollout + 1, -1)
def __getitem__(self, index):
assert index < self.size
return self.states[index], self.actions[index]
def __len__(self):
return len(self.actions)
class MazeDynamicsDataset(data.Dataset):
def __init__(self, root, size, batch, rollout):
"""
        batch: size of each data block stored on disk
        size: total number of trajectories in the dataset
        rollout: length of each trajectory
"""
self.size = size
self.root = root
self.actions = []
self.states = []
start = 0
while len(self.actions) < size:
end = start + batch
states, actions = torch.load(
os.path.join(self.root, 'states_actions_%s_%s.pt' % (start, end)))
self.states += states
self.actions += actions
start = end
# convert the state and actions to the float
self.states = np.asarray(self.states, dtype=np.float32)
self.actions = np.asarray(self.actions, dtype=np.float32)
# convert to tensors
self.actions = torch.Tensor(self.actions).view(
self.size, rollout, -1)
self.states = torch.Tensor(self.states).view(
self.size, rollout + 1, -1)
def __getitem__(self, index):
assert index < self.size
return self.states[index], self.actions[index]
def __len__(self):
return len(self.actions)
def forward(i, states, target_actions, enc, dec, d_module, args,
d_init=None, dec_mask=None):
if args.framework == "mazebase":
# cx_d = Variable(torch.zeros(states.size(0), args.lstm_dim))
# hx_d = Variable(torch.zeros(states.size(0), args.lstm_dim))
hx_d, cx_d = d_init(Variable(states[:, 0, :]).contiguous().cuda())
else:
cx_d = Variable(torch.zeros(states.size(0), args.dim))
hx_d = Variable(torch.zeros(states.size(0), args.dim))
if args.gpu:
cx_d = cx_d.cuda()
hx_d = hx_d.cuda()
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
s = None
for r in range(args.rollout):
curr_state = states[:, r, :]
next_state = states[:, r + 1, :]
if args.framework == "mazebase":
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, 1))
else:
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, args.action_space.shape[0]))
if s is None:
s = Variable(curr_state.contiguous())
if args.gpu:
s = s.cuda()
z = enc(s)
s_prime = Variable(next_state.contiguous())
if args.gpu:
s_prime = s_prime.cuda()
z_prime = enc(s_prime)
if args.gpu:
curr_actions = curr_actions.cuda()
if args.framework == "mazebase":
s_hat, s_hat_binary = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
z, curr_actions.long(), z_prime.detach(), (hx_d, cx_d))
s_prime_hat, s_prime_hat_binary = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_maze_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
curr_actions, discrete=args.discrete, dec_mask= dec_mask)
            # calculate the accuracy here
_, predicted_a = torch.max(F.sigmoid(a_hat),1)
current_epoch_predicted_a_hat += (predicted_a == curr_actions.view(-1).long()).sum().data[0]
current_epoch_actions += curr_actions.size(0)
else:
s_hat = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, curr_actions, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat,
a_hat, curr_actions, discrete=args.discrete)
inv_loss += i_loss
dec_loss += d_loss
forward_loss += f_loss
recon_loss += r_loss
model_loss += m_loss
s = s_prime
z = z_prime
return forward_loss, inv_loss, dec_loss, recon_loss, model_loss, \
current_epoch_predicted_a_hat, current_epoch_actions
def forward_planning(i, states, target_actions, enc, dec, d_module, args,
d_init=None, dec_mask=None):
cx_d = Variable(torch.zeros(states.size(0), args.dim))
hx_d = Variable(torch.zeros(states.size(0), args.dim))
if args.gpu:
cx_d = cx_d.cuda()
hx_d = hx_d.cuda()
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
s = None
for r in range(args.rollout):
curr_state = states[:, r, :]
next_state = states[:, r + 1, :]
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, args.action_space.shape[0]))
if s is None:
s = Variable(curr_state.contiguous())
if args.gpu:
s = s.cuda()
z = enc(s)
s_prime = Variable(next_state.contiguous())
if args.gpu:
s_prime = s_prime.cuda()
z_prime = enc(s_prime)
if args.gpu:
curr_actions = curr_actions.cuda()
s_hat = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, curr_actions, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat,
a_hat, curr_actions, discrete=args.discrete)
inv_loss += i_loss
dec_loss += d_loss
forward_loss += f_loss
recon_loss += r_loss
model_loss += m_loss
s = s_prime
z = z_prime_hat
return forward_loss, inv_loss, dec_loss, recon_loss, model_loss, \
current_epoch_predicted_a_hat, current_epoch_actions
def multiple_forward(i, states, target_actions, enc, dec, d_module, args,
d_init=None, dec_mask = None):
cx_d = Variable(torch.zeros(states.size(0), args.dim))
hx_d = Variable(torch.zeros(states.size(0), args.dim))
if args.gpu:
cx_d = cx_d.cuda()
hx_d = hx_d.cuda()
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
s = None
for r in range(args.rollout):
curr_state = states[:, r, :]
next_state = states[:, r + 1, :]
if args.framework == "mazebase":
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, 1))
else:
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, args.action_space.shape[0]))
if s is None:
s = Variable(curr_state.contiguous())
if args.gpu:
s = s.cuda()
z = enc(s)
s_prime = Variable(next_state.contiguous())
if args.gpu:
s_prime = s_prime.cuda()
z_prime = enc(s_prime)
if args.gpu:
curr_actions = curr_actions.cuda()
if args.framework == "mazebase":
s_hat, s_hat_binary = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
z, curr_actions.long(), z_prime.detach(), (hx_d, cx_d))
s_prime_hat, s_prime_hat_binary = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_maze_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
curr_actions, discrete=args.discrete, dec_mask= dec_mask)
            # calculate the accuracy here
_, predicted_a = torch.max(F.sigmoid(a_hat),1)
current_epoch_predicted_a_hat += (predicted_a == curr_actions.view(-1).long()).sum().data[0]
current_epoch_actions += curr_actions.size(0)
else:
s_hat = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, curr_actions, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
curr_actions, discrete=args.discrete)
inv_loss += i_loss
dec_loss += d_loss
forward_loss += f_loss
recon_loss += r_loss
model_loss += m_loss
s = s_prime
z = z_prime_hat
return forward_loss, inv_loss, dec_loss, recon_loss, model_loss, \
current_epoch_predicted_a_hat, current_epoch_actions
def train_dynamics(env, args, writer=None):
"""
Trains the Dynamics module. Supervised.
Arguments:
env: the initialized environment (rllab/gym)
args: input arguments
writer: initialized summary writer for tensorboard
"""
args.action_space = env.action_space
# Initialize models
enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
d_module = D_Module(env.action_space.shape[0], args.dim, args.discrete)
if args.from_checkpoint is not None:
results_dict = torch.load(args.from_checkpoint)
enc.load_state_dict(results_dict['enc'])
dec.load_state_dict(results_dict['dec'])
d_module.load_state_dict(results_dict['d_module'])
all_params = chain(enc.parameters(), dec.parameters(), d_module.parameters())
if args.transfer:
for p in enc.parameters():
p.requires_grad = False
for p in dec.parameters():
p.requires_grad = False
all_params = d_module.parameters()
optimizer = torch.optim.Adam(all_params, lr=args.lr,
weight_decay=args.weight_decay)
if args.gpu:
enc = enc.cuda()
dec = dec.cuda()
d_module = d_module.cuda()
# Initialize datasets
val_loader = None
train_dataset = DynamicsDataset(
args.train_set, args.train_size, batch=args.train_batch,
rollout=args.rollout)
val_dataset = DynamicsDataset(args.test_set, 5000, batch=args.test_batch,
rollout=args.rollout)
val_loader = torch.utils.data.DataLoader(
dataset=val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers)
results_dict = {
'dec_losses': [],
'forward_losses': [],
'inverse_losses': [],
'total_losses': [],
'enc': None,
'dec': None,
'd_module': None,
'd_init':None,
'args': args
}
total_action_taken = 0
correct_predicted_a_hat = 0
# create the mask here for re-weighting
dec_mask = None
if args.dec_mask is not None:
dec_mask = torch.ones(9)
game_vocab = dict([(b, a) for a, b in enumerate(sorted(env.game.all_possible_features()))])
dec_mask[game_vocab['Agent']] = args.dec_mask
dec_mask[game_vocab['Goal']] = args.dec_mask
dec_mask = dec_mask.expand(args.batch_size, args.maze_length,args.maze_length,9).contiguous().view(-1)
dec_mask = Variable(dec_mask, requires_grad = False)
if args.gpu:
dec_mask = dec_mask.cuda()
for epoch in range(1, args.num_epochs + 1):
enc.train()
dec.train()
d_module.train()
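        # NOTE: d_init (the initial hidden-state network used for the mazebase
        # framework) is assumed to be constructed elsewhere; it is not defined here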
if args.framework == "mazebase":
d_init.train()
# for measuring the accuracy
train_acc = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
start = time.time()
for i, (states, target_actions) in enumerate(train_loader):
optimizer.zero_grad()
if args.framework != "mazebase":
forward_loss, inv_loss, dec_loss, recon_loss, model_loss, _, _ = forward_planning(
i, states, target_actions, enc, dec, d_module, args)
else:
forward_loss, inv_loss, dec_loss, recon_loss, model_loss, current_epoch_predicted_a_hat, current_epoch_actions = multiple_forward(
i, states, target_actions, enc, dec, d_module, args, d_init, dec_mask )
loss = forward_loss + args.inv_loss_coef * inv_loss + \
args.dec_loss_coef * dec_loss
if i % args.log_interval == 0:
log(
'Epoch [{}/{}]\tIter [{}/{}]\t'.format(
epoch, args.num_epochs, i+1, len(
train_dataset)//args.batch_size) + \
'Time: {:.2f}\t'.format(time.time() - start) + \
'Decoder Loss: {:.2f}\t'.format(dec_loss.data[0]) + \
'Forward Loss: {:.2f}\t'.format(forward_loss.data[0] ) + \
'Inverse Loss: {:.2f}\t'.format(inv_loss.data[0]) + \
'Loss: {:.2f}\t'.format(loss.data[0]))
results_dict['dec_losses'].append(dec_loss.data[0])
results_dict['forward_losses'].append(forward_loss.data[0])
results_dict['inverse_losses'].append(inv_loss.data[0])
results_dict['total_losses'].append(loss.data[0])
# write the summaries here
if writer:
writer.add_scalar('dynamics/total_loss', loss.data[0], epoch)
writer.add_scalar('dynamics/decoder', dec_loss.data[0], epoch)
writer.add_scalar(
'dynamics/reconstruction_loss', recon_loss.data[0], epoch)
writer.add_scalar(
'dynamics/next_state_prediction_loss',
model_loss.data[0], epoch)
writer.add_scalar('dynamics/inv_loss', inv_loss.data[0], epoch)
writer.add_scalar(
'dynamics/forward_loss', forward_loss.data[0], epoch)
writer.add_scalars(
'dynamics/all_losses',
{"total_loss":loss.data[0],
"reconstruction_loss":recon_loss.data[0],
"next_state_prediction_loss":model_loss.data[0],
"decoder_loss":dec_loss.data[0],
"inv_loss":inv_loss.data[0],
"forward_loss":forward_loss.data[0],
} , epoch)
loss.backward()
correct_predicted_a_hat += current_epoch_predicted_a_hat
total_action_taken += current_epoch_actions
# does it not work at all without grad clipping ?
torch.nn.utils.clip_grad_norm(all_params, args.max_grad_norm)
optimizer.step()
# maybe add the generated image to add the logs
# writer.add_image()
# Run validation
if val_loader is not None:
enc.eval()
dec.eval()
d_module.eval()
forward_loss, inv_loss, dec_loss = 0, 0, 0
for i, (states, target_actions) in enumerate(val_loader):
f_loss, i_loss, d_loss, _, _, _, _ = forward_planning(
i, states, target_actions, enc, dec, d_module, args)
forward_loss += f_loss
inv_loss += i_loss
dec_loss += d_loss
loss = forward_loss + args.inv_loss_coef * inv_loss + \
args.dec_loss_coef * dec_loss
if writer:
writer.add_scalar('val/forward_loss', forward_loss.data[0] / i, epoch)
writer.add_scalar('val/inverse_loss', inv_loss.data[0] / i, epoch)
writer.add_scalar('val/decoder_loss', dec_loss.data[0] / i, epoch)
log(
'[Validation]\t' + \
'Decoder Loss: {:.2f}\t'.format(dec_loss.data[0] / i) + \
'Forward Loss: {:.2f}\t'.format(forward_loss.data[0] / i) + \
'Inverse Loss: {:.2f}\t'.format(inv_loss.data[0] / i) + \
'Loss: {:.2f}\t'.format(loss.data[0] / i))
if epoch % args.checkpoint == 0:
results_dict['enc'] = enc.state_dict()
results_dict['dec'] = dec.state_dict()
results_dict['d_module'] = d_module.state_dict()
if args.framework == "mazebase":
results_dict['d_init'] = d_init.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module_epoch%s.pt' % epoch))
log('Saved model %s' % epoch)
results_dict['enc'] = enc.state_dict()
results_dict['dec'] = dec.state_dict()
results_dict['d_module'] = d_module.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module_epoch%s.pt' % epoch))
print(os.path.join(args.out, 'dynamics_module_epoch%s.pt' % epoch))
|
ddr-master
|
train_dynamics_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Train Modules')
# Learning parameters
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.95,
help='parameter for GAE (default: 0.95)')
parser.add_argument('--eps', type=float, default=1e-5,
help='RMSprop optimizer epsilon (default: 1e-5)')
parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer alpha (default: 0.99)')
parser.add_argument('--max-grad-norm', type=float, default=50,
                        help='max norm for gradient clipping (default: 50)')
parser.add_argument('--no-shared', default=False,
help='use an optimizer without shared momentum.')
parser.add_argument('--dim', type=int, default=32,
help='number of dimensions of representation space')
parser.add_argument('--use-conv', action='store_true', help='Use conv layers')
parser.add_argument('--discrete', action='store_true', help='discrete action space')
parser.add_argument('--weight-decay', type=float, default=0.0001)
# TODO:// finish implementation for discrete action spaces.
# Environment settings
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=40,
help='how many training processes to use (default: 40)')
parser.add_argument('--num-steps', type=int, default=200,
                        help='number of forward steps in A3C (default: 200)')
parser.add_argument('--framework', default='gym',
help='framework of env (default: gym)')
parser.add_argument('--env-name', default='InvertedPendulum-v1',
help='environment to train on (default: InvertedPendulum-v1)')
parser.add_argument('--maze-id', type=int, default=0)
parser.add_argument('--maze-length', type=int, default=1)
# Dynamics Module settings
parser.add_argument('--rollout', type=int, default=20, help="rollout for goal")
parser.add_argument('--train-set', type=str, default=None)
parser.add_argument('--train-batch', type=int, default=2500)
parser.add_argument('--test-set', type=str)
parser.add_argument('--test-batch', type=int, default=2500)
parser.add_argument('--train-size', type=int, default=100000)
parser.add_argument('--dec-loss-coef', type=float, default=0.1,
help='decoder loss coefficient (default: 0.1)')
parser.add_argument('--forward-loss-coef', type=float, default=10,
help='forward loss coefficient (default: 10)')
parser.add_argument('--inv-loss-coef', type=float, default=100,
                        help='inverse loss coefficient (default: 100)')
parser.add_argument('--num-epochs', type=int, default=1000)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--num-workers', type=int, default=20)
parser.add_argument('--out', type=str, default='/checkpoint/amyzhang/ddr/models')
parser.add_argument('--dec-mask', type=float, default = None,
help="to use masking while calculating the decoder reconstruction loss ")
# Rewards Module settings
parser.add_argument('--coef-inner-rew', type=float, default=1.)
parser.add_argument('--checkpoint-interval', type=int, default=1000)
parser.add_argument('--num-episodes', type=int, default=1000000,
help='max number of episodes to train')
parser.add_argument('--max-episode-length', type=int, default=500,
help='maximum length of an episode (default: 500)')
parser.add_argument('--curriculum', type=int, default=0,
help='number of iterations in curriculum. (default: 0, no curriculum)')
parser.add_argument('--single-env', action='store_true')
parser.add_argument('--entropy-coef', type=float, default=0.,
help='entropy term coefficient (default: 0.), use 0.0001 for mujoco')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--rew-loss-coef', type=float, default=0,
help='reward loss coefficient (default: 0)')
parser.add_argument('--lstm-dim', type=int, default=128,
help='number of dimensions of lstm hidden state')
parser.add_argument('--difficulty', type=int, default=-1, help='difficulty of maze')
parser.add_argument('--clip-reward', action='store_true')
parser.add_argument('--finetune-enc', action='store_true',
help="allow the ActorCritic to change the observation space representation")
parser.add_argument('--gae', action='store_true')
parser.add_argument('--algo', default='a3c',
help='algorithm to use: a3c')
# General training settings
parser.add_argument('--checkpoint', type=int, default=10000)
parser.add_argument('--log-interval', type=int, default=100,
help='interval between training status logs (default: 100)')
parser.add_argument('-v', action='store_true', help='verbose logging')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--log-dir', type=str, default='/checkpoint/amyzhang/ddr/logs',
help='The logging directory to record the logs and tensorboard summaries')
parser.add_argument('--reset-dir', action='store_true',
help="give this argument to delete the existing logs for the current set of parameters")
# transfer
parser.add_argument('--file-path', type=str, default=None,
help='path to XML file for mujoco')
parser.add_argument('--neg-reward', action='store_true',
help='set reward negative for transfer')
parser.add_argument('--random-start', action='store_true')
# What to run
parser.add_argument('--train-dynamics', action='store_true')
parser.add_argument('--train-reward', action='store_true')
parser.add_argument('--train-online', action='store_true',
help='train both modules online')
parser.add_argument('--dynamics-module', type=str, default=None,
help='Encoder from dynamics module')
parser.add_argument('--from-checkpoint', type=str, default=None,
help='Start from stored model')
parser.add_argument('--baseline', action='store_true',
help='Running A3C baseline.')
parser.add_argument('--planning', action='store_true',
help='train with planning (reward and online only)')
parser.add_argument('--transfer', action='store_true',
help='Keep encoder and decoder static')
parser.add_argument('--eval-every', type=float, default=10)
parser.add_argument('--enc-dims', type=int, nargs='+', default=[256, 128])
parser.add_argument('--dec-dims', type=int, nargs='+', default=[128, 256])
parser.add_argument('--num-runs', type=int, default=5,
help='number of models to train in parallel')
parser.add_argument('--mcts', action='store_true', help='Monte Carlo Tree Search')
parser.add_argument('--render', action='store_true')
parser.add_argument('-b', type=int, default=4, help='branching factor')
parser.add_argument('-d', type=int, default=3, help='planning depth')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--local', action='store_true')
args = parser.parse_args()
return args
|
ddr-master
|
arguments.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from collections import deque
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from envs import create_env
from model import *
def test(rank, args, shared_model, counter):
torch.manual_seed(args.seed + rank)
env = create_env(args.env_name)
env.seed(args.seed + rank)
model = ActorCritic(env.observation_space.shape[0], env.action_space)
model.eval()
state = env.reset()
state = torch.from_numpy(state).float()
reward_sum = 0
done = True
start_time = time.time()
    # a quick hack to prevent the agent from getting stuck
actions = deque(maxlen=100)
episode_length = 0
while True:
episode_length += 1
# Sync with the shared model
if done:
model.load_state_dict(shared_model.state_dict())
cx_d = Variable(torch.zeros(1, 256), volatile=True)
hx_d = Variable(torch.zeros(1, 256), volatile=True)
cx_p = Variable(torch.zeros(1, 256), volatile=True)
hx_p = Variable(torch.zeros(1, 256), volatile=True)
else:
cx_d = Variable(cx_d.data, volatile=True)
hx_d = Variable(hx_d.data, volatile=True)
cx_p = Variable(cx_p.data, volatile=True)
hx_p = Variable(hx_p.data, volatile=True)
value, logit, (hx_d, cx_d), (hx_p, cx_p) = model((Variable(
state.unsqueeze(0), volatile=True), (hx_d, cx_d), (hx_p, cx_p)))
if args.discrete:
prob = F.softmax(logit)
action = prob.max(1, keepdim=True)[1].data.numpy()
else:
mu, sigma_sq = logit
sigma_sq = F.softplus(sigma_sq)
eps = torch.randn(mu.size())
action = (mu + sigma_sq.sqrt()*Variable(eps)).data
state, reward, done, _ = env.step(action[0, 0])
done = done or episode_length >= args.max_episode_length
reward_sum += reward
        # a quick hack to prevent the agent from getting stuck
actions.append(action[0, 0])
if actions.count(actions[0]) == actions.maxlen:
done = True
if done:
print("Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}".format(
time.strftime("%Hh %Mm %Ss",
time.gmtime(time.time() - start_time)),
counter.value, counter.value / (time.time() - start_time),
reward_sum, episode_length))
reward_sum = 0
episode_length = 0
actions.clear()
state = env.reset()
time.sleep(60)
state = torch.from_numpy(state).float()
|
ddr-master
|
test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def normalized_columns_initializer(weights, std=1.0):
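    # rescale each output unit's weight vector so its L2 norm equals `std`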
out = torch.randn(weights.size())
out *= std / torch.sqrt(out.pow(2).sum(1, keepdim=True))
return out
def weights_init(m):
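    # Xavier/Glorot-style uniform initialization for Conv and Linear layers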
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class Encoder(torch.nn.Module):
def __init__(self, obs_space, dim, use_conv=False):
"""
        The architecture should be passed in as an argument so that multiple job configurations can reuse this module.
"""
super(Encoder, self).__init__()
self.use_conv = use_conv
self.obs_space = obs_space
if use_conv:
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
else:
self.linear1 = nn.Linear(obs_space, dim)
self.linear2 = nn.Linear(dim, 32 * 3 * 3)
self.fc = nn.Linear(32 * 3 * 3, dim)
self.apply(weights_init)
self.train()
def forward(self, inputs):
# why elu and not relu ?
if self.use_conv:
x = F.elu(self.conv1(inputs))
x = F.elu(self.conv2(x))
x = F.elu(self.conv3(x))
            x = F.elu(self.conv4(x))
            x = x.view(x.size(0), -1)  # flatten conv features for the fc layer (assumes a 32x3x3 output map)
else:
x = F.elu(self.linear1(inputs))
x = F.elu(self.linear2(x))
x = F.tanh(self.fc(x))
return x
class Decoder(torch.nn.Module):
def __init__(self, obs_space, dim, use_conv=False):
super(Decoder, self).__init__()
self.use_conv = use_conv
self.fc = nn.Linear(dim, 32 * 3 * 3)
if self.use_conv:
self.deconv1 = nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(32, 3, 3, stride=2, padding=1)
else:
self.linear1 = nn.Linear(32 * 3 * 3, dim)
self.linear2 = nn.Linear(dim, obs_space)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x = F.elu(self.fc(inputs))
        if self.use_conv:
            x = x.view(-1, 32, 3, 3)  # reshape fc output to the assumed 32x3x3 feature map before deconvolution
            x = F.elu(self.deconv1(x))
x = F.elu(self.deconv2(x))
x = F.elu(self.deconv3(x))
x = self.deconv4(x)
else:
x = F.elu(self.linear1(x))
x = self.linear2(x)
return x
class D_Module(torch.nn.Module):
def __init__(self, action_space, dim, discrete=False):
super(D_Module, self).__init__()
self.dim = dim
self.discrete = discrete
self.za_embed = nn.Linear(2 * dim, dim)
self.lstm_dynamics = nn.LSTMCell(dim, dim)
self.z_embed = nn.Linear(dim, dim)
self.inv = nn.Linear(2 * dim, dim)
self.inv2 = nn.Linear(dim, action_space)
self.action_linear = nn.Linear(action_space, dim)
self.action_linear2 = nn.Linear(dim, dim)
self.apply(weights_init)
self.lstm_dynamics.bias_ih.data.fill_(0)
self.lstm_dynamics.bias_hh.data.fill_(0)
self.train()
def forward(self, inputs):
z, z_prime, actions, (hx_d, cx_d) = inputs
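        # forward model: embed (z, action) -> LSTM -> predicted next latent z_prime_hat;
        # the inverse model below predicts a_hat from (z, z_prime)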
z = z.view(-1, self.dim)
a_embedding = F.elu(self.action_linear(actions))
a_embedding = self.action_linear2(a_embedding)
za_embedding = self.za_embed(
torch.cat([z, a_embedding.view(z.size())], 1))
hx_d, cx_d = self.lstm_dynamics(za_embedding, (hx_d, cx_d))
z_prime_hat = F.tanh(self.z_embed(hx_d))
# decode the action
if z_prime is not None:
z_prime = z_prime.view(-1, self.dim)
else:
z_prime = z_prime_hat
a_hat = F.elu(self.inv(torch.cat([z, z_prime], 1)))
a_hat = self.inv2(a_hat)
return z_prime_hat, a_hat, (hx_d, cx_d)
class R_Module(torch.nn.Module):
def __init__(self, action_space, dim, discrete=False, baseline=False,
state_space=None):
super(R_Module, self).__init__()
self.discrete = discrete
self.baseline = baseline
self.dim = dim
if baseline:
self.linear1 = nn.Linear(state_space, dim)
self.linear2 = nn.Linear(dim, dim)
self.lstm_policy = nn.LSTMCell(dim, dim)
self.actor_linear = nn.Linear(dim, action_space)
self.critic_linear = nn.Linear(dim, 1)
self.rhat_linear = nn.Linear(dim, 1)
if not discrete:
self.actor_sigma_sq = nn.Linear(dim, action_space)
self.apply(weights_init)
self.actor_linear.weight.data = normalized_columns_initializer(
self.actor_linear.weight.data, 0.01)
self.actor_linear.bias.data.fill_(0)
self.critic_linear.weight.data = normalized_columns_initializer(
self.critic_linear.weight.data, 1.0)
self.critic_linear.bias.data.fill_(0)
# only forget should be 1
self.lstm_policy.bias_ih.data.fill_(0)
self.lstm_policy.bias_hh.data.fill_(0)
if not discrete:
self.actor_sigma_sq.weight.data = normalized_columns_initializer(
self.actor_sigma_sq.weight.data, 0.01)
self.actor_sigma_sq.bias.data.fill_(0)
self.train()
def forward(self, inputs):
inputs, (hx_p, cx_p) = inputs
if self.baseline:
inputs = F.elu(self.linear1(inputs))
inputs = F.elu(self.linear2(inputs))
hx_p, cx_p = self.lstm_policy(inputs, (hx_p, cx_p))
x = hx_p
if self.discrete:
action = self.actor_linear(x)
else:
action = (self.actor_linear(x), self.actor_sigma_sq(x))
return self.critic_linear(x), action, (hx_p, cx_p)
|
ddr-master
|
model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import sys
from datetime import datetime
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from model import Encoder, D_Module
pi = Variable(torch.FloatTensor([math.pi]))
def get_prob(x, mu, sigma_sq):
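    # Density of a diagonal Gaussian N(mu, sigma_sq) evaluated at x,
    # i.e. exp(-(x - mu)^2 / (2 * sigma_sq)) / sqrt(2 * pi * sigma_sq),
    # with a small epsilon added for numerical stability.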
a = (-1*(Variable(x)-mu).pow(2)/(2*sigma_sq + 1e-5)).exp()
b = 1/(2*sigma_sq*pi.expand_as(sigma_sq) + 1e-5).sqrt()
return a*b
def log(msg):
print("[%s]\t%s" % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), msg))
sys.stdout.flush()
def vlog(msg, v):
if v:
log(msg)
def load_encoder(obs_space, args, freeze=True):
enc = Encoder(obs_space, args.dim,
use_conv=args.use_conv)
enc_state = torch.load(args.dynamics_module, map_location=lambda storage,
loc: storage)['enc']
enc.load_state_dict(enc_state)
enc.eval()
if freeze:
for p in enc.parameters():
p.requires_grad = False
return enc
def load_d_module(action_space, args, freeze=True):
d_module_state = torch.load(args.dynamics_module, map_location=lambda storage,
loc: storage)['d_module']
d_module = D_Module(action_space, args.dim, args.discrete)
d_module.load_state_dict(d_module_state)
d_module.eval()
if freeze:
for p in d_module.parameters():
p.requires_grad = False
return d_module
def get_action(logit, discrete, v=False):
"""Compute action, entropy, and log prob for discrete and continuous case
from logit.
"""
if discrete:
prob = F.softmax(logit)
log_prob = F.log_softmax(logit)
# why entropy regularization ?
entropy = -(log_prob * prob).sum(1, keepdim=True)
action = prob.multinomial()
log_prob = log_prob.gather(1, action)
else:
mu, sigma_sq = logit
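        # softplus maps the raw network output to a strictly positive variance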
sigma_sq = F.softplus(sigma_sq)
vlog('sigma_sq: %s' % str(sigma_sq.data), v)
action = torch.normal(mu, sigma_sq)
prob = get_prob(action.data, mu, sigma_sq) + 1e-5
entropy = -0.5*((2 * sigma_sq * pi.expand_as(sigma_sq) + 1e-5).log() + 1)
log_prob = prob.log()
return action, entropy, log_prob
def eval_action(logit, action, discrete, v=False):
mu, sigma_sq = logit
sigma_sq = F.softplus(sigma_sq)
vlog('sigma_sq: %s' % str(sigma_sq.data), v)
prob = get_prob(action.data, mu, sigma_sq) + 1e-5
entropy = -0.5*((2 * sigma_sq * pi.expand_as(sigma_sq) + 1e-5).log() + 1)
log_prob = prob.log()
return entropy, log_prob
def mcts(env, z_hat, r_module, d_module, enc, r_state, d_state, args, discrete,
use_env=False):
    import numpy as np
    import torch
    import torch.nn.functional as F
    from torch.autograd import Variable
    from common import get_action
    from envs import get_obs
    from eval import prune  # prune() is defined in eval.py
(hx_r, cx_r) = r_state
(hx_d, cx_d) = d_state
parent_states = [(z_hat, [], (hx_r, cx_r), (hx_d, cx_d), [], [], [])]
child_states = []
init_state = get_obs(env, args.framework)
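    # Beam-style planning: expand the tree for args.d steps, sampling args.b
    # candidate actions per state, and prune the frontier back to args.b states
    # after every expansion.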
for i in range(args.d):
actions = []
best_val = None
for z_hat, trajectory, (hx_r, cx_r), (hx_d, cx_d), val, entropies, \
logprobs in parent_states:
if best_val is None:
best_val = val
elif val < best_val:
continue
value, logit, (hx_r_prime, cx_r_prime) = r_module(
(z_hat, (hx_r, cx_r)))
val.append(value)
if not discrete:
for b in range(args.b):
action, entropy, log_prob = get_action(
logit, discrete=False, v=args.v)
actions.append((action, entropy, log_prob))
else:
prob = F.softmax(logit)
                actions = np.argpartition(prob.data.numpy(), args.b)[:args.b]
for a, e, lp in actions:
if not use_env:
z_prime_hat, _, (hx_d_prime, cx_d_prime) = d_module(
(z_hat, z_hat, a, (hx_d, cx_d)))
else:
state = get_obs(env, args.framework)
for t in trajectory:
env.step(t.data.numpy())
s_prime, _, _, _ = env.step(a.data.numpy())
s_prime = Variable(torch.from_numpy(s_prime).float())
z_prime_hat = enc(s_prime).unsqueeze(0)
env.reset(state)
hx_d_prime, cx_d_prime = hx_d, cx_d
child_states.append(
(z_prime_hat, trajectory + [a], (hx_r_prime, cx_r_prime),
(hx_d_prime, cx_d_prime), val, entropies + [e], logprobs + [lp]))
        child_states = prune(child_states, args.b)
parent_states = child_states
child_states = []
# compute value of final state in each trajectory and choose best
best_val = sum(parent_states[0][4]).data[0,0]
best_ind = 0
for ind, (z, traj, hr, hd, v, _, _) in enumerate(parent_states):
vr, _, _ = r_module((z, hr))
v.append(vr)
if sum(v).data[0,0] > best_val:
best_ind = ind
best_val = sum(v).data[0,0]
return parent_states[best_ind]
|
ddr-master
|
common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
import argparse
import numpy as np
import os
import random
from operator import itemgetter
# Environment settings
parser = argparse.ArgumentParser(description='Eval DDR')
parser.add_argument('--dynamics-module', type=str, default=None,
help='Dynamics module')
parser.add_argument('--rewards-module', type=str, default=None,
help='Rewards module')
parser.add_argument('--num-processes', type=int, default=20,
help='how many training processes to use (default: 20)')
parser.add_argument('--N', type=int, default=1,
help='Number of episodes')
parser.add_argument('--rollout', type=int, default=20, help="rollout for goal")
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--render', action='store_true')
parser.add_argument('--out', type=str, default=None)
parser.add_argument('--max-episode-length', type=int, default=1000,
help='maximum length of an episode')
parser.add_argument('--framework', default='gym',
help='framework of env (default: gym)')
parser.add_argument('--env-name', default='InvertedPendulum-v1',
help='environment to train on (default: InvertedPendulum-v1)')
parser.add_argument('--maze-id', type=int, default=0)
parser.add_argument('--maze-length', type=int, default=1)
parser.add_argument('--log-interval', type=int, default=1)
parser.add_argument('--baseline', action='store_true')
parser.add_argument('--local', action='store_true',
help='running locally to render, no multiprocessing')
parser.add_argument('--single-env', action='store_true')
parser.add_argument('--coef-inner-rew', type=float, default=1.)
parser.add_argument('--mcts', action='store_true', help='Monte Carlo Tree Search')
parser.add_argument('-b', type=int, default=4, help='branching factor')
parser.add_argument('-d', type=int, default=3, help='planning depth')
parser.add_argument('--file-path', type=str, default=None,
help='path to XML file for mujoco')
parser.add_argument('--save-figs', action='store_true')
parser.add_argument('--neg-reward', action='store_true',
help='set reward negative for transfer')
parser.add_argument('--use-env', action='store_true', help='Use env with MCTS')
parser.add_argument('-v', action='store_true', help='verbose logging')
parser.add_argument('--difficulty', type=int, default=-1, help='difficulty of maze')
def prune(states, b):
"""Prune states down to length b, sorting by val."""
return sorted(states, key=itemgetter(4))[:b]
def test(block, args, d_args, r_args, d_module, r_module, enc, dec, q=None, rank=0):
import torch
from torch.autograd import Variable
from envs import create_env, reset_env, get_obs
    from common import get_action, log, mcts
seed = args.seed * 9823 + 194885 + rank # make sure doesn't copy train
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
i = 1
total_acc, total_reward = [], []
avg_succ, avg_reward, avg_len = 0, 0, 0
while len(total_acc) < block:
reward_sum, succ = 0, 0
actions = []
if args.single_env and i > 1:
reset_env(env, args)
else:
env = create_env(args.env_name, framework=args.framework, args=args, eval_flag=True)
done = False
step = 0
# Should the two LSTMs share a hidden state?
cx_r = Variable(torch.zeros(1, r_args.dim))
hx_r = Variable(torch.zeros(1, r_args.dim))
if not args.baseline:
cx_d = Variable(torch.zeros(1, d_args.dim))
hx_d = Variable(torch.zeros(1, d_args.dim))
while step < args.max_episode_length and not done:
# Encode state
state = get_obs(env, r_args.framework)
state = Variable(torch.from_numpy(state).float())
if not args.baseline:
z = enc(state)
z_prime_hat = z.unsqueeze(0)
else:
z_prime_hat = state.unsqueeze(0)
actions = []
if args.mcts:
z_prime_hat, actions, (hx_r, cx_r), (hx_d, cx_d), _, _, _ = mcts(
env, z_prime_hat, r_module, d_module, enc, (hx_r, cx_r),
(hx_d, cx_d), args, discrete=r_args.discrete,
use_env=args.use_env)
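            # Roll the policy forward for the remaining steps; unless running the
            # baseline, the dynamics module predicts each next latent state.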
for r in range(args.rollout - args.d):
value, logit, (hx_r, cx_r) = r_module(
(z_prime_hat, (hx_r, cx_r)))
action, entropy, log_prob = get_action(
logit, discrete=r_args.discrete)
actions.append(action)
if not args.baseline:
z_prime_hat, _, (hx_d, cx_d) = d_module(
(z_prime_hat, z_prime_hat, action, (hx_d, cx_d)))
if args.save_figs:
s_prime_hat = dec(z_prime_hat)
for action in actions[:args.rollout]:
_, reward, done, _ = env.step(action.data.numpy())
if args.render:
env.render()
reward_sum += reward
step += 1
if done:
succ = 1
break
U = 1. / i
total_acc.append(succ)
total_reward.append(reward_sum)
avg_succ = avg_succ * (1 - U) + succ * U
avg_reward = avg_reward * (1 - U) + reward_sum * U
avg_len = avg_len * (1 - U) + (step + 1) * U
if i % args.log_interval == 0:
log("Eval: {:d} episodes, avg succ {:.2f}, avg reward {:.2f}, avg length {:.2f}".format(
                len(total_acc), avg_succ, avg_reward, avg_len))
i += 1
if args.local:
return (sum(total_acc), len(total_acc), sum(total_reward), avg_len)
q.put((sum(total_acc), len(total_acc), sum(total_reward)))
if __name__ == '__main__':
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn')
from envs import *
from model import *
from common import *
# from ppo.model import MLPPolicy
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = ""
args = parser.parse_args()
if not args.mcts:
args.d = 0
log(args)
torch.manual_seed(args.seed)
d_args, d_module, enc, dec = None, None, None, None
r_state_dict, r_args = torch.load(args.rewards_module, map_location=lambda storage, loc: storage)
if args.single_env and hasattr(r_args, 'maze_structure'):
args.maze_structure = r_args.maze_structure
env = create_env(args.env_name, framework=args.framework, args=args, eval_flag=True)
r_module = R_Module(env.action_space.shape[0], r_args.dim,
discrete=r_args.discrete, baseline=r_args.baseline,
state_space=env.observation_space.shape[0])
r_module.load_state_dict(r_state_dict)
r_module.eval()
if not args.baseline:
if args.local:
r_args.dynamics_module = '/Users/amyzhang/ddr_for_tl' + r_args.dynamics_module[24:]
if args.dynamics_module is None:
d_dict = torch.load(r_args.dynamics_module, map_location=lambda storage, loc: storage)
else:
d_dict = torch.load(args.dynamics_module, map_location=lambda storage, loc: storage)
d_args = d_dict['args']
enc_state = d_dict['enc']
dec_state = d_dict['dec']
d_state_dict = d_dict['d_module']
d_module = D_Module(env.action_space.shape[0], d_args.dim, d_args.discrete)
d_module.load_state_dict(d_state_dict)
d_module.eval()
enc = Encoder(env.observation_space.shape[0], d_args.dim,
use_conv=d_args.use_conv)
dec = Decoder(env.observation_space.shape[0], d_args.dim,
use_conv=d_args.use_conv)
enc.load_state_dict(enc_state)
dec.load_state_dict(dec_state)
enc.eval()
dec.eval()
block = int(args.N / args.num_processes)
if args.local:
        all_succ, all_total, total_reward, avg_len = test(
            block, args, d_args, r_args, d_module, r_module, enc, dec)
else:
processes = []
queues = []
for rank in range(0, args.num_processes):
q = mp.Queue()
p = mp.Process(target=test, args=(
block, args, d_args, r_args, d_module, r_module, enc, dec, q, rank))
p.start()
processes.append(p)
queues.append(q)
for i, p in enumerate(processes):
log("Exit process %d" % i)
p.join()
all_succ = 0
all_total = 0
total_reward = 0
for q in queues:
while not q.empty():
succ, total, total_r = q.get()
all_succ += succ
all_total += total
total_reward += total_r
log("Success: %s, %s, %s" % (all_succ / all_total, all_succ, all_total))
log("Average Reward: %s" % (total_reward / all_total))
if args.out:
with open(args.out, 'a') as f:
f.write("Success: %s \n" % (all_succ / all_total))
|
ddr-master
|
eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
import datetime
import os
import time
import shutil
from itertools import chain
import dill
from arguments import get_args
if __name__ == '__main__':
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn')
import my_optim
from envs import create_env
from model import *
from test import test
from train_reward_module import train_rewards
from common import *
from train_dynamics_module import train_dynamics
from train_online import train_online
from eval_modules import eval_reward
from tensorboardX import SummaryWriter
os.environ['OMP_NUM_THREADS'] = '1'
args = get_args()
log(args)
if not args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ""
torch.manual_seed(args.seed)
args_param = vars(args)
toprint = ['seed', 'lr', 'entropy_coef', 'value_loss_coef', 'num_steps',
'dim']
if args.planning:
toprint += ['rollout']
env_name = args.env_name
if args.env_name.endswith("MazeEnv"):
env_name += 'mazeid%slength%s' % (args.maze_id, args.maze_length)
toprint += ['random_start', 'difficulty']
if args.baseline:
model_type = 'baseline'
if args.neg_reward:
model_type += '_neg_reward'
if args.file_path:
model_type += '_dynamics_transfer'
toprint += ['algo', 'gae', 'num_processes']
elif args.train_dynamics:
model_type = 'dynamics_planning'
toprint = ['lr', 'forward_loss_coef', 'dec_loss_coef', 'inv_loss_coef', 'rollout', 'dim',
'train_size']
# env_name = os.path.basename(args.train_set.strip('/'))
if args.single_env:
data_args = torch.load(os.path.join(args.train_set, 'args.pt'))
args.maze_structure = data_args.maze_structure
elif args.train_reward:
model_type = 'reward'
if args.neg_reward:
model_type += '_neg_reward'
if args.file_path:
model_type += '_dynamics_transfer'
toprint += ['algo', 'gae']
if args.planning:
model_type += '_planning'
elif args.train_online:
model_type = 'online'
toprint += ['lr', 'dec_loss_coef', 'inv_loss_coef', 'rollout', 'dim']
if args.transfer:
model_type += '_transfer'
name = ''
for arg in toprint:
name += '_{}{}'.format(arg, args_param[arg])
out_dir = os.path.join(args.out, env_name, model_type, name)
args.out = out_dir
dynamics_path = ''
if args.dynamics_module is not None and not args.baseline:
dynamics_path = args.dynamics_module.split('/')
dynamics_path = dynamics_path[-4] + dynamics_path[-2] +\
'_' + dynamics_path[-1].strip('.pt')
args.out = os.path.join(out_dir, dynamics_path)
os.makedirs(args.out, exist_ok=True)
# create the tensorboard summary writer here
tb_log_dir = os.path.join(args.log_dir, env_name, model_type, name,
dynamics_path, 'tb_logs')
print(tb_log_dir)
print(args.out)
if args.reset_dir:
shutil.rmtree(tb_log_dir, ignore_errors=True)
os.makedirs(tb_log_dir, exist_ok=True)
tb_writer = SummaryWriter(log_dir=tb_log_dir)
# dump all the arguments in the tb_log_dir
print(args, file=open(os.path.join(tb_log_dir, "arguments"), "w"))
env = create_env(args.env_name, framework=args.framework, args=args)
if args.train_dynamics:
train_dynamics(env, args, None) # tb_writer
if args.train_reward:
model_name = 'rewards_module'
if args.from_checkpoint is not None: # using curriculum
model_name += 'curr'
if args.single_env:
model_name += '_single_env'
args.maze_structure = env._env.MAZE_STRUCTURE
args.model_name = model_name
enc = None
d_module = None
assert args.dynamics_module is not None
enc = load_encoder(env.observation_space.shape[0], args)
if args.planning:
d_module = load_d_module(env.action_space.shape[0], args)
shared_model = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=args.baseline,
state_space=env.observation_space.shape[0])
# shared reward module for everyone
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
train_agent_method = None
total_args = args
train_agent_method = train_rewards
for rank in range(0, args.num_processes):
if rank==0:
p = mp.Process(target=train_agent_method, args=(
rank, total_args, shared_model, enc, optimizer, tb_log_dir,
d_module))
else:
p = mp.Process(target=train_agent_method, args=(
rank, total_args, shared_model, enc, optimizer, None, d_module))
p.start()
processes.append(p)
for p in processes:
p.join()
torch.save((shared_model.state_dict(), args), os.path.join(
args.out, model_name + '%s.pt' % args.num_episodes))
print(os.path.join(args.out, model_name))
if args.train_online:
model_name = 'rewards_module'
if args.from_checkpoint is not None: # using curriculum
model_name += 'curr'
if args.single_env:
model_name += '_single_env'
args.maze_structure = env._env.MAZE_STRUCTURE
args.model_name = model_name
shared_enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
shared_dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
shared_d_module = D_Module(env.action_space.shape[0], args.dim,
args.discrete)
shared_r_module = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=args.baseline,
state_space=env.observation_space.shape[0])
shared_enc.share_memory()
shared_dec.share_memory()
shared_d_module.share_memory()
shared_r_module.share_memory()
all_params = chain(shared_enc.parameters(), shared_dec.parameters(),
shared_d_module.parameters(),
shared_r_module.parameters())
shared_model = [shared_enc, shared_dec, shared_d_module, shared_r_module]
if args.single_env:
model_name += '_single_env'
args.maze_structure = env.MAZE_STRUCTURE
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(all_params, lr=args.lr)
optimizer.share_memory()
train_agent_method = train_online
processes = []
for rank in range(0, args.num_processes):
if rank==0:
p = mp.Process(target=train_agent_method, args=(
rank, args, shared_model, optimizer, tb_log_dir))
else:
p = mp.Process(target=train_agent_method, args=(
rank, args, shared_model, optimizer))
p.start()
processes.append(p)
# start an eval process here
eval_agent_method = eval_reward
p = mp.Process(target=eval_agent_method, args=(
args, shared_model, tb_log_dir))
p.start()
processes.append(p)
for p in processes:
p.join()
results_dict = {'args': args}
torch.save((shared_r_module.state_dict(), args), os.path.join(
args.out, 'reward_module%s.pt' % args.num_episodes))
results_dict['enc'] = shared_enc.state_dict()
results_dict['dec'] = shared_dec.state_dict()
results_dict['d_module'] = shared_d_module.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module%s.pt' % args.num_episodes))
log("Saved model %s" % os.path.join(args.out, model_name))
|
ddr-master
|
main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from envs import *
from model import Encoder, Decoder, D_Module, R_Module
from common import *
from tensorboardX import SummaryWriter
from itertools import chain
from eval import test
def eval_reward(args, shared_model, writer_dir=None):
"""
For evaluation
Arguments:
    - writer_dir: the tensorboard summary writer directory (note: can't get it working directly with the SummaryWriter object)
"""
writer = SummaryWriter(log_dir=os.path.join(writer_dir,'eval')) if writer_dir is not None else None
# current episode stats
episode_reward = episode_value_mse = episode_td_error = episode_pg_loss = episode_length = 0
# global stats
i_episode = 0
total_episode = total_steps = 0
num_goals_achieved = 0
# intilialize the env and models
torch.manual_seed(args.seed)
env = create_env(args.env_name, framework=args.framework, args=args)
set_seed(args.seed , env, args.framework)
shared_enc, shared_dec, shared_d_module, shared_r_module = shared_model
enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
d_module = D_Module(env.action_space.shape[0], args.dim, args.discrete)
r_module = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=False,
state_space=env.observation_space.shape[0])
all_params = chain(enc.parameters(), dec.parameters(),
d_module.parameters(),
r_module.parameters())
if args.from_checkpoint is not None:
model_state, _ = torch.load(args.from_checkpoint)
        # the checkpoint saved by main.py stores the reward module's parameters
        r_module.load_state_dict(model_state)
# set the model to evaluation mode
enc.eval()
dec.eval()
d_module.eval()
r_module.eval()
# reset the state
state = env.reset()
state = Variable(torch.from_numpy(state).float())
start = time.time()
while total_episode < args.num_episodes:
# Sync with the shared model
r_module.load_state_dict(shared_r_module.state_dict())
d_module.load_state_dict(shared_d_module.state_dict())
enc.load_state_dict(shared_enc.state_dict())
dec.load_state_dict(shared_dec.state_dict())
# reset stuff
cd_p = Variable(torch.zeros(1, args.lstm_dim))
hd_p = Variable(torch.zeros(1, args.lstm_dim))
# for the reward
cr_p = Variable(torch.zeros(1, args.lstm_dim))
hr_p = Variable(torch.zeros(1, args.lstm_dim))
i_episode += 1
episode_length = 0
episode_reward = 0
args.local = True
args.d = 0
succ, _, episode_reward, episode_length = test(
            1, args, args, args, d_module, r_module, enc, dec)
log("Eval: succ {:.2f}, reward {:.2f}, length {:.2f}".format(
succ, episode_reward, episode_length))
# Episode has ended, write the summaries here
if writer_dir is not None:
# current episode stats
writer.add_scalar('eval/episode_reward', episode_reward, i_episode)
writer.add_scalar('eval/episode_length', episode_length, i_episode)
writer.add_scalar('eval/success', succ, i_episode)
time.sleep(args.eval_every)
print("sleep")
|
ddr-master
|
eval_modules.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.optim as optim
class SharedAdam(optim.Adam):
"""Implements Adam algorithm with shared states.
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0):
super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)
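        # Pre-allocate the per-parameter optimizer state as tensors so that
        # share_memory() can move it into shared memory for all worker processes.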
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group['eps'])
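                # Bias-corrected step size: lr * sqrt(1 - beta2^t) / (1 - beta1^t)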
bias_correction1 = 1 - beta1**state['step'][0]
bias_correction2 = 1 - beta2**state['step'][0]
step_size = group['lr'] * math.sqrt(
bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-float(step_size.data.numpy()[0]))
return loss
|
ddr-master
|
my_optim.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.0,
top_p: float = 0.9,
max_seq_len: int = 192,
max_gen_len: int = 128,
max_batch_size: int = 4,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
'''def remove_non_ascii(s: str) -> str:
""" <FILL>
return result
''',
"""# Installation instructions:
```bash
<FILL>
```
This downloads the LLaMA inference code and installs the repository as a local pip package.
""",
"""class InterfaceManagerFactory(AbstractManagerFactory):
def __init__(<FILL>
def main():
factory = InterfaceManagerFactory(start=datetime.now())
managers = []
for i in range(10):
managers.append(factory.build(id=i))
""",
"""/-- A quasi-prefunctoid is 1-connected iff all its etalisations are 1-connected. -/
theorem connected_iff_etalisation [C D : precategoroid] (P : quasi_prefunctoid C D) :
π₁ P = 0 ↔ <FILL> = 0 :=
begin
split,
{ intros h f,
rw pi_1_etalisation at h,
simp [h],
refl
},
{ intro h,
have := @quasi_adjoint C D P,
simp [←pi_1_etalisation, this, h],
refl
}
end
""",
]
prefixes = [p.split("<FILL>")[0] for p in prompts]
suffixes = [p.split("<FILL>")[1] for p in prompts]
results = generator.text_infilling(
prefixes=prefixes,
suffixes=suffixes,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print("\n================= Prompt text =================\n")
print(prompt)
print("\n================= Filled text =================\n")
print(result["full_text"])
if __name__ == "__main__":
fire.Fire(main)
|
codellama-main
|
example_infilling.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.2,
top_p: float = 0.95,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
instructions = [
[
{
"role": "user",
"content": "In Bash, how do I list all text files in the current directory (excluding subdirectories) that have been modified in the last month?",
}
],
[
{
"role": "user",
"content": "What is the difference between inorder and preorder traversal? Give an example in Python.",
}
],
[
{
"role": "system",
"content": "Provide answers in JavaScript",
},
{
"role": "user",
"content": "Write a function that computes the set of sums of all contiguous sublists of a given list.",
}
],
]
results = generator.chat_completion(
instructions, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for instruction, result in zip(instructions, results):
for msg in instruction:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
codellama-main
|
example_instructions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.2,
top_p: float = 0.9,
max_seq_len: int = 256,
max_batch_size: int = 4,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"""\
import socket
def ping_exponential_backoff(host: str):""",
"""\
import argparse
def main(string: str):
print(string)
print(string[::-1])
if __name__ == "__main__":"""
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
codellama-main
|
example_completion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from setuptools import find_packages, setup
def get_requirements(path: str):
return [l.strip() for l in open(path)]
setup(
name="codellama",
version="0.0.1",
packages=find_packages(),
install_requires=get_requirements("requirements.txt"),
)
|
codellama-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import json
import os
import sys
import time
from pathlib import Path
from typing import List, Literal, Optional, Tuple, TypedDict
import torch
import torch.nn.functional as F
from fairscale.nn.model_parallel.initialize import (
get_model_parallel_rank,
initialize_model_parallel,
model_parallel_is_initialized,
)
from llama.model import ModelArgs, Transformer
from llama.tokenizer import Tokenizer
Role = Literal["system", "user", "assistant"]
class Message(TypedDict):
role: Role
content: str
class InfillingPrediction(TypedDict, total=False):
generation: str
full_text: str
tokens: List[str] # not required
logprobs: List[float] # not required
class CompletionPrediction(TypedDict, total=False):
generation: str
tokens: List[str] # not required
logprobs: List[float] # not required
class ChatPrediction(TypedDict, total=False):
generation: Message
tokens: List[str] # not required
logprobs: List[float] # not required
Dialog = List[Message]
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
SPECIAL_TAGS = [B_INST, E_INST, "<<SYS>>", "<</SYS>>"]
UNSAFE_ERROR = "Error: special tags are not allowed as part of the prompt."
class Llama:
@staticmethod
def build(
ckpt_dir: str,
tokenizer_path: str,
max_seq_len: int,
max_batch_size: int,
model_parallel_size: Optional[int] = None,
) -> "Llama":
if not torch.distributed.is_initialized():
torch.distributed.init_process_group("nccl")
if not model_parallel_is_initialized():
if model_parallel_size is None:
model_parallel_size = int(os.environ.get("WORLD_SIZE", 1))
initialize_model_parallel(model_parallel_size)
local_rank = int(os.environ.get("LOCAL_RANK", 0))
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(1)
if local_rank > 0:
sys.stdout = open(os.devnull, "w")
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}"
assert model_parallel_size == len(
checkpoints
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}"
ckpt_path = checkpoints[get_model_parallel_rank()]
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
**params,
)
tokenizer = Tokenizer(model_path=tokenizer_path)
model_args.vocab_size = tokenizer.n_words
if torch.cuda.is_bf16_supported():
torch.set_default_tensor_type(torch.cuda.BFloat16Tensor)
else:
torch.set_default_tensor_type(torch.cuda.HalfTensor)
model = Transformer(model_args)
model.load_state_dict(checkpoint, strict=False)
print(f"Loaded in {time.time() - start_time:.2f} seconds")
return Llama(model, tokenizer)
def __init__(self, model: Transformer, tokenizer: Tokenizer):
self.model = model
self.tokenizer = tokenizer
@torch.inference_mode()
def generate(
self,
prompt_tokens: List[List[int]],
max_gen_len: int,
temperature: float = 0.6,
top_p: float = 0.9,
logprobs: bool = False,
echo: bool = False,
stop_token: Optional[int] = None,
) -> Tuple[List[List[int]], Optional[List[List[float]]]]:
if stop_token is None:
stop_token = self.tokenizer.eos_id
params = self.model.params
bsz = len(prompt_tokens)
assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)
min_prompt_len = min(len(t) for t in prompt_tokens)
max_prompt_len = max(len(t) for t in prompt_tokens)
assert max_prompt_len <= params.max_seq_len
total_len = min(params.max_seq_len, max_gen_len + max_prompt_len)
pad_id = self.tokenizer.pad_id
tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long, device="cuda")
for k, t in enumerate(prompt_tokens):
tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
if logprobs:
token_logprobs = torch.zeros_like(tokens, dtype=torch.float)
prev_pos = 0
stop_reached = torch.tensor([False] * bsz, device="cuda")
input_text_mask = tokens != pad_id
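        # Incremental decoding: each iteration feeds only the tokens produced
        # since prev_pos; the model's KV cache supplies the earlier context.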
for cur_pos in range(min_prompt_len, total_len):
logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
if logprobs:
token_logprobs[:, prev_pos + 1 : cur_pos + 1] = -F.cross_entropy(
input=logits.transpose(1, 2),
target=tokens[:, prev_pos + 1 : cur_pos + 1],
reduction="none",
ignore_index=pad_id,
)
if temperature > 0:
probs = torch.softmax(logits[:, -1] / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits[:, -1], dim=-1)
next_token = next_token.reshape(-1)
# only replace token if prompt has already been generated
next_token = torch.where(
input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token
)
tokens[:, cur_pos] = next_token
stop_reached |= (~input_text_mask[:, cur_pos]) & (next_token == stop_token)
prev_pos = cur_pos
if all(stop_reached):
break
if logprobs:
token_logprobs = token_logprobs.tolist()
out_tokens, out_logprobs = [], []
for i, toks in enumerate(tokens.tolist()):
# cut to max gen len
start = 0 if echo else len(prompt_tokens[i])
toks = toks[start : len(prompt_tokens[i]) + max_gen_len]
probs = None
if logprobs:
probs = token_logprobs[i][start : len(prompt_tokens[i]) + max_gen_len]
# cut to stop token if present
if stop_token in toks:
stop_idx = toks.index(stop_token)
toks = toks[:stop_idx]
probs = probs[:stop_idx] if logprobs else None
out_tokens.append(toks)
out_logprobs.append(probs)
return (out_tokens, out_logprobs if logprobs else None)
def text_completion(
self,
prompts: List[str],
temperature: float = 0.6,
top_p: float = 0.9,
max_gen_len: Optional[int] = None,
logprobs: bool = False,
echo: bool = False,
) -> List[CompletionPrediction]:
if max_gen_len is None:
max_gen_len = self.model.params.max_seq_len - 1
prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]
generation_tokens, generation_logprobs = self.generate(
prompt_tokens=prompt_tokens,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
echo=echo,
)
if logprobs:
return [
{
"generation": self.tokenizer.decode(t),
"tokens": [self.tokenizer.decode(x) for x in t],
"logprobs": logprobs_i,
}
for t, logprobs_i in zip(generation_tokens, generation_logprobs)
]
return [{"generation": self.tokenizer.decode(t)} for t in generation_tokens]
def text_infilling(
self,
prefixes: List[str],
suffixes: List[str],
temperature: float = 0.6,
top_p: float = 0.9,
max_gen_len: Optional[int] = None,
logprobs: bool = False,
suffix_first: bool = False,
) -> List[InfillingPrediction]:
assert self.tokenizer.eot_id is not None
if max_gen_len is None:
max_gen_len = self.model.params.max_seq_len - 1
prompt_tokens = [
infilling_prompt_tokens(
self.tokenizer, prefix, suffix, suffix_first=suffix_first
)
for prefix, suffix in zip(prefixes, suffixes)
]
generation_tokens, generation_logprobs = self.generate(
prompt_tokens=prompt_tokens,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
echo=False,
stop_token=self.tokenizer.eot_id,
)
generations = [self.tokenizer.decode_infilling(t) for t in generation_tokens]
if logprobs:
return [
{
"generation": generation,
"logprobs": logprobs_i,
"tokens": t,
"full_text": prefix + generation + suffix,
}
for prefix, suffix, generation, t, logprobs_i in zip(
prefixes,
suffixes,
generations,
generation_tokens,
generation_logprobs,
)
]
else:
return [
{
"generation": generation,
"full_text": prefix + generation + suffix,
}
for prefix, suffix, generation in zip(prefixes, suffixes, generations)
]
def chat_completion(
self,
dialogs: List[Dialog],
temperature: float = 0.6,
top_p: float = 0.9,
max_gen_len: Optional[int] = None,
logprobs: bool = False,
) -> List[ChatPrediction]:
if max_gen_len is None:
max_gen_len = self.model.params.max_seq_len - 1
prompt_tokens = []
unsafe_requests = []
for dialog in dialogs:
unsafe_requests.append(
any([tag in msg["content"] for tag in SPECIAL_TAGS for msg in dialog])
)
if dialog[0]["role"] == "system":
dialog = [
{
"role": dialog[1]["role"],
"content": B_SYS
+ dialog[0]["content"]
+ E_SYS
+ dialog[1]["content"],
}
] + dialog[2:]
assert all([msg["role"] == "user" for msg in dialog[::2]]) and all(
[msg["role"] == "assistant" for msg in dialog[1::2]]
), (
"model only supports 'system', 'user' and 'assistant' roles, "
"starting with 'system', then 'user' and alternating (u/a/u/a/u...)"
)
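            # Each (user, assistant) turn is encoded as
            # "<s> [INST] {user} [/INST] {assistant} </s>"; the trailing user
            # message is appended below without a closing answer.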
dialog_tokens: List[int] = sum(
[
self.tokenizer.encode(
f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} ",
bos=True,
eos=True,
)
for prompt, answer in zip(
dialog[::2],
dialog[1::2],
)
],
[],
)
assert (
dialog[-1]["role"] == "user"
), f"Last message must be from user, got {dialog[-1]['role']}"
dialog_tokens += self.tokenizer.encode(
f"{B_INST} {(dialog[-1]['content']).strip()} {E_INST}",
bos=True,
eos=False,
)
prompt_tokens.append(dialog_tokens)
generation_tokens, generation_logprobs = self.generate(
prompt_tokens=prompt_tokens,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
)
if logprobs:
return [
{
"generation": {
"role": "assistant",
"content": self.tokenizer.decode(t)
if not unsafe
else UNSAFE_ERROR,
},
"tokens": [self.tokenizer.decode(x) for x in t],
"logprobs": logprobs_i,
}
for t, logprobs_i, unsafe in zip(
generation_tokens, generation_logprobs, unsafe_requests
)
]
return [
{
"generation": {
"role": "assistant",
"content": self.tokenizer.decode(t) if not unsafe else UNSAFE_ERROR,
}
}
for t, unsafe in zip(generation_tokens, unsafe_requests)
]
def sample_top_p(probs, p):
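    # Nucleus (top-p) sampling: sort the probabilities, keep the smallest set of
    # tokens whose cumulative mass reaches p, renormalize, and sample from it.
    # E.g. with probs [0.5, 0.3, 0.15, 0.05] and p = 0.9, the last token is
    # masked out and sampling happens over the first three after renormalizing.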
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
next_token = torch.gather(probs_idx, -1, next_token)
return next_token
def infilling_prompt_tokens(
tokenizer: Tokenizer,
pre: str,
suf: str,
suffix_first: bool = False,
) -> List[int]:
"""
Format and encode an infilling problem.
If `suffix_first` is set, format in suffix-prefix-middle format.
"""
assert tokenizer.prefix_id is not None
assert tokenizer.middle_id is not None
assert tokenizer.suffix_id is not None
if suffix_first:
# format as "<PRE> <SUF>{suf} <MID> {pre}"
return (
[tokenizer.bos_id, tokenizer.prefix_id, tokenizer.suffix_id]
+ tokenizer.encode_infilling(suf)
+ [tokenizer.middle_id]
+ tokenizer.encode(pre, bos=False, eos=False)
)
else:
# format as "<PRE> {pre} <SUF>{suf} <MID>"
return (
[tokenizer.bos_id, tokenizer.prefix_id]
+ tokenizer.encode(pre, bos=False, eos=False)
+ [tokenizer.suffix_id]
+ tokenizer.encode_infilling(suf)
+ [tokenizer.middle_id]
)
|
codellama-main
|
llama/generation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
codellama-main
|
llama/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple
import fairscale.nn.model_parallel.initialize as fs_init
import torch
import torch.nn.functional as F
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
ParallelEmbedding,
RowParallelLinear,
)
from torch import nn
@dataclass
class ModelArgs:
dim: int = 4096
n_layers: int = 32
n_heads: int = 32
n_kv_heads: Optional[int] = None
vocab_size: int = -1 # defined later by tokenizer
multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
ffn_dim_multiplier: Optional[float] = None
norm_eps: float = 1e-5
rope_theta: float = 10000
max_batch_size: int = 32
max_seq_len: int = 2048
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
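    # Precompute the rotary-embedding rotations: for each position t and channel
    # pair 2k, the complex phase exp(i * t * theta^(-2k/dim)).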
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device, dtype=torch.float32) # type: ignore
freqs = torch.outer(t, freqs) # type: ignore
freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
return freqs_cis
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
ndim = x.ndim
assert 0 <= 1 < ndim
assert freqs_cis.shape == (x.shape[1], x.shape[-1])
shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
return freqs_cis.view(*shape)
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
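    # View consecutive channel pairs as complex numbers and rotate them by the
    # position-dependent phases in freqs_cis (rotary position embedding).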
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
"""torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
bs, slen, n_kv_heads, head_dim = x.shape
if n_rep == 1:
return x
return (
x[:, :, :, None, :]
.expand(bs, slen, n_kv_heads, n_rep, head_dim)
.reshape(bs, slen, n_kv_heads * n_rep, head_dim)
)
class Attention(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = fs_init.get_model_parallel_world_size()
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = ColumnParallelLinear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wk = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wv = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wo = RowParallelLinear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
input_is_parallel=True,
init_method=lambda x: x,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
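        # Write the new keys/values into the cache at [start_pos, start_pos + seqlen)
        # and attend over everything cached so far.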
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads
keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
keys = keys.transpose(1, 2)
values = values.transpose(1, 2)
scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
if mask is not None:
scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen)
scores = F.softmax(scores.float(), dim=-1).type_as(xq)
output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)
output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
return self.wo(output)
class FeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
ffn_dim_multiplier: Optional[float],
):
super().__init__()
hidden_dim = int(2 * hidden_dim / 3)
# custom dim factor multiplier
if ffn_dim_multiplier is not None:
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
self.w1 = ColumnParallelLinear(
dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
)
self.w2 = RowParallelLinear(
hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x
)
self.w3 = ColumnParallelLinear(
dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
)
def forward(self, x):
return self.w2(F.silu(self.w1(x)) * self.w3(x))
class TransformerBlock(nn.Module):
def __init__(self, layer_id: int, args: ModelArgs):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.head_dim = args.dim // args.n_heads
self.attention = Attention(args)
self.feed_forward = FeedForward(
dim=args.dim,
hidden_dim=4 * args.dim,
multiple_of=args.multiple_of,
ffn_dim_multiplier=args.ffn_dim_multiplier,
)
self.layer_id = layer_id
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
h = x + self.attention.forward(
self.attention_norm(x), start_pos, freqs_cis, mask
)
out = h + self.feed_forward.forward(self.ffn_norm(h))
return out
class Transformer(nn.Module):
def __init__(self, params: ModelArgs):
super().__init__()
self.params = params
self.vocab_size = params.vocab_size
self.n_layers = params.n_layers
self.tok_embeddings = ParallelEmbedding(
params.vocab_size, params.dim, init_method=lambda x: x
)
self.layers = torch.nn.ModuleList()
for layer_id in range(params.n_layers):
self.layers.append(TransformerBlock(layer_id, params))
self.norm = RMSNorm(params.dim, eps=params.norm_eps)
self.output = ColumnParallelLinear(
params.dim, params.vocab_size, bias=False, init_method=lambda x: x
)
self.freqs_cis = precompute_freqs_cis(
self.params.dim // self.params.n_heads,
self.params.max_seq_len * 2,
params.rope_theta,
)
@torch.inference_mode()
def forward(self, tokens: torch.Tensor, start_pos: int):
_bsz, seqlen = tokens.shape
h = self.tok_embeddings(tokens)
self.freqs_cis = self.freqs_cis.to(h.device)
freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
mask = None
if seqlen > 1:
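            # Causal mask over the prompt chunk: entries above the diagonal are
            # set to -inf so position i can only attend to positions j <= i.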
mask = torch.full(
(1, 1, seqlen, seqlen), float("-inf"), device=tokens.device
)
mask = mask.to(torch.float32).triu(diagonal=start_pos+1).type_as(h)
for layer in self.layers:
h = layer(h, start_pos, freqs_cis, mask)
h = self.norm(h)
output = self.output(h).float()
return output
|
codellama-main
|
llama/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List, Optional
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
# token IDs for special infilling tokens
self.prefix_id: Optional[int] = self.sp_model.piece_to_id("▁<PRE>") or None
self.middle_id: Optional[int] = self.sp_model.piece_to_id("▁<MID>") or None
self.suffix_id: Optional[int] = self.sp_model.piece_to_id("▁<SUF>") or None
self.eot_id: Optional[int] = self.sp_model.piece_to_id("▁<EOT>") or None
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id} "
f"- PRE ID: {self.prefix_id} - MID ID: {self.middle_id} - SUF ID: {self.suffix_id} - EOT ID: {self.eot_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
def encode_infilling(self, s: str) -> List[int]:
"""Encode a string without an implicit leading space."""
return self.sp_model.encode("☺" + s)[2:]
def decode_infilling(self, t: List[int]) -> str:
"""Decode a string without an implicit leading space."""
return self.sp_model.decode([self.sp_model.piece_to_id("☺")] + t)[1:]
|
codellama-main
|
llama/tokenizer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import convnet, coordinates
class FiLMed(nn.Module):
"""
Implements a FiLMed block.
"""
def __init__(self, num_conv_filts_in, num_conv_filts, stride, dilation):
super(FiLMed, self).__init__()
self.conv1 = nn.Conv2d(num_conv_filts_in,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.conv2 = nn.Conv2d(num_conv_filts,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.batchnorm2 = nn.BatchNorm2d(num_conv_filts, affine=False)
def forward(self, x, gamma, beta):
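        # FiLM modulation: scale (gamma) and shift (beta) the normalized feature
        # maps channel-wise, then add the residual from the first convolution.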
b1 = F.relu(self.conv1(x))
b2 = self.batchnorm2(self.conv2(b1))
gamma = gamma.unsqueeze(2).unsqueeze(3).expand_as(b2)
beta = beta.unsqueeze(2).unsqueeze(3).expand_as(b2)
b2 = F.relu((b2 * gamma) + beta)
return (b1 + b2)
class FiLM(nn.Module):
"""
Implements FiLM.
"""
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base,
use_coordinates,
num_conv_filts_film,
num_conv_layers_film,
stride_film,
dilation_film,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
output_hidden_dim,
output_dim):
super(FiLM, self).__init__()
self.bidirectional = bidirectional
self.use_coordinates = use_coordinates
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
# Compute required output dimension given convnet specs
# * 2 for gamma and beta. Assumes constant num filters per layer
num_feats = num_conv_filts_film * num_conv_layers_film * 2
self.num_conv_filts_film = num_conv_filts_film
self.num_conv_layers_film = num_conv_layers_film
self.decoder = nn.Linear(lstm_output_dim_q, num_feats)
# Base convnet
self.conv, num_channels, _ = convnet(num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base)
# Filmed convnet
self.film_conv_modules = []
for i in range(num_conv_layers_film):
num_channels += 2 if use_coordinates else 0
fcm = FiLMed(num_channels,
num_conv_filts_film,
stride_film,
dilation_film)
num_channels = num_conv_filts_film
self.film_conv_modules.append(fcm)
self.add_module('film_module_%d' % i, fcm)
num_conv_filts_film += 2 if use_coordinates else 0
self.conv1 = nn.Conv2d(num_conv_filts_film,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
self.use_coordinates_class = (use_coordinates
and fcn_coeff_dim > 1
and fcn_temp_dim > 1)
fcn_output_dim += 2 if self.use_coordinates_class else 0
adaptive_pool_dim = fcn_output_dim * fcn_coeff_dim * fcn_temp_dim
self.output = nn.Sequential(nn.Linear(adaptive_pool_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
enc_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
enc_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
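        # One (gamma, beta) pair per FiLMed layer and per channel is predicted
        # from the question encoding.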
gammas_betas = self.decoder(enc_q)
gammas_betas = gammas_betas.view(gammas_betas.size(0),
self.num_conv_layers_film,
self.num_conv_filts_film,
2)
a = torch.unsqueeze(a, 1)
a = self.conv(a)
for i, fcm in enumerate(self.film_conv_modules):
# Append coordinate maps
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
# see FiLM appendix for + 1
a = fcm(a, gammas_betas[:, i, :, 0] + 1, gammas_betas[:, i, :, 1])
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = self.conv1(a)
a = self.pool(a)
if self.use_coordinates_class:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = a.view(a.size(0), -1)
output = self.output(a)
return output
|
daqa-master
|
daqa-mod/film.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from layers import StackedAttention, StackedAttention1D, convnet, coordinates
class LSTMN(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
input_dim,
lstm_hidden_dim_a,
num_lstm_layers_a,
output_hidden_dim,
output_dim):
super(LSTMN, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
self.lstm_a = nn.LSTM(input_dim,
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_q = lstm_hidden_dim_q
lstm_output_dim_a = lstm_hidden_dim_a
lstm_output_dim = lstm_output_dim_q + lstm_output_dim_a
self.output = nn.Sequential(nn.Linear(lstm_output_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
# self.lstm_a.flatten_parameters()
lstm_a, _ = self.lstm_a(a)
if self.bidirectional:
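# Split the bidirectional LSTM outputs into forward/backward halves, take each
# direction's state at the last valid timestep, and concatenate the two directions.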
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
cat_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
len_a - 1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
len_a - 1, 1]),
dim=1)
else:
cat_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), len_a - 1]
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
cat = torch.cat((cat_a, cat_q), 1)
output = self.output(cat)
return output
class FCNLSTMN(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts,
num_conv_layers,
stride,
dilation,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
output_hidden_dim,
output_dim):
super(FCNLSTMN, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
self.conv, num_channels, _ = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.conv1 = nn.Conv2d(num_channels,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
lstm_output_dim = lstm_output_dim_q \
+ (fcn_output_dim * fcn_coeff_dim * fcn_temp_dim)
self.output = nn.Sequential(nn.Linear(lstm_output_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
a = torch.unsqueeze(a, 1)
conv_a = self.conv(a)
conv1_a = self.conv1(conv_a)
pool_a = self.pool(conv1_a)
cat_a = pool_a.view(pool_a.size(0), -1)
cat = torch.cat((cat_a, cat_q), 1)
output = self.output(cat)
return output
class CONVLSTMN(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
input_dim,
num_conv_filts,
num_conv_layers,
stride,
dilation,
lstm_hidden_dim_a,
num_lstm_layers_a,
output_hidden_dim,
output_dim):
super(CONVLSTMN, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
self.conv, num_channels, conv_red_dim = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.lstm_a = nn.LSTM(num_channels * int(input_dim / conv_red_dim),
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_q = lstm_hidden_dim_q
lstm_output_dim_a = lstm_hidden_dim_a
lstm_output_dim = lstm_output_dim_q + lstm_output_dim_a
self.output = nn.Sequential(nn.Linear(lstm_output_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
a = torch.unsqueeze(a, 1)
a = self.conv(a)
a = a.permute(0, 2, 1, 3).contiguous()
a = a.view(a.size(0), a.size(1), a.size(2) * a.size(3))
lstm_a, _ = self.lstm_a(a)
if self.bidirectional:
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 1]),
dim=1)
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), -1]
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
cat = torch.cat((cat_a, cat_q), 1)
output = self.output(cat)
return output
class FCNLSTMNSA(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts,
num_conv_layers,
stride,
dilation,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
use_coordinates,
stacked_att_dim,
num_stacked_att,
output_hidden_dim,
output_dim):
super(FCNLSTMNSA, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
self.conv, num_channels, _ = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.conv1 = nn.Conv2d(num_channels,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
self.use_coordinates = (use_coordinates
and fcn_coeff_dim > 1
and fcn_temp_dim > 1)
fcn_output_dim += 2 if self.use_coordinates else 0
self.projection = nn.Conv2d(fcn_output_dim,
lstm_output_dim_q,
kernel_size=1,
padding=0)
self.stacked_att = []
for i in range(num_stacked_att):
sa = StackedAttention(lstm_output_dim_q, stacked_att_dim)
self.stacked_att.append(sa)
self.add_module('stacked_att_%d' % i, sa)
self.output = nn.Sequential(nn.Linear(lstm_output_dim_q, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
a = torch.unsqueeze(a, 1)
conv_a = self.conv(a)
conv1_a = self.conv1(conv_a)
pool_a = self.pool(conv1_a)
if self.use_coordinates:
coo = coordinates(pool_a.shape[2], pool_a.shape[3]).to(pool_a.device)
pool_a = torch.cat((pool_a, coo.expand(pool_a.size(0), -1, -1, -1)), 1)
pool_a = torch.tanh(self.projection(pool_a))
for sa in self.stacked_att:
cat_q = sa(pool_a, cat_q)
output = self.output(cat_q)
return output
class CONVLSTMNSA(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
input_dim,
num_conv_filts,
num_conv_layers,
stride,
dilation,
lstm_hidden_dim_a,
num_lstm_layers_a,
stacked_att_dim,
num_stacked_att,
output_hidden_dim,
output_dim):
super(CONVLSTMNSA, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
self.conv, num_channels, conv_red_dim = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.lstm_a = nn.LSTM(num_channels * int(input_dim / conv_red_dim),
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_q = lstm_hidden_dim_q
lstm_output_dim_a = lstm_hidden_dim_a
self.projection = nn.Linear(lstm_output_dim_a, lstm_output_dim_q)
self.stacked_att = []
for i in range(num_stacked_att):
sa = StackedAttention1D(lstm_output_dim_q, stacked_att_dim)
self.stacked_att.append(sa)
self.add_module('stacked_att_%d' % i, sa)
self.output = nn.Sequential(nn.Linear(lstm_output_dim_q, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
a = torch.unsqueeze(a, 1)
a = self.conv(a)
a = a.permute(0, 2, 1, 3).contiguous()
a = a.view(a.size(0), a.size(1), a.size(2) * a.size(3))
# self.lstm_a.flatten_parameters()
lstm_a, _ = self.lstm_a(a)
if self.bidirectional:
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 1]),
dim=1)
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), -1]
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
cat_a = torch.tanh(self.projection(cat_a)) # cat_a.size() == cat_q.size()
for sa in self.stacked_att:
cat_q = sa(cat_a, cat_q)
output = self.output(cat_q)
return output
|
daqa-master
|
daqa-mod/models.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import convnet, coordinates
class FiLM(nn.Module):
"""
Implements a FiLM block.
"""
def __init__(self, num_conv_filts_in, num_conv_filts, stride, dilation):
super(FiLM, self).__init__()
self.conv1 = nn.Conv2d(num_conv_filts_in,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.conv2 = nn.Conv2d(num_conv_filts,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.batchnorm2 = nn.BatchNorm2d(num_conv_filts, affine=False)
def forward(self, x, gamma, beta):
b1 = F.relu(self.conv1(x))
b2 = self.batchnorm2(self.conv2(b1))
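# Broadcast the per-channel (gamma, beta) over the spatial dims, apply the
# feature-wise affine modulation to the normalized features, and keep a residual path from b1.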
gamma = gamma.unsqueeze(2).unsqueeze(3).expand_as(b2)
beta = beta.unsqueeze(2).unsqueeze(3).expand_as(b2)
b2 = F.relu((b2 * gamma) + beta)
return (b1 + b2)
class MALiMo(nn.Module):
"""
Implements MALiMo.
"""
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base,
input_dim,
a_aggregate,
lstm_hidden_dim_a,
num_lstm_layers_a,
use_coordinates,
num_conv_filts_film,
num_conv_layers_film,
stride_film,
dilation_film,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
output_hidden_dim,
output_dim):
super(MALiMo, self).__init__()
self.bidirectional = bidirectional
self.use_coordinates = use_coordinates
# Base convnet
self.conv, num_channels, freq_red = convnet(num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base)
# Compute required output dimension given convnet specs
# * 2 for gamma and beta. Assumes constant num filters per layer
num_feats = num_conv_filts_film * num_conv_layers_film * 2
self.num_conv_filts_film = num_conv_filts_film
self.num_conv_layers_film = num_conv_layers_film
# Audio Controller
if a_aggregate == 'max':
self.a_decoder_pool = nn.MaxPool2d(
kernel_size=(input_dim // freq_red, 8),
stride=(input_dim // freq_red, 8))
elif a_aggregate == 'mean':
self.a_decoder_pool = nn.AvgPool2d(
kernel_size=(input_dim // freq_red, 8),
stride=(input_dim // freq_red, 8))

else:
assert False, 'Unknown aggregate function.'
self.lstm_a = nn.LSTM(num_channels,
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_a = lstm_hidden_dim_a
self.audio_decoder = nn.Linear(lstm_output_dim_a, num_feats)
# Question Controller
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
self.question_decoder = nn.Linear(lstm_output_dim_q, num_feats)
# Modulated Layers
self.a_modulated_modules = []
self.q_modulated_modules = []
for i in range(num_conv_layers_film):
num_channels += 2 if use_coordinates else 0
afcm = FiLM(num_channels,
num_conv_filts_film,
stride_film,
dilation_film)
self.a_modulated_modules.append(afcm)
self.add_module('a_modulated_module_%d' % i, afcm)
num_channels = num_conv_filts_film
num_channels += 2 if use_coordinates else 0
qfcm = FiLM(num_channels,
num_conv_filts_film,
stride_film,
dilation_film)
self.q_modulated_modules.append(qfcm)
self.add_module('q_modulated_module_%d' % i, qfcm)
num_channels = num_conv_filts_film
num_conv_filts_film += 2 if use_coordinates else 0
self.conv1 = nn.Conv2d(num_conv_filts_film,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
# Classifier
self.use_coordinates_class = (use_coordinates
and fcn_coeff_dim > 1
and fcn_temp_dim > 1)
fcn_output_dim += 2 if self.use_coordinates_class else 0
adaptive_pool_dim = fcn_output_dim * fcn_coeff_dim * fcn_temp_dim
self.output = nn.Sequential(nn.Linear(adaptive_pool_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
def forward(self, a, len_a, q, len_q):
# Base convnet
a = torch.unsqueeze(a, 1)
a = self.conv(a)
# Audio Controller
pooled_a = self.a_decoder_pool(a)
pooled_a = torch.transpose(pooled_a, 1, 2)
pooled_a = pooled_a.view(pooled_a.size(0),
pooled_a.size(1),
pooled_a.size(2) * pooled_a.size(3))
lstm_a, _ = self.lstm_a(pooled_a)
if self.bidirectional:
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
enc_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 1]),
dim=1)
else:
enc_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), -1]
a_gammas_betas = self.audio_decoder(enc_a)
a_gammas_betas = a_gammas_betas.view(a_gammas_betas.size(0),
self.num_conv_layers_film,
self.num_conv_filts_film,
2)
# Question Controller
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
enc_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
enc_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
q_gammas_betas = self.question_decoder(enc_q)
q_gammas_betas = q_gammas_betas.view(q_gammas_betas.size(0),
self.num_conv_layers_film,
self.num_conv_filts_film,
2)
# Modulated Layers
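# Each FiLM stage is modulated twice per layer: first with the audio controller's
# (gamma, beta), then with the question controller's, with coordinate maps
# optionally appended before each block.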
for i, (afcm, qfcm) in enumerate(zip(self.a_modulated_modules,
self.q_modulated_modules)):
# Append coordinate maps
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
# see FiLM appendix for + 1
a = afcm(a, a_gammas_betas[:, i, :, 0] + 1, a_gammas_betas[:, i, :, 1])
# Append coordinate maps
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
# see FiLM appendix for + 1
a = qfcm(a, q_gammas_betas[:, i, :, 0] + 1, q_gammas_betas[:, i, :, 1])
# Classifier
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = self.conv1(a)
a = self.pool(a)
if self.use_coordinates_class:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = a.view(a.size(0), -1)
output = self.output(a)
return output
|
daqa-master
|
daqa-mod/malimo.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
# import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets # NOQA F401
from data import DAQA
from models import LSTMN, FCNLSTMN, CONVLSTMN, FCNLSTMNSA, CONVLSTMNSA
from film import FiLM
from malimo import MALiMo
# Training settings
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--audio-training-set', type=str,
default='daqa_audio_train.h5',
help='Path to training audio features (HDF5).')
parser.add_argument('--qa-training-set', type=str,
default='daqa_train_questions_answers.json',
help='Path to training questions and answers (JSON).')
parser.add_argument('--audio-test-set', type=str,
default='daqa_audio_val.h5',
help='Path to test audio features (HDF5).')
parser.add_argument('--qa-test-set', type=str,
default='daqa_val_questions_answers.json',
help='Path to test questions and answers (JSON).')
# Settings
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='Random seed.')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disable CUDA.')
parser.add_argument('--multi-gpus', action='store_true', default=False,
help='Use all available GPUs.')
parser.add_argument('--distributed-parallel', action='store_true', default=False,
help='Distributed data parallel mode.')
parser.add_argument('--resume', action='store_true', default=False,
help='Resume training.')
parser.add_argument('--model', type=str, default='malimo',
help='Model to train.')
parser.add_argument('--embedding-dim', type=int, default=256,
help='Size of embedding layer.')
parser.add_argument('--lstm-hidden-dim-q', type=int, default=128,
help='Size of layer(s) in LSTM.')
parser.add_argument('--num-lstm-layers-q', type=int, default=1,
help='Number of layers in LSTM.')
parser.add_argument('--bidirectional', action='store_true', default=False,
help='Bidirectional LSTM.')
parser.add_argument('--num-conv-filts', type=int, default=16,
help='Number of filters in first layer in ConvNet.')
parser.add_argument('--num-conv-layers', type=int, default=5,
help='Number of layers in ConvNet.')
parser.add_argument('--stride', type=int, default=1,
help='Convolution stride.')
parser.add_argument('--dilation', type=int, default=1,
help='Convolution dilation.')
parser.add_argument('--fcn-output-dim', type=int, default=256,
help='Number of filters in final FCN layer.')
parser.add_argument('--fcn-coeff-dim', type=int, default=1,
help='Dimension along coefficients in adaptive pooling.')
parser.add_argument('--fcn-temp-dim', type=int, default=1,
help='Dimension along time in adaptive pooling.')
parser.add_argument('--aggregate', type=str, default='mean',
help='Function to aggregate over variable size input.')
parser.add_argument('--lstm-hidden-dim-a', type=int, default=128,
help='Size of layer(s) in LSTM.')
parser.add_argument('--num-lstm-layers-a', type=int, default=1,
help='Number of layers in LSTM.')
parser.add_argument('--stacked-att-dim', type=int, default=512,
help='Stacked attention layer dimension.')
parser.add_argument('--num-stacked-att', type=int, default=2,
help='Number of stacked attention layers.')
parser.add_argument('--use-coordinates', action='store_true', default=False,
help='Append coordinates to feature maps.')
parser.add_argument('--num-conv-filts-film', type=int, default=64,
help='Number of filters in first layer in film ConvNet.')
parser.add_argument('--num-conv-layers-film', type=int, default=2,
help='Number of layers in film ConvNet.')
parser.add_argument('--output-hidden-dim', type=int, default=1024,
help='Dimension of hidden layer before output layer.')
parser.add_argument('--optimizer', type=str, default='adam',
help='Optimizer.')
parser.add_argument('--lr', type=float, default=0.0001, metavar='L',
help='Learning rate.')
parser.add_argument('--l2', type=float, default=0.0001, metavar='M',
help='Weight decay.')
parser.add_argument('--dropout', type=float, default=0.0, metavar='R',
help='Dropout rate.')
parser.add_argument('--batch-size', type=int, default=1, metavar='N',
help='Batch size for training.')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='N',
help='Batch size for testing.')
parser.add_argument('--epochs', type=int, default=10, metavar='T',
help='Number of epochs to train.')
parser.add_argument('--early-stopping', action='store_true', default=False,
help='Early stopping.')
parser.add_argument('--anneal-learning-rate', action='store_true', default=False,
help='Anneal Learning Rate.')
parser.add_argument('--patience', type=int, default=10, metavar='P',
help='Number of epochs before early stopping.')
# Output
parser.add_argument('--show-log', action='store_true', default=False,
help='Log training status.')
parser.add_argument('--log-interval', type=int, default=1000, metavar='I',
help='Number of batches between logging status updates.')
parser.add_argument('--save-model', action='store_true', default=False,
help='Save current model.')
parser.add_argument('--model-dir', type=str, default='models',
help='Path to model.')
parser.add_argument('--model-name', type=str, default='model.pt',
help='Model name.')
parser.add_argument('--infer-only', action='store_true', default=False,
help='Run in test mode only.')
def build_model(args, vocab_dim, padding_idx, input_dim, output_dim):
if args.model == 'lstmn':
model = LSTMN(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
input_dim=input_dim,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'fcnlstmn':
model = FCNLSTMN(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'convlstmn':
model = CONVLSTMN(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
input_dim=input_dim,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'fcnlstmnsa':
model = FCNLSTMNSA(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
use_coordinates=args.use_coordinates,
stacked_att_dim=args.stacked_att_dim,
num_stacked_att=args.num_stacked_att,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'convlstmnsa':
model = CONVLSTMNSA(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
input_dim=input_dim,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
stacked_att_dim=args.stacked_att_dim,
num_stacked_att=args.num_stacked_att,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'film':
model = FiLM(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts_base=args.num_conv_filts,
num_conv_layers_base=args.num_conv_layers,
stride_base=args.stride,
dilation_base=args.dilation,
use_coordinates=args.use_coordinates,
num_conv_filts_film=args.num_conv_filts_film,
num_conv_layers_film=args.num_conv_layers_film,
stride_film=args.stride,
dilation_film=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'malimo':
model = MALiMo(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts_base=args.num_conv_filts,
num_conv_layers_base=args.num_conv_layers,
stride_base=args.stride,
dilation_base=args.dilation,
input_dim=input_dim,
a_aggregate=args.aggregate,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
use_coordinates=args.use_coordinates,
num_conv_filts_film=args.num_conv_filts_film,
num_conv_layers_film=args.num_conv_layers_film,
stride_film=args.stride,
dilation_film=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
else:
assert False, 'Unknown model.'
return model
def save_state(args, epoch, model, optimizer, scheduler, train_loss, train_perf,
test_loss, test_perf, best_perf, patience, early_stopping, best=False):
checkpoint = os.path.join(args.model_dir, args.model_name)
kwargs = {
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'train_loss': train_loss,
'train_perf': train_perf,
'test_loss': test_loss,
'test_perf': test_perf,
'best_perf': best_perf,
'patience': patience,
'early_stopping': early_stopping,
}
if best:
checkpoint += '.best'
# Unwrap DataParallel / DistributedDataParallel if present; a bare model has no .module
if hasattr(model, 'module'):
kwargs['model_state_dict'] = model.module.state_dict()
else:
kwargs['model_state_dict'] = model.state_dict()
torch.save(kwargs, checkpoint)
else:
kwargs['model_state_dict'] = model.state_dict()
torch.save(kwargs, checkpoint)
def load_state(args, model, optimizer, scheduler):
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name))
model.load_state_dict(checkpoint['model_state_dict'])
sepoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
if 'optimizer_state_dict' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if 'scheduler_state_dict' in checkpoint:
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
train_loss = checkpoint['train_loss'] if 'train_loss' in checkpoint else 0
train_perf = checkpoint['train_perf'] if 'train_perf' in checkpoint else 0
test_loss = checkpoint['test_loss'] if 'test_loss' in checkpoint else 0
test_perf = checkpoint['test_perf'] if 'test_perf' in checkpoint else 0
best_perf = checkpoint['best_perf'] if 'best_perf' in checkpoint else 0
patience = checkpoint['patience'] if 'patience' in checkpoint else 0
if 'early_stopping' in checkpoint:
early_stopping = checkpoint['early_stopping']
else:
early_stopping = False
return sepoch, model, optimizer, scheduler, train_loss, train_perf, \
test_loss, test_perf, best_perf, patience, early_stopping
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (a, len_a, q, len_q, target) in enumerate(train_loader):
a = a.to(device)
len_a = len_a.to(device)
q = q.to(device)
len_q = len_q.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(a, len_a, q, len_q)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if args.show_log and batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx, len(train_loader),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss, correct, examples = 0., 0, 0
with torch.no_grad():
for a, len_a, q, len_q, target in test_loader:
a = a.to(device)
len_a = len_a.to(device)
q = q.to(device)
len_q = len_q.to(device)
target = target.to(device)
output = model(a, len_a, q, len_q)
test_loss += F.cross_entropy(output, target, reduction='sum').item()
label = output.argmax(dim=1, keepdim=True)
correct += label.eq(target.view_as(label)).sum().item()
examples += len(a)
test_loss /= examples
perf = correct / examples
# print('Average loss: {:.4f}, perf: {:.4f}%'.format(test_loss, 100. * perf))
return test_loss, perf
def main(id, args): # noqa C901
# Infra
use_cuda = not args.no_cuda and torch.cuda.is_available()
dist_parallel_mode = (use_cuda
and args.multi_gpus
and args.distributed_parallel
and torch.cuda.device_count() > 1)
if dist_parallel_mode:
dist.init_process_group(backend='nccl',
init_method='tcp://127.0.0.1:23456',
world_size=torch.cuda.device_count(),
rank=id)
torch.cuda.set_device(id)
device = torch.device('cuda:%d' % id)
else:
device = torch.device('cuda' if use_cuda else 'cpu')
if id == 0 and args.save_model:
if not os.path.isdir(args.model_dir):
os.makedirs(args.model_dir)
# Dataset
train_set = DAQA(args.audio_training_set, args.qa_training_set)
test_set = DAQA(args.audio_test_set,
args.qa_test_set,
train_set.stats,
train_set.word_to_ix,
train_set.answer_to_ix)
if dist_parallel_mode:
sampler_kwargs = {'num_replicas': torch.cuda.device_count(), 'rank': id}
train_sampler = torch.utils.data.DistributedSampler(train_set, **sampler_kwargs)
# test_sampler = torch.utils.data.DistributedSampler(test_set, **sampler_kwargs)
# The above is commented out because we only evaluate on the main process
# Note also that this means evaluation with train_sampler only covers the main
# process's shard of the training set, which is advantageous (faster) since the
# train metrics are only used for monitoring.
test_sampler = torch.utils.data.RandomSampler(test_set)
batch_size = int(args.batch_size / torch.cuda.device_count())
else:
train_sampler = torch.utils.data.RandomSampler(train_set)
test_sampler = torch.utils.data.RandomSampler(test_set)
batch_size = args.batch_size
assert batch_size == 1, 'Batch size / number of GPUs != 1.'
loader_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(train_set,
batch_size=batch_size,
sampler=train_sampler,
collate_fn=DAQA.pad_collate_fn,
**loader_kwargs)
test_loader = torch.utils.data.DataLoader(test_set,
batch_size=batch_size,
sampler=test_sampler,
collate_fn=DAQA.pad_collate_fn,
**loader_kwargs)
# Model
model = build_model(args,
vocab_dim=len(train_set.word_to_ix),
padding_idx=train_set.word_to_ix['<pad>'],
input_dim=train_set.stats['mean'].shape[0],
output_dim=len(train_set.answer_to_ix))
model = model.to(device)
# GPU / multi-GPU / distributed multi-GPU
if dist_parallel_mode:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[id],
output_device=id,
check_reduction=True,
broadcast_buffers=False)
if id == 0:
print('DistributedDataParallel! Using', device)
elif (use_cuda
and args.multi_gpus
and torch.cuda.device_count() > 1):
model = nn.DataParallel(model)
print('DataParallel! Using', torch.cuda.device_count(), 'GPUs!')
else:
print('Single CPU/GPU! Using', device)
# Optimizer and scheduler
if args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.l2)
elif args.optimizer == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(),
lr=args.lr,
weight_decay=args.l2)
else:
assert False, 'Unknown optimizer.'
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', 0.1,
int(args.patience / 2),
verbose=True)
checkpoint_pt = os.path.join(args.model_dir, args.model_name)
# Inference
if args.infer_only:
if id == 0:
if os.path.isfile(checkpoint_pt):
print('Testing: ' + checkpoint_pt)
_, model, optimizer, scheduler, _, _, _, _, _, _, _ = \
load_state(args, model, optimizer, scheduler)
print(' ')
print('Hyperparameters')
print(args)
print(' ')
print('Model')
print(model)
print(' ')
print('Start testing.')
test_loss, test_perf = test(args, model, device, test_loader)
print(('Test loss: {:.3f}, Test Perf: {:.3f}%.').format(
test_loss,
100. * test_perf))
else:
print('Could not find model to test.')
return # inference done, nothing else to do here.
# Initialize or load from existing checkpoint
if (args.resume and os.path.isfile(checkpoint_pt)):
if id == 0:
print('Continue training from: ' + checkpoint_pt)
sepoch, model, optimizer, scheduler, train_loss, train_perf, \
test_loss, test_perf, best_perf, patience, early_stopping = \
load_state(args, model, optimizer, scheduler)
else:
sepoch = 0
best_perf, patience = 0., 0
early_stopping = False
if id == 0: # evaluate only on main process
print(' ')
print('Hyperparameters')
print(args)
print(' ')
print('Model')
print(model)
print(' ')
print('Start training.')
train_loss, train_perf = test(args, model, device, train_loader)
test_loss, test_perf = test(args, model, device, test_loader)
print(('Epoch {:03d}. Train loss: {:.3f}, Train Perf: {:.3f}%'
+ '. Test loss: {:.3f}, Test Perf: {:.3f}%.').format(sepoch,
train_loss,
100. * train_perf,
test_loss,
100. * test_perf))
else: # Other processes don't need this
train_loss, train_perf, test_loss, test_perf = 0, 0, 0, 0
# Force other processes to wait
if dist_parallel_mode or dist.is_initialized():
dist.barrier()
# Training loop
for epoch in range(sepoch + 1, args.epochs + 1):
# Load latest checkpoint to synchronize optimizer, early stopping, etc.
if dist_parallel_mode and epoch > sepoch + 1:
if args.anneal_learning_rate or args.early_stopping:
checkpoint = torch.load(checkpoint_pt)
if args.anneal_learning_rate:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if args.early_stopping:
early_stopping = checkpoint['early_stopping']
if early_stopping:
print('Early Stopping! Id: ' + str(id))
break
# DistributedSampler requires manually setting the epoch for randomization
if dist_parallel_mode:
train_loader.sampler.set_epoch(epoch)
# test_loader uses a RandomSampler, which doesn't require this
# Train
train(args, model, device, train_loader, optimizer, epoch)
# Force other processes to wait
if dist_parallel_mode or dist.is_initialized():
dist.barrier()
# Eval
if id == 0: # evaluate only on main process
train_loss, train_perf = test(args, model, device, train_loader)
test_loss, test_perf = test(args, model, device, test_loader)
print(('Epoch {:03d}. Train loss: {:.3f}, Train Perf: {:.3f}%'
+ '. Test loss: {:.3f}, Test Perf: {:.3f}%.').format(epoch,
train_loss,
100. * train_perf,
test_loss,
100. * test_perf))
if args.anneal_learning_rate:
scheduler.step(test_perf)
# Monitor best performance so far assuming higher better
if test_perf > best_perf:
best_perf, patience = test_perf, 0
print('Best Model at Epoch ' + str(epoch))
if args.save_model:
save_state(args, epoch, model, optimizer, scheduler,
train_loss, train_perf, test_loss, test_perf,
best_perf, patience, early_stopping, best=True)
else:
patience += 1
if args.early_stopping and (patience >= args.patience):
early_stopping = True
if (args.save_model):
save_state(args, epoch, model, optimizer, scheduler,
train_loss, train_perf, test_loss, test_perf,
best_perf, patience, early_stopping)
# If there is only a single process then break now
# If > a single process then all processes break start of next epoch
if not dist_parallel_mode and early_stopping:
print('Early Stopping!')
break
# Force other processes to wait
if dist_parallel_mode or dist.is_initialized():
dist.barrier()
def union(args):
# Set seed
# np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (not args.no_cuda
and torch.cuda.is_available()
and args.multi_gpus
and args.distributed_parallel
and torch.cuda.device_count() > 1):
assert args.batch_size == torch.cuda.device_count(), \
'Batch size must equal the number of GPUs.'
if not args.save_model:
assert not args.anneal_learning_rate, \
'Checkpoints are used to synchronize learning rate.'
assert not args.early_stopping, \
'Checkpoints are used to synchronize early stopping flag.'
print('Distributed!')
mp.spawn(main, nprocs=torch.cuda.device_count(), args=(args,), daemon=False)
else:
assert args.batch_size == 1, 'Illegal batch size > 1 for undistributed mode.'
main(0, args)
if __name__ == '__main__':
args = parser.parse_args()
union(args)
print('Success!')
|
daqa-master
|
daqa-mod/main.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
def convnet(num_conv_filts, num_conv_layers, stride, dilation, max_n_filts=512):
"""
Implements num_conv_layers conv layers a la VGG.
"""
layers = []
in_channels = 1
n_filts = num_conv_filts
conv_red_dim = 1 # subsampling factor
for _ in range(num_conv_layers):
if len(layers) == 0:
layers += [nn.Conv2d(in_channels,
n_filts,
kernel_size=(12, 3),
padding=1,
stride=(9, stride),
dilation=dilation)]
else:
layers += [nn.Conv2d(in_channels,
n_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)]
layers += [nn.BatchNorm2d(n_filts, affine=True)]
layers += [nn.ReLU()]
layers += [nn.Conv2d(n_filts,
n_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)]
layers += [nn.BatchNorm2d(n_filts, affine=True)]
layers += [nn.ReLU()]
if conv_red_dim <= 32:
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
conv_red_dim *= 2 # max pooled (only correct for frequency dim)
in_channels = n_filts
n_filts = 2 * n_filts if n_filts < max_n_filts else n_filts
return nn.Sequential(*layers), in_channels, conv_red_dim
def coordinates(x, y, start=-1, end=1):
"""
Returns a map of coordinates with x rows and y columns.
Input:
- x: rows
- y: columns
Returns:
- xy_coords: 1 x 2 x 'x' x y
"""
x_row = torch.linspace(start, end, steps=y) # y
y_row = torch.linspace(start, end, steps=x) # x
x_coords = x_row.unsqueeze(0).expand(x, y).unsqueeze(0) # 1 x 'x' x y
y_coords = y_row.unsqueeze(1).expand(x, y).unsqueeze(0) # 1 x 'x' x y
# 1 x 2 x 'x' x y
return torch.autograd.Variable(torch.cat([x_coords, y_coords], 0).unsqueeze(0))
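# For example, coordinates(4, 6) returns a (1, 2, 4, 6) tensor whose first channel
# varies along the columns and whose second varies along the rows, both in [-1, 1];
# callers expand it over the batch and concatenate it to feature maps (coord-conv style).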
class StackedAttention1D(nn.Module):
"""
Adapted from clevr-iep/blob/master/iep/models/baselines.py
"""
def __init__(self, input_dim, hidden_dim):
super(StackedAttention1D, self).__init__()
self.Wa = nn.Linear(input_dim, hidden_dim)
self.Wu = nn.Linear(input_dim, hidden_dim)
self.Wp = nn.Linear(hidden_dim, input_dim)
def forward(self, a, u):
"""
Input:
- a: N x D
- u: N x D
Returns:
- next_u: N x D
"""
a_proj = self.Wa(a) # N x K
u_proj = self.Wu(u) # N x K
h = torch.tanh(a_proj + u_proj)
p = F.softmax(self.Wp(h), dim=1) # N x D
a_tilde = p * a # N x D
next_u = a_tilde + u # N x D
return next_u
class StackedAttention(nn.Module):
"""
Adapted from clevr-iep/blob/master/iep/models/baselines.py
"""
def __init__(self, input_dim, hidden_dim):
super(StackedAttention, self).__init__()
self.Wv = nn.Conv2d(input_dim, hidden_dim, kernel_size=1, padding=0)
self.Wu = nn.Linear(input_dim, hidden_dim)
self.Wp = nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0)
self.hidden_dim = hidden_dim
self.attention_maps = None
def forward(self, v, u):
"""
Input:
- v: N x D x H x W
- u: N x D
Returns:
- next_u: N x D
"""
N, K = v.size(0), self.hidden_dim
H, W = v.size(2), v.size(3)
v_proj = self.Wv(v) # N x K x H x W
u_proj = self.Wu(u) # N x K
u_proj_expand = u_proj.view(N, K, 1, 1).expand(N, K, H, W)
h = torch.tanh(v_proj + u_proj_expand)
p = F.softmax(self.Wp(h).view(N, H * W), dim=1).view(N, 1, H, W)
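# p is a softmax attention map over the H*W spatial positions; the attended feature
# v_tilde below is its weighted sum over v and is added to u (one attention hop).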
self.attention_maps = p.data.clone()
v_tilde = (p.expand_as(v) * v).sum((2, 3))
next_u = u + v_tilde
return next_u
|
daqa-master
|
daqa-mod/layers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import re
import h5py
import torch
from torch.utils.data.dataloader import default_collate
from torchvision import datasets # NOQA F401
class DAQA(torch.utils.data.Dataset):
_special_ix = {'<pad>': 0}
def __init__(self, audio_pt, ques_ans_pt, stats=None,
word_to_ix=None, answer_to_ix=None):
# Read audio HDF5 file
self.audio = h5py.File(audio_pt, 'r')
if stats is None:
self.stats = {}
self.stats['mean'] = self.audio['mean'][:]
self.stats['stddev'] = self.audio['stddev'][:]
else:
self.stats = stats
# h5py doesn't support sharing one file handle across multiple worker threads,
# so close the handle here and keep only the path (it is reopened per __getitem__);
# comment out the following two lines and amend __getitem__ to read audio from memory.
self.audio.close()
self.audio = audio_pt
# Read JSON file
with open(ques_ans_pt, 'r') as f:
questions_answers = json.load(f)
# Audio, questions, and answers to a nice list
dataset = []
for i in range(len(questions_answers['questions'])):
aud = questions_answers['questions'][i]['audio_filename'][:-4]
ques = questions_answers['questions'][i]['question']
ans = questions_answers['questions'][i]['answer_token']
dataset.append({'audio': aud, 'question': ques, 'answer': ans})
if word_to_ix is None:
self.word_to_ix = DAQA.build_vocab_questions(dataset, DAQA._special_ix)
else:
self.word_to_ix = word_to_ix
dataset = DAQA.encode_questions(dataset, self.word_to_ix)
if answer_to_ix is None:
self.answer_to_ix = DAQA.build_vocab_answers(dataset)
else:
self.answer_to_ix = answer_to_ix
dataset = DAQA.encode_answers(dataset, self.answer_to_ix)
self.dataset = dataset
# Pack questions and answers for each audio into a nice dictionary.
dataset_wrt_audio = {}
for i in range(len(dataset)):
aud = dataset[i]['audio']
ques = dataset[i]['question']
ans = dataset[i]['answer']
if aud not in dataset_wrt_audio:
dataset_wrt_audio[aud] = [{'question': ques, 'answer': ans}]
else:
dataset_wrt_audio[aud] += [{'question': ques, 'answer': ans}]
self.dataset_wrt_audio = dataset_wrt_audio
def __len__(self):
# return len(self.dataset)
return len(self.dataset_wrt_audio)
def __getitem__(self, index):
sub_mini_batch = []
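# One item = one audio clip plus all of its question/answer pairs;
# pad_collate_fn later flattens these per-audio lists into a single batch.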
audio = sorted(self.dataset_wrt_audio)[index] # maybe move up
audio_pt = h5py.File(self.audio, 'r') # swmr=True
a = audio_pt[audio][:]
a = torch.tensor((a - self.stats['mean']) / self.stats['stddev'])
# The previous 3 lines should be commented if reading audio from memory,
# as well as audio_pt.close() below.
# The following line should be uncommented if reading audio from memory.
# a = torch.tensor((self.audio[audio][:] - self.stats['mean'])
# / self.stats['stddev'])
len_a = torch.tensor(a.shape[0], dtype=torch.long)
for qas in range(len(self.dataset_wrt_audio[audio])):
q = torch.tensor(self.dataset_wrt_audio[audio][qas]['question'],
dtype=torch.long)
len_q = torch.tensor(len(q), dtype=torch.long)
y = torch.tensor(self.dataset_wrt_audio[audio][qas]['answer'],
dtype=torch.long)
sub_mini_batch += [(a, len_a, q, len_q, y)]
audio_pt.close()
return sub_mini_batch
@staticmethod
def build_vocab_questions(d, special_ix):
to_ix = special_ix # start with special tokens
for i in range(len(d)):
# Remove punctuation, lower case, convert to list of words
qr = re.sub(r'[^\w\s]', '', d[i]['question']).lower().split()
for w in qr:
if w not in to_ix:
to_ix[w] = len(to_ix)
return to_ix
@staticmethod
def build_vocab_answers(d):
to_ix = {}
for i in range(len(d)):
if d[i]['answer'] not in to_ix:
to_ix[d[i]['answer']] = len(to_ix)
return to_ix
@staticmethod
def encode_questions(d, to_ix):
for i in range(len(d)):
qr = re.sub(r'[^\w\s]', '', d[i]['question']).lower().split()
d[i]['question'] = [to_ix[w] for w in qr if w in to_ix]
# Note: 'if w in to_ix' silently drops out-of-vocabulary words, which is potentially dangerous.
return d
@staticmethod
def encode_answers(d, to_ix):
for i in range(len(d)):
d[i]['answer'] = to_ix[d[i]['answer']]
return d
@staticmethod
def pad_collate_fn(batch):
"""
Input: a list of list((A, len_A, Q, len_Q, Ans)).
"""
batch = [i for j in batch for i in j] # unpack list of lists to list
pad_idx = DAQA._special_ix['<pad>']
# Sort batch wrt to length of question
batch = sorted(batch, key=lambda x: x[3], reverse=True) # sort wrt Q
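# Descending question lengths are required because pack_padded_sequence expects
# sorted lengths (unless enforce_sorted=False is passed).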
max_len_q = batch[0][3]
# Pad questions with pad_idx
for i in range(len(batch)):
x = torch.ones(max_len_q, dtype=batch[i][2].dtype) * pad_idx
x[:batch[i][2].size(0)] = batch[i][2]
batch[i] = (batch[i][0], batch[i][1], x, batch[i][3], batch[i][4])
return default_collate(batch)
|
daqa-master
|
daqa-mod/data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import h5py
import numpy as np
import scipy
import scipy.io.wavfile
import librosa
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--input-wavs', default='wavs', type=str,
help='Path to folder with wavs to process.')
parser.add_argument('--input-features', default='features', type=str,
help='Path to folder with mels to process.')
# Settings
parser.add_argument('--compute-features', action='store_true', default=False,
help='Compute features.')
parser.add_argument('--window', default=0.025, type=float,
help='Window size (s).')
parser.add_argument('--stride', default=0.01, type=float,
help='Window stride (s).')
parser.add_argument('--num-mels', default=64, type=int,
help='Number of Mel coefficients.')
parser.add_argument('--astype', default='float32', type=str,
help='Data type for storage.')
parser.add_argument('--pack-features', action='store_true', default=False,
help='Pack features.')
parser.add_argument('--compressed', action='store_true', default=False,
help='Compress features.')
# Output
parser.add_argument('--output-features', default='features', type=str,
help='Path to folder with processed features.')
parser.add_argument('--output-file', default='features.hdf5', type=str,
help='Path to file with processed features.')
def compute_features(args):
"""
Compute MFSCs for all audio wav files in a given directory.
"""
print('Computing features...')
if not os.path.isdir(args.output_features):
os.makedirs(args.output_features)
lst_wavs = os.listdir(args.input_wavs)
lst_wavs = [e[:-4] for e in lst_wavs if e.endswith('.wav')]
counter = 0
for i in lst_wavs:
try:
fs, audio = scipy.io.wavfile.read(os.path.join(args.input_wavs,
i + '.wav'))
mfsc = librosa.feature.melspectrogram(y=audio.astype(float),
sr=fs,
n_fft=int(fs * args.window),
n_mels=args.num_mels,
hop_length=int(fs * args.stride),
power=1)
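# Convert the mel spectrogram to a log (dB) scale referenced to its max,
# transpose to frames x mels, and save one .npy file per clip.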
mfsc = librosa.power_to_db(mfsc, ref=np.max).T.astype(args.astype)
np.save(os.path.join(args.output_features, i), mfsc)
except Exception:
print('Error processing: ' + str(i))
counter += 1
if counter % 1000 == 0:
print('Finished processing: ' + str(counter) + ' files.')
def pack_features(args):
"""
Pack all npy MFSCs in a given directory into a single HDF5 file.
"""
print('Packing features...')
lst_npys = os.listdir(args.input_features)
lst_npys = [e[:-4] for e in lst_npys if e.endswith('.npy')]
counter = 0
# Variables for Welford’s mean and variance
n, mean, v = 0, np.zeros(args.num_mels), np.zeros(args.num_mels)
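# Welford's online update: for each frame x, n += 1, delta = x - old_mean,
# mean += delta / n, and v += (x - new_mean) * delta; the sample variance is v / (n - 1).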
kwargs = {'compression': 'gzip', 'compression_opts': 9} if args.compressed else {}
with h5py.File(args.output_file, 'w') as f:
for i in lst_npys:
mfsc = np.load(os.path.join(args.output_features, i + '.npy'))
f.create_dataset(i, data=mfsc, dtype=args.astype,
**kwargs)
for w in range(mfsc.shape[0]):
n += 1
delta = mfsc[w] - mean
mean += delta / n
v += (mfsc[w] - mean) * delta
counter += 1
if counter % 1000 == 0:
print('Finished packing: ' + str(counter) + ' files.')
var = v / (n - 1)
stddev = np.sqrt(var)
f.create_dataset('mean',
data=mean.astype(args.astype),
dtype=args.astype,
**kwargs)
f.create_dataset('variance',
data=var.astype(args.astype),
dtype=args.astype,
**kwargs)
f.create_dataset('stddev',
data=stddev.astype(args.astype),
dtype=args.astype,
**kwargs)
def main(args):
if args.compute_features:
compute_features(args)
if args.pack_features:
pack_features(args)
if not args.compute_features and not args.pack_features:
print("P.S. I didn't do anything. Both compute and pack features are False.")
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-mod/compute_audio_features.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import json
import random
import numpy as np
from qpas.exist import (was_there,
was_there_two_and,
was_there_two_or,
# was_there_source,
# was_there_source_two_and,
# was_there_source_two_or,
was_there_relative,
was_there_immediate_relative,
was_there_similar_ordinal,
was_there_similar_loudness,
was_there_at_least_two_similar_loudness,
was_there_similar_loudness_ordinal,
was_there_at_least_two_similar_loudness_ordinal,
was_there_similar_duration,
was_there_at_least_two_similar_duration,
was_there_similar_duration_ordinal,
was_there_at_least_two_similar_duration_ordinal,
)
from qpas.query import (what_was,
what_was_relative,
what_was_loudness,
what_was_loudness_relative,
what_was_loudness_relative_ordinal,
what_was_duration,
what_was_duration_relative,
what_was_duration_relative_ordinal,
)
from qpas.count import (how_many,
how_many_event,
how_many_ordinal,
how_many_event_two,
how_many_event_two_ordinal,
how_many_sounds_relative,
how_many_sounds_relative_ordinal,
how_many_event_relative,
how_many_event_relative_ordinal,
how_many_sounds_loudness_event,
how_many_sounds_loudness_ordinal,
how_many_sounds_duration_event,
how_many_sounds_duration_ordinal,
)
from qpas.compare import (compare_ordinal,
compare_ordinal_event,
compare_loudness,
compare_loudness_ordinal,
compare_loudness_event_ordinal,
compare_loudness_ordinal_event,
compare_same_loudness,
compare_same_loudness_ordinal,
compare_same_loudness_event_ordinal,
compare_duration,
compare_duration_ordinal,
compare_duration_event_ordinal,
compare_duration_ordinal_event,
compare_same_duration,
compare_same_duration_ordinal,
compare_same_duration_event_ordinal,
)
from qpas.compare_integer import (less_than,
equal_to,
more_than,
)
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--dataset', default='daqa.json', type=str,
help='JSON file describing the dataset.')
parser.add_argument('--input_narrative_file',
default='../daqa/daqa_narratives.json',
help="Path to narratives JSON file.")
parser.add_argument('--start_narrative_idx', default=0, type=int,
help='Start reading from start_narrative_idx.')
# Settings
parser.add_argument('--set', default='new',
help='Set name: train / val / test.')
parser.add_argument('--num_questions_per_narrative', default=10, type=int,
help='Number of questions per narrative.')
parser.add_argument('--patience_narrative', default=10, type=int,
help='Failed (ill-posed) attempts per narrative before giving up.')
parser.add_argument('--patience_template', default=10, type=int,
help='Failed (skewed-answer) attempts per template before giving up.')
parser.add_argument('--rel_diff', default=0.1, type=float,
help='Loudness sensitivity (relative difference).')
parser.add_argument('--max_diff', default=0.05, type=float,
help='Maximum difference between (in)frequent answers.')
parser.add_argument('--seed', default=0, type=int, help='Random Seed.')
parser.add_argument('--version', default='1.0', type=str, help='Version.')
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)',
help='License.')
parser.add_argument('--date',
default=datetime.datetime.today().strftime("%m/%d/%Y"),
help="Date.")
# Output
parser.add_argument('--start_output_idx', default=0, type=int,
help='Start numbering from start_output_idx.')
parser.add_argument('--output_qa_file',
default='../daqa/daqa_questions_answers.json',
help="Path to questions answers JSON file.")
def tokenize_answer(dataset, ans):
# Tokenize answer
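# Map the free-form answer string back to an event token: the answer matches
# event e only if every word of the answer appears among e's source/action synonyms.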
anss = ans.split(' ')
for e in dataset['events']:
lst_syn = dataset['sources'][e] + dataset['actions'][e]
lst_syn = ' '.join(s for s in lst_syn)
lst_check = []
for a in anss:
lst_check.append((' ' + a + ' ') in (' ' + lst_syn + ' '))
if all(lst_check):
ans = e
return ans
def add_answer(ans_dist_per_temp, ques_temp, ans_tk, max_diff):
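# Balance the per-template answer distribution: accept the candidate answer unless it
# is already the most frequent answer for this template and accepting it would push
# the gap to the least frequent answer above max_diff.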
# Only one answer seen so far for this template
if len(ans_dist_per_temp[ques_temp].keys()) <= 1:
return True
# First instance of this answer in this template
if ans_dist_per_temp[ques_temp][ans_tk] == 0:
return True
num_occ = sorted(((v, k) for k, v in ans_dist_per_temp[ques_temp].items()))
# Not the most frequent answer
if num_occ[-1][1] != ans_tk:
return True
# Difference between the (most + 1) and least frequent is less than max_diff
if ((num_occ[-1][0] + 1) - num_occ[0][0]) <= max_diff:
return True
return False
def main(args):
"""Randomly sample questions for given narrative and deduce answer."""
random.seed(args.seed)
np.random.seed(args.seed)
# Read dataset description and narratives
with open(args.dataset, 'r') as f:
dataset = json.load(f)
with open(args.input_narrative_file, 'r') as f:
narratives = json.load(f)
assert args.set == narratives['info']['set'], 'train/val/test mismatch.'
templates = [was_there,
was_there_two_and,
was_there_two_or,
# was_there_source,
# was_there_source_two_and,
# was_there_source_two_or,
was_there_relative,
was_there_immediate_relative,
was_there_similar_ordinal,
was_there_similar_loudness,
was_there_at_least_two_similar_loudness,
was_there_similar_loudness_ordinal,
was_there_at_least_two_similar_loudness_ordinal,
was_there_similar_duration,
was_there_at_least_two_similar_duration,
was_there_similar_duration_ordinal,
was_there_at_least_two_similar_duration_ordinal,
what_was,
what_was_relative,
what_was_loudness,
what_was_loudness_relative,
what_was_loudness_relative_ordinal,
what_was_duration,
what_was_duration_relative,
what_was_duration_relative_ordinal,
how_many,
how_many_event,
how_many_ordinal,
how_many_event_two,
how_many_event_two_ordinal,
how_many_sounds_relative,
how_many_sounds_relative_ordinal,
how_many_event_relative,
how_many_event_relative_ordinal,
how_many_sounds_loudness_event,
how_many_sounds_loudness_ordinal,
how_many_sounds_duration_event,
how_many_sounds_duration_ordinal,
compare_ordinal,
compare_ordinal_event,
compare_loudness,
compare_loudness_ordinal,
compare_loudness_event_ordinal,
compare_loudness_ordinal_event,
compare_same_loudness,
compare_same_loudness_ordinal,
compare_same_loudness_event_ordinal,
compare_duration,
compare_duration_ordinal,
compare_duration_event_ordinal,
compare_duration_ordinal_event,
compare_same_duration,
compare_same_duration_ordinal,
compare_same_duration_event_ordinal,
less_than,
equal_to,
more_than,
]
print('Generating ' + str(args.num_questions_per_narrative)
+ ' questions for each of the ' + str(len(narratives['narratives']))
+ ' narratives.')
idx = args.start_output_idx
lst_questions = []
num_skewed_answers = 0
num_illposed_questions = 0
ans_dist_per_temp = {}
    # Scale the allowed count gap by the expected number of questions per
    # template, so the relative skew between (in)frequent answers is
    # independent of the set size
max_diff = (args.max_diff
* ((len(narratives['narratives']) - args.start_narrative_idx)
* args.num_questions_per_narrative) / len(templates))
for n in range(args.start_narrative_idx, len(narratives['narratives'])):
narrative = narratives['narratives'][n]
num_questions, patience_narrative = 0, 0
while num_questions < args.num_questions_per_narrative:
question_template = random.choice(templates)
try: # catch illposed questions
patience_template = 0
while patience_template < args.patience_template:
ques, ans = question_template(dataset, narrative, args.rel_diff)
ans_tk = tokenize_answer(dataset, ans)
ques_temp_name = question_template.__name__
if ques_temp_name not in ans_dist_per_temp:
ans_dist_per_temp[ques_temp_name] = {}
if ans_tk not in ans_dist_per_temp[ques_temp_name]:
ans_dist_per_temp[ques_temp_name][ans_tk] = 0
if add_answer(ans_dist_per_temp, ques_temp_name,
ans_tk, max_diff):
question = {
'set': narrative['set'],
'audio_index': narrative['audio_index'],
'audio_filename': narrative['audio_filename'],
'question_template': ques_temp_name,
'question': ques,
'answer': ans,
'answer_token': ans_tk,
}
lst_questions.append(question)
ans_dist_per_temp[ques_temp_name][ans_tk] += 1
idx += 1
num_questions += 1
break
else:
patience_template += 1
num_skewed_answers += 1
if patience_template >= args.patience_template:
print('R1. Out of patience for narrative #' + str(n)
+ ' for template: ' + ques_temp_name + '.')
except AssertionError as error:
print(error)
patience_narrative += 1
num_illposed_questions += 1
if patience_narrative >= args.patience_narrative:
print('R2. Out of patience for narrative #' + str(n) + '.')
break
print('Generated ' + str(idx) + ' questions.')
print('Failed to generate ' + str(num_skewed_answers) + ' questions.'
+ ' Reason: skewed answers.')
print('Failed to generate ' + str(num_illposed_questions) + ' questions.'
+ ' Reason: illposed questions.')
print('Total number of attempts: '
+ str(idx + num_skewed_answers + num_illposed_questions))
output = {
'info': {
'set': args.set,
'version': args.version,
'date': args.date,
'license': args.license,
},
'questions': lst_questions
}
with open(args.output_qa_file, 'w') as f:
json.dump(output, f)
return True
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-gen/generate_questions_answers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Events with urls are a subset of AudioSet, see https://research.google.com/audioset/.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
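# Each numbered entry below describes one raw clip used to build an event
# recording: 'event' is the event code from daqa_outline.py, 'url' is the
# YouTube id of an AudioSet segment with 'start'/'end' offsets (presumably in
# seconds), and 'dir' points at a local file under raws/ ('end': -1 appears to
# mean the whole file).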
def main():
sources = {
1: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 160,
'end': 180,
},
2: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 200,
'end': 215,
},
3: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 220,
'end': 238,
},
4: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 250,
'end': 268,
},
5: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 270,
'end': 290,
},
6: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 310,
'end': 326,
},
7: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 330,
'end': 342,
},
8: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 346,
'end': 364,
},
9: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 366,
'end': 377,
},
10: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 280,
'end': 299,
},
11: {
'event': 'a000',
'url': '3klGi-ujenE',
'start': 65,
'end': 84,
},
12: {
'event': 'a000',
'url': '8W0KcQLImuo',
'start': 130,
'end': 150,
},
13: {
'event': 'a000',
'url': '9KSO1R50AXY',
'start': 33,
'end': 42,
},
14: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 92,
'end': 112,
},
15: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 116,
'end': 130,
},
16: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 131,
'end': 145,
},
17: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 146,
'end': 159,
},
18: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 160,
'end': 175,
},
19: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 176,
'end': 196,
},
20: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 198,
'end': 218,
},
#######################################################################
21: {
'event': 'b000',
'url': '-6krAYK2LLo',
'start': 11,
'end': 25,
},
22: {
'event': 'b000',
'url': '-6krAYK2LLo',
'start': 38,
'end': 48,
},
23: {
'event': 'b000',
'url': '-8wQV7VJnmM',
'start': 0,
'end': 20,
},
24: {
'event': 'b000',
'url': '-DYZX74qgFQ',
'start': 10,
'end': 24,
},
25: {
'event': 'b000',
'url': '-DYZX74qgFQ',
'start': 37,
'end': 50,
},
26: {
'event': 'b000',
'url': '-DYZX74qgFQ',
'start': 570,
'end': 579,
},
27: {
'event': 'b000',
'url': '-NPqCu4DyAM',
'start': 17,
'end': 28,
},
28: {
'event': 'b000',
'url': '-u5yvewHxzE',
'start': 0,
'end': 17,
},
29: {
'event': 'b000',
'url': '-u5yvewHxzE',
'start': 414,
'end': 424,
},
30: {
'event': 'b000',
'url': '-u5yvewHxzE',
'start': 590,
'end': 604,
},
31: {
'event': 'b000',
'url': '03frQGyrgQ4',
'start': 1,
'end': 21,
},
32: {
'event': 'b000',
'url': '08YFRFx-g7s',
'start': 0,
'end': 17,
},
33: {
'event': 'b000',
'url': '08YFRFx-g7s',
'start': 20,
'end': 29,
},
34: {
'event': 'b000',
'url': '0WWuZRd-O3c',
'start': 0,
'end': 12,
},
35: {
'event': 'b000',
'url': 'fPIG7nrpgec',
'start': 15,
'end': 30,
},
36: {
'event': 'b000',
'url': 'fYvUB-qy4IM',
'start': 0,
'end': 14,
},
37: {
'event': 'b000',
'url': 'gIQ4QrKXjCc',
'start': 0,
'end': 20,
},
38: {
'event': 'b000',
'url': 'i5TlfRqdawk',
'start': 13,
'end': 28,
},
39: {
'event': 'b000',
'url': 'iUyxzXcyrqI',
'start': 8,
'end': 28,
},
40: {
'event': 'b000',
'url': 'iyB5q7bb1l8',
'start': 0,
'end': 13,
},
#######################################################################
41: {
'event': 'b001',
'url': 'DTieJvYa-sA',
'start': 21,
'end': 26,
},
42: {
'event': 'b001',
'url': 'DVEuOBxAyFM',
'start': 10,
'end': 20,
},
43: {
'event': 'b001',
'url': 'E-As4tECwcQ',
'start': 195,
'end': 200,
},
44: {
'event': 'b001',
'url': 'EodzL5d9A78',
'start': 15,
'end': 25,
},
45: {
'event': 'b001',
'url': 'FSm6Z98ALhw',
'start': 345,
'end': 355,
},
46: {
'event': 'b001',
'url': 'G8tT-uKj3Ls',
'start': 12,
'end': 30,
},
47: {
'event': 'b001',
'url': 'HAS6G7Uq4Oc',
'start': 1,
'end': 16,
},
48: {
'event': 'b001',
'url': 'H_Bcux0FRxM',
'start': 34,
'end': 44,
},
49: {
'event': 'b001',
'url': 'I3_SwBhnUj0',
'start': 8,
'end': 20,
},
50: {
'event': 'b001',
'url': 'KZXC1iouJyo',
'start': 1,
'end': 6,
},
51: {
'event': 'b001',
'url': 'MIV0-6O-dLM',
'start': 10,
'end': 18,
},
52: {
'event': 'b001',
'url': 'MOV9rXOes3k',
'start': 0,
'end': 9,
},
53: {
'event': 'b001',
'url': 'W4oEM0W6mhM',
'start': 0,
'end': 7,
},
54: {
'event': 'b001',
'url': 'WCFt-dggFlk',
'start': 6,
'end': 12,
},
55: {
'event': 'b001',
'url': 'YykyGidfpfw',
'start': 0,
'end': 10,
},
56: {
'event': 'b001',
'url': 'ZPZa1zMpxBU',
'start': 0,
'end': 6,
},
57: {
'event': 'b001',
'url': 'eE8QMTqL01I',
'start': 0,
'end': 6,
},
58: {
'event': 'b001',
'url': 'fppKGJD3Y6c',
'start': 7,
'end': 13,
},
59: {
'event': 'b001',
'url': 'j_ZwYJNu5mE',
'start': 0,
'end': 15,
},
60: {
'event': 'b001',
'url': 'm1yOTcjRjcM',
'start': 0,
'end': 17,
},
#######################################################################
61: {
'event': 'c000',
'dir': 'raws/c000_1.wav',
'start': 0,
'end': -1,
},
62: {
'event': 'c000',
'dir': 'raws/c000_2.wav',
'start': 0,
'end': -1,
},
63: {
'event': 'c000',
'dir': 'raws/c000_3.wav',
'start': 0,
'end': -1,
},
64: {
'event': 'c000',
'dir': 'raws/c000_4.wav',
'start': 0,
'end': -1,
},
65: {
'event': 'c000',
'dir': 'raws/c000_5.wav',
'start': 0,
'end': -1,
},
66: {
'event': 'c000',
'dir': 'raws/c000_6.wav',
'start': 0,
'end': -1,
},
67: {
'event': 'c000',
'dir': 'raws/c000_7.wav',
'start': 0,
'end': -1,
},
68: {
'event': 'c000',
'dir': 'raws/c000_8.wav',
'start': 0,
'end': -1,
},
69: {
'event': 'c000',
'dir': 'raws/c000_9.wav',
'start': 0,
'end': -1,
},
70: {
'event': 'c000',
'dir': 'raws/c000_10.wav',
'start': 0,
'end': -1,
},
71: {
'event': 'c000',
'dir': 'raws/c000_11.wav',
'start': 0,
'end': -1,
},
72: {
'event': 'c000',
'dir': 'raws/c000_12.wav',
'start': 0,
'end': -1,
},
73: {
'event': 'c000',
'dir': 'raws/c000_13.wav',
'start': 0,
'end': -1,
},
74: {
'event': 'c000',
'dir': 'raws/c000_14.wav',
'start': 0,
'end': -1,
},
75: {
'event': 'c000',
'dir': 'raws/c000_15.wav',
'start': 0,
'end': -1,
},
76: {
'event': 'c000',
'dir': 'raws/c000_16.wav',
'start': 0,
'end': -1,
},
77: {
'event': 'c000',
'dir': 'raws/c000_17.wav',
'start': 0,
'end': -1,
},
78: {
'event': 'c000',
'dir': 'raws/c000_18.wav',
'start': 0,
'end': -1,
},
79: {
'event': 'c000',
'dir': 'raws/c000_19.wav',
'start': 0,
'end': -1,
},
80: {
'event': 'c000',
'dir': 'raws/c000_20.wav',
'start': 0,
'end': -1,
},
#######################################################################
81: {
'event': 'c001',
'dir': 'raws/c001_1.wav',
'start': 0,
'end': -1,
},
82: {
'event': 'c001',
'dir': 'raws/c001_2.wav',
'start': 0,
'end': -1,
},
83: {
'event': 'c001',
'dir': 'raws/c001_3.wav',
'start': 0,
'end': -1,
},
84: {
'event': 'c001',
'dir': 'raws/c001_4.wav',
'start': 0,
'end': -1,
},
85: {
'event': 'c001',
'dir': 'raws/c001_5.wav',
'start': 0,
'end': -1,
},
86: {
'event': 'c001',
'dir': 'raws/c001_6.wav',
'start': 0,
'end': -1,
},
87: {
'event': 'c001',
'dir': 'raws/c001_7.wav',
'start': 0,
'end': -1,
},
88: {
'event': 'c001',
'dir': 'raws/c001_8.wav',
'start': 0,
'end': -1,
},
89: {
'event': 'c001',
'dir': 'raws/c001_9.wav',
'start': 0,
'end': -1,
},
90: {
'event': 'c001',
'dir': 'raws/c001_10.wav',
'start': 0,
'end': -1,
},
91: {
'event': 'c001',
'dir': 'raws/c001_11.wav',
'start': 0,
'end': -1,
},
92: {
'event': 'c001',
'dir': 'raws/c001_12.wav',
'start': 0,
'end': -1,
},
93: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 7,
'end': 17,
},
94: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 18,
'end': 30,
},
95: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 45,
'end': 56,
},
96: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 60,
'end': 72,
},
97: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 90,
'end': 96,
},
98: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 120,
'end': 130,
},
99: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 230,
'end': 242,
},
100: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 270,
'end': 279,
},
#######################################################################
101: {
'event': 'c002',
'dir': 'raws/c002_1.wav',
'start': 0,
'end': -1,
},
102: {
'event': 'c002',
'dir': 'raws/c002_2.wav',
'start': 0,
'end': -1,
},
103: {
'event': 'c002',
'dir': 'raws/c002_3.wav',
'start': 0,
'end': -1,
},
104: {
'event': 'c002',
'dir': 'raws/c002_4.wav',
'start': 0,
'end': -1,
},
105: {
'event': 'c002',
'dir': 'raws/c002_5.wav',
'start': 0,
'end': -1,
},
106: {
'event': 'c002',
'dir': 'raws/c002_6.wav',
'start': 0,
'end': -1,
},
107: {
'event': 'c002',
'dir': 'raws/c002_7.wav',
'start': 0,
'end': -1,
},
108: {
'event': 'c002',
'dir': 'raws/c002_8.wav',
'start': 0,
'end': -1,
},
109: {
'event': 'c002',
'dir': 'raws/c002_9.wav',
'start': 0,
'end': -1,
},
110: {
'event': 'c002',
'dir': 'raws/c002_10.wav',
'start': 0,
'end': -1,
},
111: {
'event': 'c002',
'dir': 'raws/c002_11.wav',
'start': 0,
'end': -1,
},
112: {
'event': 'c002',
'dir': 'raws/c002_12.wav',
'start': 0,
'end': -1,
},
113: {
'event': 'c002',
'dir': 'raws/c002_13.wav',
'start': 0,
'end': -1,
},
114: {
'event': 'c002',
'dir': 'raws/c002_14.wav',
'start': 0,
'end': -1,
},
115: {
'event': 'c002',
'dir': 'raws/c002_15.wav',
'start': 0,
'end': -1,
},
116: {
'event': 'c002',
'dir': 'raws/c002_16.wav',
'start': 0,
'end': -1,
},
117: {
'event': 'c002',
'dir': 'raws/c002_17.wav',
'start': 0,
'end': -1,
},
118: {
'event': 'c002',
'dir': 'raws/c002_18.wav',
'start': 0,
'end': -1,
},
119: {
'event': 'c002',
'dir': 'raws/c002_19.wav',
'start': 0,
'end': -1,
},
120: {
'event': 'c002',
'dir': 'raws/c002_20.wav',
'start': 0,
'end': -1,
},
#######################################################################
121: {
'event': 'c003',
'url': '2p_d6vsFKJM',
'start': 2,
'end': 7,
},
122: {
'event': 'c003',
'url': '7e2ifgqrN1Q',
'start': 15,
'end': 20,
},
123: {
'event': 'c003',
'url': 'AiQoXi32QIA',
'start': 13,
'end': 18,
},
124: {
'event': 'c003',
'url': 'acIL82JWyq4',
'start': 90,
'end': 95,
},
125: {
'event': 'c003',
'url': 'acIL82JWyq4',
'start': 99,
'end': 105,
},
126: {
'event': 'c003',
'url': 'TpYdG5rqKnc',
'start': 77,
'end': 81,
},
127: {
'event': 'c003',
'url': 'aLHxMaT3uYg',
'start': 82,
'end': 87,
},
128: {
'event': 'c003',
'url': 'bWtCva4PDKE',
'start': 3,
'end': 10,
},
129: {
'event': 'c003',
'url': 'cM4zYIOdrYk',
'start': 1,
'end': 7,
},
130: {
'event': 'c003',
'url': 'fWBzCRl6LUs',
'start': 0,
'end': 4,
},
131: {
'event': 'c003',
'url': 'f_7ujxIzNmU',
'start': 11,
'end': 16,
},
132: {
'event': 'c003',
'url': 'fxbrSjGLrXY',
'start': 161,
'end': 166,
},
133: {
'event': 'c003',
'url': 'rbI18LmDHpw',
'start': 3,
'end': 8,
},
134: {
'event': 'c003',
'url': 'rbI18LmDHpw',
'start': 9,
'end': 16,
},
135: {
'event': 'c003',
'url': 's-jlycmfUsw',
'start': 21,
'end': 27,
},
136: {
'event': 'c003',
'url': 's-jlycmfUsw',
'start': 50,
'end': 56,
},
137: {
'event': 'c003',
'url': 't5fv6TTbsA0',
'start': 510,
'end': 516,
},
138: {
'event': 'c003',
'url': 'u0DxoED_3kA',
'start': 47,
'end': 52,
},
139: {
'event': 'c003',
'url': 'wBeYh9V8Iw4',
'start': 137,
'end': 142,
},
140: {
'event': 'c003',
'url': 'YJG1Zz097M4',
'start': 1,
'end': 9,
},
#######################################################################
141: {
'event': 'c004',
'url': 'ocOYpa4na5k',
'start': 0,
'end': 16,
},
142: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 0,
'end': 6,
},
143: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 8,
'end': 13,
},
144: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 17,
'end': 23,
},
145: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 27,
'end': 32,
},
146: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 37,
'end': 43,
},
147: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 45,
'end': 53,
},
148: {
'event': 'c004',
'url': 'rqzIV5OzbH0',
'start': 30,
'end': 37,
},
149: {
'event': 'c004',
'url': '1Ms9GajaUQ4',
'start': 0,
'end': 10,
},
150: {
'event': 'c004',
'url': '2rcRqeXnsNw',
'start': 17,
'end': 27,
},
151: {
'event': 'c004',
'url': '8yRROnG0-lA',
'start': 11,
'end': 23,
},
152: {
'event': 'c004',
'url': '8yRROnG0-lA',
'start': 24,
'end': 32,
},
153: {
'event': 'c004',
'url': '9EsNtRXnYbE',
'start': 0,
'end': 14,
},
154: {
'event': 'c004',
'url': '9EsNtRXnYbE',
'start': 15,
'end': 30,
},
155: {
'event': 'c004',
'url': 'FyQuHLiMuIk',
'start': 0,
'end': 5,
},
156: {
'event': 'c004',
'url': 'H7xKYPGjhhg',
'start': 10,
'end': 19,
},
157: {
'event': 'c004',
'url': 'H7xKYPGjhhg',
'start': 23,
'end': 29,
},
158: {
'event': 'c004',
'url': 'UPohyk3ynFk',
'start': 4,
'end': 10,
},
159: {
'event': 'c004',
'url': '7qnX0WB1x1k',
'start': 0,
'end': 9,
},
160: {
'event': 'c004',
'url': 'W-o0tTfwuOg',
'start': 39,
'end': 44,
},
#######################################################################
161: {
'event': 'd000',
'dir': 'raws/d000_1.wav',
'start': 0,
'end': -1,
},
162: {
'event': 'd000',
'dir': 'raws/d000_2.wav',
'start': 0,
'end': -1,
},
163: {
'event': 'd000',
'dir': 'raws/d000_3.wav',
'start': 0,
'end': -1,
},
164: {
'event': 'd000',
'dir': 'raws/d000_4.wav',
'start': 0,
'end': -1,
},
165: {
'event': 'd000',
'dir': 'raws/d000_5.wav',
'start': 0,
'end': -1,
},
166: {
'event': 'd000',
'dir': 'raws/d000_6.wav',
'start': 0,
'end': -1,
},
167: {
'event': 'd000',
'dir': 'raws/d000_7.wav',
'start': 0,
'end': -1,
},
168: {
'event': 'd000',
'dir': 'raws/d000_8.wav',
'start': 0,
'end': -1,
},
169: {
'event': 'd000',
'dir': 'raws/d000_9.wav',
'start': 0,
'end': -1,
},
170: {
'event': 'd000',
'dir': 'raws/d000_10.wav',
'start': 0,
'end': -1,
},
171: {
'event': 'd000',
'dir': 'raws/d000_11.wav',
'start': 0,
'end': -1,
},
172: {
'event': 'd000',
'dir': 'raws/d000_12.wav',
'start': 0,
'end': -1,
},
173: {
'event': 'd000',
'dir': 'raws/d000_13.wav',
'start': 0,
'end': -1,
},
174: {
'event': 'd000',
'dir': 'raws/d000_14.wav',
'start': 0,
'end': -1,
},
175: {
'event': 'd000',
'dir': 'raws/d000_15.wav',
'start': 0,
'end': -1,
},
176: {
'event': 'd000',
'dir': 'raws/d000_16.wav',
'start': 0,
'end': -1,
},
177: {
'event': 'd000',
'dir': 'raws/d000_17.wav',
'start': 0,
'end': -1,
},
178: {
'event': 'd000',
'dir': 'raws/d000_18.wav',
'start': 0,
'end': -1,
},
179: {
'event': 'd000',
'dir': 'raws/d000_19.wav',
'start': 0,
'end': -1,
},
180: {
'event': 'd000',
'dir': 'raws/d000_20.wav',
'start': 0,
'end': -1,
},
#######################################################################
181: {
'event': 'd001',
'url': 'nZIY8BKixjc',
'start': 7,
'end': 12,
},
182: {
'event': 'd001',
'url': 'ptIHZv3KdJw',
'start': 0,
'end': 2,
},
183: {
'event': 'd001',
'url': 'tNEGx3WCwBA',
'start': 0,
'end': 4,
},
184: {
'event': 'd001',
'url': 'vmeWtjzGZPs',
'start': 0,
'end': 6,
},
185: {
'event': 'd001',
'url': 'vmeWtjzGZPs',
'start': 7,
'end': 10,
},
186: {
'event': 'd001',
'url': 'vmeWtjzGZPs',
'start': 11,
'end': 16,
},
187: {
'event': 'd001',
'url': '-9ek6eO0RtI',
'start': 259,
'end': 265,
},
188: {
'event': 'd001',
'url': '6qlfodh49BA',
'start': 0,
'end': 2,
},
189: {
'event': 'd001',
'url': '7P-1BJ1A9ME',
'start': 0,
'end': 6,
},
190: {
'event': 'd001',
'url': '9VJL-ktypNw',
'start': 0,
'end': 5,
},
191: {
'event': 'd001',
'url': 'BurGML_ZqSA',
'start': 490.8,
'end': 495,
},
192: {
'event': 'd001',
'url': 'JL76D1HWv-U',
'start': 549,
'end': 555,
},
193: {
'event': 'd001',
'url': 'M47-JuWnx6U',
'start': 0,
'end': 3.6,
},
194: {
'event': 'd001',
'dir': 'raws/d001_1.wav',
'start': 0,
'end': 10,
},
195: {
'event': 'd001',
'dir': 'raws/d001_2.wav',
'start': 11,
'end': 19,
},
196: {
'event': 'd001',
'dir': 'raws/d001_3.wav',
'start': 20,
'end': 30,
},
197: {
'event': 'd001',
'dir': 'raws/d001_4.wav',
'start': 31,
'end': 41,
},
198: {
'event': 'd001',
'url': 'Vbx6TFxSPYY',
'start': 64,
'end': 70,
},
199: {
'event': 'd001',
'url': 'Vbx6TFxSPYY',
'start': 90,
'end': 92.8,
},
200: {
'event': 'd001',
'url': 'Vbx6TFxSPYY',
'start': 96,
'end': 101,
},
#######################################################################
201: {
'event': 'd002',
'url': '3xCWI_22Z9A',
'start': 45,
'end': 51,
},
202: {
'event': 'd002',
'url': '3xCWI_22Z9A',
'start': 61,
'end': 66,
},
203: {
'event': 'd002',
'url': '3xCWI_22Z9A',
'start': 117,
'end': 126,
},
204: {
'event': 'd002',
'url': '5PbIH_kMyis',
'start': 2,
'end': 18,
},
205: {
'event': 'd002',
'url': '64K4SlYR3BU',
'start': 0,
'end': 17,
},
206: {
'event': 'd002',
'url': 'CTBFPn_S5u0',
'start': 0,
'end': 5,
},
207: {
'event': 'd002',
'url': 'CTBFPn_S5u0',
'start': 12,
'end': 17,
},
208: {
'event': 'd002',
'url': 'EakI8v4Ztt4',
'start': 2,
'end': 14,
},
209: {
'event': 'd002',
'url': 'EakI8v4Ztt4',
'start': 29,
'end': 34,
},
210: {
'event': 'd002',
'url': 'FeRaDiSPb2c',
'start': 11,
'end': 16,
},
211: {
'event': 'd002',
'url': 'FeRaDiSPb2c',
'start': 18,
'end': 23,
},
212: {
'event': 'd002',
'url': 'Fw09tDLa-78',
'start': 0,
'end': 5,
},
213: {
'event': 'd002',
'url': 'Fw09tDLa-78',
'start': 40,
'end': 46,
},
214: {
'event': 'd002',
'url': 'G7vXKtePlGM',
'start': 0,
'end': 20,
},
215: {
'event': 'd002',
'url': 'GamZltmhYuc',
'start': 40,
'end': 45,
},
216: {
'event': 'd002',
'url': 'Glc6Ekc67OE',
'start': 25,
'end': 35,
},
217: {
'event': 'd002',
'url': 'Ki7Xvd2_hxY',
'start': 3,
'end': 10,
},
218: {
'event': 'd002',
'url': 'Ki7Xvd2_hxY',
'start': 15,
'end': 20,
},
219: {
'event': 'd002',
'url': 'KrtiLKd4VCI',
'start': 99,
'end': 110,
},
220: {
'event': 'd002',
'url': 'P_dXuddk3fE',
'start': 0,
'end': 18,
},
#######################################################################
221: {
'event': 'f000',
'url': '0JPT13OUVV8',
'start': 39,
'end': 45,
},
222: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 33,
'end': 40,
},
223: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 75,
'end': 81,
},
224: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 84,
'end': 89,
},
225: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 103,
'end': 118,
},
226: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 129,
'end': 135,
},
227: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 225,
'end': 232,
},
228: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 235,
'end': 249,
},
229: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 258,
'end': 264,
},
230: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 338,
'end': 348,
},
231: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 384,
'end': 396,
},
232: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 400,
'end': 410,
},
233: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 626,
'end': 634,
},
234: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 719,
'end': 725,
},
235: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 0,
'end': 12,
},
236: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 61,
'end': 72,
},
237: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 73,
'end': 85,
},
238: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 320,
'end': 340,
},
239: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 360,
'end': 380,
},
240: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 407,
'end': 416,
},
#######################################################################
241: {
'event': 'f001',
'url': '5EbJQCFom8o',
'start': 8,
'end': 19,
},
242: {
'event': 'f001',
'url': '6z4HD7Dw7i8',
'start': 35,
'end': 40,
},
243: {
'event': 'f001',
'url': 'PCsQ3zgL3CU',
'start': 66,
'end': 86,
},
244: {
'event': 'f001',
'url': '7GEiPdnqJUw',
'start': 4,
'end': 24,
},
245: {
'event': 'f001',
'url': 'BIK1Ds79KVM',
'start': 105,
'end': 125,
},
246: {
'event': 'f001',
'url': 'CDwk_DbprX4',
'start': 37,
'end': 42,
},
247: {
'event': 'f001',
'url': 'D2_KIhSbmt0',
'start': 9,
'end': 17,
},
248: {
'event': 'f001',
'url': 'DMMCiQB7-E4',
'start': 24,
'end': 29,
},
249: {
'event': 'f001',
'url': 'F3dasUA6LqU',
'start': 85,
'end': 104,
},
250: {
'event': 'f001',
'url': 'GyygYycarL0',
'start': 69,
'end': 80,
},
251: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 50,
'end': 60,
},
252: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 68,
'end': 78,
},
253: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 86,
'end': 96,
},
254: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 160,
'end': 170,
},
255: {
'event': 'f001',
'url': 'HxO2GRMD_fw',
'start': 47,
'end': 57,
},
256: {
'event': 'f001',
'url': 'I6YfsWzCvLI',
'start': 25,
'end': 34,
},
257: {
'event': 'f001',
'url': 'IYrVF4tHN08',
'start': 1,
'end': 19,
},
258: {
'event': 'f001',
'url': 'LjeZYuAHjpk',
'start': 2,
'end': 14,
},
259: {
'event': 'f001',
'url': 'Mls0tzvQpzQ',
'start': 83,
'end': 92,
},
260: {
'event': 'f001',
'url': 'O2htSqXhdqE',
'start': 65,
'end': 71,
},
#######################################################################
261: {
'event': 'h000',
'url': 'cSrL0BXsO40',
'start': 0,
'end': 17,
},
262: {
'event': 'h000',
'url': 'drVo5VQfsDc',
'start': 0,
'end': 6,
},
263: {
'event': 'h000',
'url': '-dEOa2GkXHw',
'start': 137,
'end': 143,
},
264: {
'event': 'h000',
'url': 'kVQbu_BsZ9o',
'start': 0,
'end': 10,
},
265: {
'event': 'h000',
'url': 'k_kRSOra2qA',
'start': 9.5,
'end': 17,
},
266: {
'event': 'h000',
'url': 'k_kRSOra2qA',
'start': 294,
'end': 304,
},
267: {
'event': 'h000',
'url': '-q1pzc3VMrg',
'start': 30,
'end': 38,
},
268: {
'event': 'h000',
'url': '-q1pzc3VMrg',
'start': 296,
'end': 309,
},
269: {
'event': 'h000',
'url': 'qF90ezvPe14',
'start': 8,
'end': 13,
},
270: {
'event': 'h000',
'url': 'x9Kkv8j42mI',
'start': 21,
'end': 28,
},
271: {
'event': 'h000',
'url': 'yOelIR7hiMc',
'start': 6,
'end': 25,
},
272: {
'event': 'h000',
'url': '0StCxWx9dV8',
'start': 6,
'end': 14,
},
273: {
'event': 'h000',
'url': 'zLo1mkKE4sw',
'start': 31,
'end': 41,
},
274: {
'event': 'h000',
'url': '0150dZu3Na8',
'start': 0,
'end': 7,
},
275: {
'event': 'h000',
'url': '7rk62G1WyG8',
'start': 17,
'end': 24,
},
276: {
'event': 'h000',
'url': '7rk62G1WyG8',
'start': 60,
'end': 73,
},
277: {
'event': 'h000',
'url': '9avOnbp3NA8',
'start': 3,
'end': 20,
},
278: {
'event': 'h000',
'url': '0jzTEIxgsjM',
'start': 11,
'end': 18,
},
279: {
'event': 'h000',
'url': 'E9etGzNH2SM',
'start': 0,
'end': 8,
},
280: {
'event': 'h000',
'url': 'GGyrdlFfowc',
'start': 0,
'end': 12,
},
#######################################################################
281: {
'event': 'h001',
'url': '8TkXXqFWNWQ',
'start': 24,
'end': 42,
},
282: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 61,
'end': 81,
},
283: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 90,
'end': 102,
},
284: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 123,
'end': 132,
},
285: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 166,
'end': 182,
},
286: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 184,
'end': 202,
},
287: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 203,
'end': 212,
},
288: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 214,
'end': 224,
},
289: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 257,
'end': 272,
},
290: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 274,
'end': 285,
},
291: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 305,
'end': 315,
},
292: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 318,
'end': 324,
},
293: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 327,
'end': 339,
},
294: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 397,
'end': 406,
},
295: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 407,
'end': 426,
},
296: {
'event': 'h001',
'url': 'oDQb7qZsz6o',
'start': 373,
'end': 381,
},
297: {
'event': 'h001',
'url': 'soVRoIbewMM',
'start': 35,
'end': 42,
},
298: {
'event': 'h001',
'url': 'xnliLFqdfo0',
'start': 92,
'end': 112,
},
299: {
'event': 'h001',
'url': 'VOUm1PTYpB0',
'start': 34,
'end': 39,
},
300: {
'event': 'h001',
'url': 'zGsY2GGVSao',
'start': 67,
'end': 73,
},
#######################################################################
301: {
'event': 'h002',
'url': 'K9BXym8IG_o',
'start': 3,
'end': 13,
},
302: {
'event': 'h002',
'url': 'LUK71I-yxXI',
'start': 4,
'end': 20,
},
303: {
'event': 'h002',
'url': 'NYG_T2t542Q',
'start': 19,
'end': 35,
},
304: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 1,
'end': 15,
},
305: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 31,
'end': 41,
},
306: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 51,
'end': 59,
},
307: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 67,
'end': 87,
},
308: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 0,
'end': 15,
},
309: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 16,
'end': 30,
},
310: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 31,
'end': 45,
},
311: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 46,
'end': 60,
},
312: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 61,
'end': 80,
},
313: {
'event': 'h002',
'url': 'bcYI2CTlH5o',
'start': 13,
'end': 24,
},
314: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 85,
'end': 95,
},
315: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 100,
'end': 110,
},
316: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 120,
'end': 126,
},
317: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 159,
'end': 179,
},
318: {
'event': 'h002',
'url': 'hxH7Uith0tQ',
'start': 0,
'end': 15,
},
319: {
'event': 'h002',
'url': 'hxH7Uith0tQ',
'start': 16,
'end': 30,
},
320: {
'event': 'h002',
'url': 'hxH7Uith0tQ',
'start': 31,
'end': 45,
},
#######################################################################
321: {
'event': 'h003',
'url': '3rGHjZMdW4Y',
'start': 15,
'end': 24,
},
322: {
'event': 'h003',
'url': '9bIchzOP8PA',
'start': 18,
'end': 25,
},
323: {
'event': 'h003',
'url': 'A8sju2x5nhE',
'start': 339,
'end': 345,
},
324: {
'event': 'h003',
'url': 'FUXSq44CbHo',
'start': 221,
'end': 232,
},
325: {
'event': 'h003',
'url': 'IWxlWrfpk_g',
'start': 8,
'end': 28,
},
326: {
'event': 'h003',
'url': 'NETQtgbQ9-s',
'start': 0,
'end': 9,
},
327: {
'event': 'h003',
'url': 'SqsmuNOtmwM',
'start': 26,
'end': 39,
},
328: {
'event': 'h003',
'url': 'XrKDtjFM9Ec',
'start': 0,
'end': 13,
},
329: {
'event': 'h003',
'url': '_H4iHqtGlAY',
'start': 0,
'end': 15,
},
330: {
'event': 'h003',
'url': 'aa_468eUE1o',
'start': 230,
'end': 242,
},
331: {
'event': 'h003',
'url': 'aomaneVgUs0',
'start': 51,
'end': 57,
},
332: {
'event': 'h003',
'url': 'bW-xjy5-a1s',
'start': 58,
'end': 71,
},
333: {
'event': 'h003',
'url': 'fIPsH57dZIY',
'start': 0,
'end': 20,
},
334: {
'event': 'h003',
'url': 'ojvtp3aHKdc',
'start': 5,
'end': 12,
},
335: {
'event': 'h003',
'dir': 'raws/h003_1.wav',
'start': 0,
'end': -1,
},
336: {
'event': 'h003',
'dir': 'raws/h003_2.wav',
'start': 0,
'end': -1,
},
337: {
'event': 'h003',
'dir': 'raws/h003_3.wav',
'start': 0,
'end': -1,
},
338: {
'event': 'h003',
'dir': 'raws/h003_4.wav',
'start': 0,
'end': -1,
},
339: {
'event': 'h003',
'dir': 'raws/h003_5.wav',
'start': 0,
'end': -1,
},
340: {
'event': 'h003',
'dir': 'raws/h003_6.wav',
'start': 0,
'end': -1,
},
#######################################################################
341: {
'event': 'h004',
'url': 'nFdmth2N8Bo',
'start': 0,
'end': 18,
},
342: {
'event': 'h004',
'url': 'pNrjoDwCnik',
'start': 280,
'end': 294,
},
343: {
'event': 'h004',
'url': 't4wDKhMiKpA',
'start': 13,
'end': 20,
},
344: {
'event': 'h004',
'url': 'uscPSf6C_Js',
'start': 14,
'end': 30,
},
345: {
'event': 'h004',
'url': 'w8an1GY8T00',
'start': 42,
'end': 46,
},
346: {
'event': 'h004',
'url': '2k6Bw9EVz7g',
'start': 17,
'end': 22,
},
347: {
'event': 'h004',
'url': '8g2Uv6QqI_Y',
'start': 185,
'end': 200,
},
348: {
'event': 'h004',
'url': 'BH0rbQ6zHlw',
'start': 6,
'end': 22,
},
349: {
'event': 'h004',
'url': 'GlWecURh_OU',
'start': 94,
'end': 104,
},
350: {
'event': 'h004',
'url': 'LU1vqeS4G4s',
'start': 78,
'end': 88,
},
351: {
'event': 'h004',
'url': 'SA4SG1Nt0mw',
'start': 0,
'end': 5,
},
352: {
'event': 'h004',
'url': '4t524YeonRo',
'start': 7,
'end': 15,
},
353: {
'event': 'h004',
'url': 'UQtbZNMp1nY',
'start': 0,
'end': 14,
},
354: {
'event': 'h004',
'url': 'UhANSJnLXNs',
'start': 0,
'end': 15,
},
355: {
'event': 'h004',
'url': '4t524YeonRo',
'start': 37,
'end': 43,
},
356: {
'event': 'h004',
'url': 'Wy49nszOnxo',
'start': 139,
'end': 149,
},
357: {
'event': 'h004',
'url': '_yFwVTg-V-M',
'start': 0,
'end': 18,
},
358: {
'event': 'h004',
'url': 'e3BdNhbiDwA',
'start': 191,
'end': 201,
},
359: {
'event': 'h004',
'url': 'iLUd4l1JFDI',
'start': 0,
'end': 16,
},
360: {
'event': 'h004',
'url': 'jx1sWITDw-E',
'start': 24,
'end': 37,
},
#######################################################################
361: {
'event': 'p000',
'url': 'QeS7zmkTOig',
'start': 0,
'end': 4,
},
362: {
'event': 'p000',
'url': 'URxsjJi1IL4',
'start': 2,
'end': 21,
},
363: {
'event': 'p000',
'url': 'Zf5gYtlz6Pw',
'start': 3,
'end': 20,
},
364: {
'event': 'p000',
'url': 'yKls2m5kM14',
'start': 0,
'end': 4,
},
365: {
'event': 'p000',
'url': 'yibeLZXOHiU',
'start': 0,
'end': 4,
},
366: {
'event': 'p000',
'url': 'ys60zlhXTs4',
'start': 0,
'end': 16,
},
367: {
'event': 'p000',
'url': '2QcOD8uCu0E',
'start': 0,
'end': 4,
},
368: {
'event': 'p000',
'url': '4BUEj-TxY5g',
'start': 0,
'end': 11,
},
369: {
'event': 'p000',
'url': 'IigiZ3ss6HE',
'start': 8,
'end': 21,
},
370: {
'event': 'p000',
'url': 'NK92DUyyngc',
'start': 13,
'end': 25,
},
371: {
'event': 'p000',
'url': 'fR2lhjlHR4I',
'start': 28,
'end': 48,
},
372: {
'event': 'p000',
'dir': 'raws/p000_1.wav',
'start': 0,
'end': -1,
},
373: {
'event': 'p000',
'dir': 'raws/p000_2.wav',
'start': 0,
'end': -1,
},
374: {
'event': 'p000',
'dir': 'raws/p000_3.wav',
'start': 0,
'end': -1,
},
375: {
'event': 'p000',
'dir': 'raws/p000_4.wav',
'start': 0,
'end': -1,
},
376: {
'event': 'p000',
'dir': 'raws/p000_5.wav',
'start': 0,
'end': -1,
},
377: {
'event': 'p000',
'dir': 'raws/p000_6.wav',
'start': 0,
'end': -1,
},
378: {
'event': 'p000',
'dir': 'raws/p000_7.wav',
'start': 0,
'end': -1,
},
379: {
'event': 'p000',
'dir': 'raws/p000_8.wav',
'start': 0,
'end': -1,
},
380: {
'event': 'p000',
'dir': 'raws/p000_9.wav',
'start': 0,
'end': -1,
},
#######################################################################
381: {
'event': 't000',
'url': '3y2aZEs1F5s',
'start': 75,
'end': 85,
},
382: {
'event': 't000',
'url': '4lNM6Ah99hw',
'start': 0,
'end': 11,
},
383: {
'event': 't000',
'url': '6OHetw29o_A',
'start': 3,
'end': 15,
},
384: {
'event': 't000',
'url': '78R6KgsSPRk',
'start': 0,
'end': 7,
},
385: {
'event': 't000',
'url': 'APYXZHZPCE4',
'start': 38,
'end': 52,
},
386: {
'event': 't000',
'url': 'SavkOa_GGLs',
'start': 12,
'end': 16,
},
387: {
'event': 't000',
'url': 'bW_PMIAIHBE',
'start': 47,
'end': 52,
},
388: {
'event': 't000',
'url': 'dI9HTTk6Mgs',
'start': 5,
'end': 11,
},
389: {
'event': 't000',
'url': 'dJudErPaMWI',
'start': 39,
'end': 48,
},
390: {
'event': 't000',
'url': 'dxcs_lpcwj0',
'start': 5,
'end': 25,
},
391: {
'event': 't000',
'url': 'dxcs_lpcwj0',
'start': 119,
'end': 139,
},
392: {
'event': 't000',
'url': 'h6voPlJG0m0',
'start': 24,
'end': 31,
},
393: {
'event': 't000',
'url': 'jDdYqpYoIGY',
'start': 29,
'end': 41,
},
394: {
'event': 't000',
'url': 'jotE032i05c',
'start': 2,
'end': 22,
},
395: {
'event': 't000',
'url': 'jotE032i05c',
'start': 60,
'end': 72,
},
396: {
'event': 't000',
'url': 'nD_HctFk3Hc',
'start': 434,
'end': 442,
},
397: {
'event': 't000',
'url': 'y2A1Pmiu7yw',
'start': 76,
'end': 86,
},
398: {
'event': 't000',
'url': 'y2A1Pmiu7yw',
'start': 587,
'end': 595,
},
399: {
'event': 't000',
'url': '8pJUJvPfIx0',
'start': 76,
'end': 94,
},
400: {
'event': 't000',
'url': 'mi-s3pLeR3U',
'start': 616,
'end': 634,
},
}
with open('daqa_sources.json', 'w') as f:
json.dump(sources, f)
if __name__ == "__main__":
main()
print('Success!')
|
daqa-master
|
daqa-gen/daqa_sources.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
def main():
dataset = {
'events': ['a000', 'b000', 'b001', 'c000', 'c001', 'c002', 'c003',
'c004', 'd000', 'd001', 'd002', 'f000', 'f001', 'h000',
'h001', 'h002', 'h003', 'h004', 'p000', 't000'], # unique
'sources': {
'a000': ['aircraft', 'plane'],
'b000': ['band'],
'b001': ['bird'],
'c000': ['crowd'],
'c001': ['crowd'],
'c002': ['crowd'],
'c003': ['driver', 'car', 'vehicle'],
'c004': ['car', 'vehicle'],
'd000': ['door'],
'd001': ['doorbell'],
'd002': ['dog'],
'f000': ['fire truck', 'fire engine', 'emergency vehicle'],
'f001': ['fire alarm', 'alarm'],
'h000': ['human'],
'h001': ['human'],
'h002': ['human'],
'h003': ['human'],
'h004': ['human'],
'p000': ['phone'],
't000': ['storm'],
},
'actions': {
'a000': ['passing by', 'flying over'],
'b000': ['playing'],
'b001': ['singing'],
'c000': ['babbling'],
'c001': ['applauding', 'clapping'],
'c002': ['rioting', 'making noise'],
'c003': ['honking'],
'c004': ['passing by'],
'd000': ['slamming', 'closing', 'shutting'],
'd001': ['ringing'],
'd002': ['barking', 'making noise'],
'f000': ['passing by'],
'f001': ['going off'],
'h000': ['speaking', 'talking'],
'h001': ['laughing'],
'h002': ['typing on a keyboard', 'typing'],
'h003': ['whistling'],
'h004': ['operating a machine'],
'p000': ['ringing'],
't000': ['thundering'],
},
'consecutive': {
'a000': True,
'b000': False,
'b001': False,
'c000': False,
'c001': False,
'c002': False,
'c003': False,
'c004': True,
'd000': True,
'd001': False,
'd002': False,
'f000': False,
'f001': False,
'h000': True,
'h001': True,
'h002': False,
'h003': False,
'h004': False,
'p000': False,
't000': False,
}
}
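    # The 'consecutive' flags are consumed by generate_audio.py: events marked
    # False may not occur twice in a row within a generated sequence, while
    # events marked True (e.g. 'a000', 'd000') may repeat back-to-back.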
with open('daqa_outline.json', 'w') as f:
json.dump(dataset, f)
if __name__ == "__main__":
main()
print('Success!')
|
daqa-master
|
daqa-gen/daqa_outline.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import json
import os
import random
import numpy as np
import scipy
import scipy.io.wavfile
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--dataset', default='daqa.json', type=str,
help='JSON file describing the dataset.')
parser.add_argument('--events', default='events', type=str,
help='Location of individual audio events.')
parser.add_argument('--backgrounds', default='backgrounds', type=str,
help='Location of some background noise audio.')
parser.add_argument('--data_fs', default=16000, type=int,
help='Sampling frequency (Hz).')
# Settings
parser.add_argument('--min_num_events', default=5, type=int,
help='Minimum number of events per generated audio.')
parser.add_argument('--max_num_events', default=12, type=int,
help='Maximum number of events per generated audio.')
parser.add_argument('--rand_overlap', default=0.5, type=float,
help='Maximum overlap between adjacent events (seconds).')
parser.add_argument('--seed', default=0, type=int, help='Random Seed.')
parser.add_argument('--version', default='1.0', type=str, help='Version.')
parser.add_argument('--date',
default=datetime.datetime.today().strftime("%m/%d/%Y"),
help="Date.")
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)',
help='License.')
# Output
parser.add_argument('--start_idx', default=0, type=int,
help='Start numbering from start_idx.')
parser.add_argument('--num_audio', default=10, type=int,
help='Number of audio to generate.')
parser.add_argument('--filename_prefix', default='daqa', type=str,
help='Filename prefix to audio and JSON files.')
parser.add_argument('--set', default='new',
help='Set name: train / val / test.')
parser.add_argument('--num_digits', default=6, type=int,
help='Number of digits to enumerate the generated files.')
parser.add_argument('--output_audio_dir', default='../daqa/audio/',
help='Directory to output generated audio.')
parser.add_argument('--output_narrative_dir', default='../daqa/narratives/',
help='Directory to output generated narratives.')
parser.add_argument('--output_narrative_file',
default='../daqa/daqa_narratives.json',
help="Path to narratives JSON file.")
def main(args):
"""Randomly sample audio events to form sequences of events."""
random.seed(args.seed)
np.random.seed(args.seed)
# Read dataset description
with open(args.dataset, 'r') as f:
dataset = json.load(f)
# Define naming conventions and directories
prefix = '%s_%s_' % (args.filename_prefix, args.set)
audio_template = '%s%%0%dd.wav' % (prefix, args.num_digits)
audio_template = os.path.join(args.output_audio_dir, audio_template)
narrative_template = '%s%%0%dd.json' % (prefix, args.num_digits)
narrative_template = os.path.join(args.output_narrative_dir,
narrative_template)
if not os.path.isdir(args.output_audio_dir):
os.makedirs(args.output_audio_dir)
if not os.path.isdir(args.output_narrative_dir):
os.makedirs(args.output_narrative_dir)
# Get list of events and backgrounds
lst_events = list(dataset['origins'].keys()) # without .wav
lst_events_wav = os.listdir(args.events)
lst_events_wav = [e[:-4] for e in lst_events_wav if e.endswith('.wav')]
assert len(lst_events) == len(lst_events_wav), 'Dataset mismatch.'
assert sorted(lst_events) == sorted(lst_events_wav), 'Dataset mismatch.'
lst_bckgrnds = os.listdir(args.backgrounds)
lst_bckgrnds = [e for e in lst_bckgrnds if e.endswith('.wav')]
x_consctvs = [k for k, v in dataset['consecutive'].items() if v is False]
num_fails = 0
# Generate audio and narratives from events
lst_narrative_paths = []
for i in range(args.num_audio):
idx = args.start_idx + i
audio_path = audio_template % idx
narrative_path = narrative_template % idx
lst_narrative_paths.append(narrative_path)
num_events = random.randint(args.min_num_events, args.max_num_events)
# Sample num_events number of events (not unique)
sel_events = None
while sel_events is None:
sel_events = random.sample(lst_events, num_events)
# The following checks if the sequence of selected events is ok
sel_events_dx = [x.split('_')[0] for x in sel_events]
            # Check if the list has any identical consecutive events
consecutives = []
for x in range(len(sel_events_dx) - 1):
if sel_events_dx[x] == sel_events_dx[x + 1]:
consecutives.append(sel_events_dx[x])
# Check if any of the events in consecutives are not allowed
if len([x for x in consecutives if x in x_consctvs]) > 0:
sel_events = None # retry
num_fails += 1
sel_bckgrnd = random.sample(lst_bckgrnds, 1)
audio, narrative = gen_audio_narrative(dataset=dataset,
args=args,
selcted_events=sel_events,
selcted_bckgrnd=sel_bckgrnd,
output_index=idx,
output_audio=audio_path,
)
scipy.io.wavfile.write(audio_path, args.data_fs, audio)
with open(narrative_path, 'w') as f:
json.dump(narrative, f)
print('Generated ' + str(args.num_audio) + ' audio sequences ('
          + str(num_fails) + ' failed attempts). Compiling narratives...')
# Combine all narratives into a single JSON file
lst_narratives = []
for narrative_path in lst_narrative_paths:
with open(narrative_path, 'r') as f:
lst_narratives.append(json.load(f))
output = {
'info': {
'set': args.set,
'version': args.version,
'date': args.date,
'license': args.license,
},
'narratives': lst_narratives
}
with open(args.output_narrative_file, 'w') as f:
json.dump(output, f)
return True
def gen_audio_narrative(dataset,
args,
selcted_events,
selcted_bckgrnd,
output_index,
output_audio):
# Read audio events
lst_audio_events = []
for e in selcted_events:
e_wav = os.path.join(args.events, e + '.wav')
event_fs, event = scipy.io.wavfile.read(e_wav)
assert event_fs == args.data_fs, \
'Audio event sampling frequency != ' + str(args.data_fs) + ' Hz.'
lst_audio_events.append(event)
# Toss an unbiased coin to concatenate or add events
if random.random() < 0.5:
# concatenate
audio = np.concatenate(lst_audio_events)
else:
# add (allows overlap between adjacent events)
audio = lst_audio_events[0]
for event in lst_audio_events[1:]:
            idx_overlap = random.randint(
                0, int(args.rand_overlap * args.data_fs))
plhldr = np.zeros(event.shape[0] - idx_overlap, event.dtype)
audio = np.concatenate((audio, plhldr))
audio[-event.shape[0]:] += event
assert len(audio.shape) == 1, 'Audio events not concatenated properly.'
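    # Worked example of the overlap arithmetic above (illustrative only): at
    # data_fs = 16000 Hz and rand_overlap = 0.5 s, idx_overlap is drawn from
    # [0, 8000] samples; the zero placeholder has length
    # len(event) - idx_overlap, so adding the event over the last len(event)
    # samples overlaps the previous audio by exactly idx_overlap samples.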
# Toss an unbiased coin to add background noise
background = 'None'
if random.random() < 0.5:
selec_bckgrnd = os.path.join(args.backgrounds, selcted_bckgrnd[0])
bckgrnd_fs, bckgrnd = scipy.io.wavfile.read(selec_bckgrnd)
        assert bckgrnd_fs == args.data_fs, \
'Bckgrnd sampling frequency != ' + str(args.data_fs) + ' Hz.'
idx_trim = random.randint(0, bckgrnd.shape[0] - audio.shape[0])
trim_bckgrnd = bckgrnd[idx_trim:(audio.shape[0] + idx_trim)]
audio += trim_bckgrnd
background = selcted_bckgrnd[0][:-4]
events = []
for idx, sel_event in enumerate(selcted_events):
event_dx = sel_event.split('_')[0]
event = { # 'start_time': 'end_time':
'order': idx,
'event': event_dx,
'audio': sel_event,
'source': random.choice(dataset['sources'][event_dx]),
'action': random.choice(dataset['actions'][event_dx]),
'duration': (float(lst_audio_events[idx].shape[0]) / args.data_fs),
'loudness': dataset['origins'][sel_event]['loudness'],
}
events.append(event)
# Generate JSON
narrative = {
'set': args.set,
'audio_index': output_index,
'audio_filename': os.path.basename(output_audio),
'background': background,
'events': events,
}
return audio, narrative
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-gen/generate_audio.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import json
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--outline', default='daqa_outline.json', type=str,
help='Location of outline file.')
parser.add_argument('--sources', default='daqa_sources.json', type=str,
help='Location of sources file.')
parser.add_argument('--loudness', default='daqa_loudness.json', type=str,
help='Location of loudness file.')
# Settings
parser.add_argument('--version', default='1.0', type=str,
help='Version.')
parser.add_argument('--date',
default=datetime.datetime.today().strftime("%m/%d/%Y"),
help="Date.")
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)',
help='License.')
# Output
parser.add_argument('--output', default='daqa.json', type=str,
help='Location of dataset file.')
def main(args):
# Read files
with open(args.outline, 'r') as f:
outline = json.load(f)
with open(args.sources, 'r') as f:
sources = json.load(f)
with open(args.loudness, 'r') as f:
loudness = json.load(f)
dataset = {
'info': {
'version': args.version,
'date': args.date,
'license': args.license,
},
'events': outline['events'],
'sources': outline['sources'],
'actions': outline['actions'],
'consecutive': outline['consecutive'],
'origins': {},
}
counter = {}
for i in range(len(dataset['events'])):
counter[dataset['events'][i]] = 0
for i in range(1, len(sources.keys()) + 1):
counter[sources[str(i)]['event']] += 1
ins = sources[str(i)]['event'] + '_' + \
str(counter[sources[str(i)]['event']])
dataset['origins'][ins] = sources[str(i)]
dataset['origins'][ins]['filename'] = ins + '.wav'
dataset['origins'][ins]['loudness'] = loudness[ins]
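    # For example, entry '1' of daqa_sources.json (event 'a000') becomes
    # dataset['origins']['a000_1'] with the added keys 'filename': 'a000_1.wav'
    # and 'loudness': loudness['a000_1'] (loudness values come from
    # daqa_loudness.json, which is not shown here).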
with open(args.output, 'w') as f:
json.dump(dataset, f) # indent=2
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-gen/daqa.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, sample_absolute_duration,
sample_absolute_loudness, sample_immediate_preposition,
sample_number, sample_preposition, sanitize_question)
def what_was(dataset, narrative, _):
questions = ['What was the <O> sound you [heard,listened to]?',
'What was the <O> sound?',
'What did the <O> sound [sound,seem] like?',
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
event = lst_events[number - 1]
answer = (str(np.random.choice(dataset['sources'][event]))
+ ' '
+ str(np.random.choice(dataset['actions'][event])))
return question, answer
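# Illustrative output of what_was (the exact ordinal wording depends on
# sample_number and sanitize_question in qpas.utils, which are not shown here):
# for a narrative whose second event is 'd002', this might yield
#   ('What was the second sound?', 'dog barking')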
def what_was_relative(dataset, narrative, _):
questions = ['What was the sound <RO> the <S> <A>?',
'What was the sound <RO> [hearing,listening to] the <S> <A>?',
'What was the sound <RO> the <S> <A> was heard?',
'What did you [hear,listen to] <RO> the <S> <A>?',
'What did you [hear,listen to] <RO> [hearing,listening to] the <S> <A>?', # noqa: E501
'What did you [hear,listen to] <RO> the <S> <A> was heard?',
'What was the sound <IO> the <S> <A>?',
'What was the sound <IO> [hearing,listening to] the <S> <A>?',
'What was the sound <IO> the <S> <A> was heard?',
'What did you [hear,listen to] <IO> the <S> <A>?',
'What did you [hear,listen to] <IO> [hearing,listening to] the <S> <A>?', # noqa: E501
'What did you [hear,listen to] <IO> the <S> <A> was heard?',
]
question = str(np.random.choice(questions)) # sample question
preposition = sample_preposition()
immediate_preposition = sample_immediate_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (what_was_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
# Only one of the following two lines will have an effect
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<IO>', immediate_preposition)
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (what_was_relative) illposed.'
event_idx = lst_events.index(event)
if 'before' in question:
if (event_idx - 1) < 0:
answer = 'nothing'
else:
e = lst_events[event_idx - 1]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
elif 'after' in question:
if (event_idx + 1) >= len(lst_events):
answer = 'nothing'
else:
e = lst_events[event_idx + 1]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
else:
assert False, 'Preposition illdefined in Question (what_was_relative).'
return question, answer
def what_was_loudness(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AL> sound?',
'What was the <AL> sound you [heard,listened to]?',
'What was the <AL> sound that you [heard,listened to]?',
'What was the <AL> sound that was heard?',
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_absolute_loudness()
question = question.replace('<AL>', loudness) # insert loudness
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
lst_loudness = get_lst_loudness(narrative)
if 'loud' in question:
est = np.argmax(lst_loudness)
elif 'quiet' in question:
est = np.argmin(lst_loudness)
else:
assert False, \
'Loudness illdefined in Question (what_was_loudness).'
# Assert a good margin in relative loudness
evt_loudness = lst_loudness[est]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != est]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (what_was_loudness) illposed.'
e = lst_events[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
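# Note on the margin check above: compute_rel_diff (from qpas.utils, not shown
# here) presumably returns the elementwise relative difference between the
# remaining loudness values and the extreme one; if any other event is within
# rel_diff (0.1 by default) of the loudest/quietest, the assertion fails and
# the question is discarded as ill-posed by the AssertionError handler in
# generate_questions_answers.py.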
def what_was_loudness_relative(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AL> sound <RO> the <S> <A>?',
'What was the <AL> sound <RO> [hearing,listening to] the <S> <A>?',
'What was the <AL> sound <RO> the <S> <A> was heard?',
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_absolute_loudness()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (what_was_loudness_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<AL>', loudness) # insert loudness
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (what_was_loudness_relative) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
if 'before' in question:
lst_events_e = lst_events[:event_idx]
lst_events_l = lst_loudness[:event_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_l = lst_loudness[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_loudness_relative).'
assert len(lst_events_e) > 0, \
'Question (what_was_loudness_relative) illposed.'
if 'loud' in question:
est = np.argmax(lst_events_l)
elif 'quiet' in question:
est = np.argmin(lst_events_l)
else:
assert False, \
'Loudness illdefined in Question (what_was_loudness_relative).'
# Assert a good margin in relative loudness
evt_loudness = lst_events_l[est]
x_loudness = [j for i, j in enumerate(lst_events_l) if i != est]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (what_was_loudness_relative) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_loudness_relative_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AL> sound <RO> the <O> sound?',
'What was the <AL> sound <RO> [hearing,listening to] the <O> sound?',
'What was the <AL> sound <RO> the <O> sound was heard?',
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_absolute_loudness()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<AL>', loudness) # insert loudness
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
event_idx = (number - 1)
answer = None
if 'before' in question:
if (event_idx - 1) < 0:
answer = 'nothing'
else:
lst_events_e = lst_events[:event_idx]
lst_events_l = lst_loudness[:event_idx]
elif 'after' in question:
if (event_idx + 1) >= len(lst_events):
answer = 'nothing'
else:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_l = lst_loudness[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_loudness_relative_ordinal).'
if answer is None:
assert len(lst_events_e) > 0, \
'Question (what_was_loudness_relative_ordinal) illposed.'
if 'loud' in question:
est = np.argmax(lst_events_l)
elif 'quiet' in question:
est = np.argmin(lst_events_l)
else:
assert False, \
'Loudness illdefined in Question (what_was_loudness_relative_ordinal).'
# Assert a good margin in relative loudness
evt_loudness = lst_events_l[est]
x_loudness = [j for i, j in enumerate(lst_events_l) if i != est]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (what_was_loudness_relative_ordinal) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_duration(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AD> sound?',
'What was the <AD> sound you [heard,listened to]?',
'What was the <AD> sound that you [heard,listened to]?',
'What was the <AD> sound that was heard?',
]
question = str(np.random.choice(questions)) # sample question
duration = sample_absolute_duration()
question = question.replace('<AD>', duration) # insert duration
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
lst_durations = get_lst_durations(narrative)
if 'long' in question:
est = np.argmax(lst_durations)
elif 'short' in question:
est = np.argmin(lst_durations)
else:
assert False, \
'Duration illdefined in Question (what_was_duration).'
# Assert a good margin in relative duration
evt_duration = lst_durations[est]
x_durations = [j for i, j in enumerate(lst_durations) if i != est]
rel_duration_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (what_was_duration) illposed.'
e = lst_events[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_duration_relative(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AD> sound <RO> the <S> <A>?',
'What was the <AD> sound <RO> [hearing,listening to] the <S> <A>?',
'What was the <AD> sound <RO> the <S> <A> was heard?',
]
question = str(np.random.choice(questions)) # sample question
duration = sample_absolute_duration()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (what_was_duration_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<AD>', duration) # insert duration
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (what_was_duration_relative) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
if 'before' in question:
lst_events_e = lst_events[:event_idx]
lst_events_d = lst_durations[:event_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_d = lst_durations[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_duration_relative).'
assert len(lst_events_e) > 0, \
'Question (what_was_duration_relative) illposed.'
if 'long' in question:
est = np.argmax(lst_events_d)
elif 'short' in question:
est = np.argmin(lst_events_d)
else:
assert False, \
'Duration illdefined in Question (what_was_duration_relative).'
# Assert a good margin in relative duration
evt_duration = lst_events_d[est]
x_durations = [j for i, j in enumerate(lst_events_d) if i != est]
rel_duration_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (what_was_duration_relative) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_duration_relative_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AD> sound <RO> the <O> sound?',
'What was the <AD> sound <RO> [hearing,listening to] the <O> sound?',
'What was the <AD> sound <RO> the <O> sound was heard?',
]
question = str(np.random.choice(questions)) # sample question
duration = sample_absolute_duration()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<AD>', duration) # insert duration
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
event_idx = (number - 1)
answer = None
if 'before' in question:
if (event_idx - 1) < 0:
answer = 'nothing'
else:
lst_events_e = lst_events[:event_idx]
lst_events_d = lst_durations[:event_idx]
elif 'after' in question:
if (event_idx + 1) >= len(lst_events):
answer = 'nothing'
else:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_d = lst_durations[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_duration_relative_ordinal).'
if answer is None:
assert len(lst_events_e) > 0, \
'Question (what_was_duration_relative_ordinal) illposed.'
if 'long' in question:
est = np.argmax(lst_events_d)
elif 'short' in question:
est = np.argmin(lst_events_d)
else:
assert False, \
'Duration illdefined in Question (what_was_duration_relative_ordinal).'
# Assert a good margin in relative duration
evt_duration = lst_events_d[est]
x_durations = [j for i, j in enumerate(lst_events_d) if i != est]
rel_duration_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (what_was_duration_relative_ordinal) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
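# Hypothetical usage sketch (added for illustration, not part of the original
# file): builds a toy dataset/narrative pair with the field names the
# functions above rely on ('sources'/'actions' keyed by event id, and
# narrative 'events' entries carrying 'event' and 'duration') and generates a
# single duration question. All concrete names and numbers are invented.
def _demo_what_was_duration():
    toy_dataset = {
        'sources': {'door': ['door'], 'phone': ['telephone']},
        'actions': {'door': ['slamming'], 'phone': ['ringing']},
    }
    toy_narrative = {'events': [
        {'event': 'door', 'duration': 1.0},
        {'event': 'phone', 'duration': 5.0},
        {'event': 'door', 'duration': 2.0},
    ]}
    question, answer = what_was_duration(toy_dataset, toy_narrative)
    print(question, '->', answer)  # e.g. What was the longest sound? -> telephone ringing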
|
daqa-master
|
daqa-gen/qpas/query.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_all_sources,
get_lst_durations, get_lst_events, get_lst_loudness,
sample_duration, sample_immediate_preposition,
sample_loudness, sample_number, sample_preposition,
sanitize_question)
def was_there(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S> <A>?',
'Have you [heard,listened to] [a,an] <S> <A>?',
'Did you [hear,listen to] any <S> <A>?',
'Have you [heard,listened to] any <S> <A>?',
'Did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S> <A>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounds like,sounded like,is,was] [a,an] <S> <A>?', # noqa: E501
'Was there [a,an] <S> <A>?',
'Were there any <S>s <A>?',
]
question = str(np.random.choice(questions)) # sample question
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
answer = 'yes' if event in get_lst_events(narrative) else 'no'
return question, answer
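# Hypothetical usage sketch (added for illustration, not part of the original
# file): the yes/no answer of was_there depends only on whether the sampled
# event id occurs anywhere in the narrative. Field names mirror the ones used
# above; the concrete values are invented.
def _demo_was_there():
    toy_dataset = {
        'events': ['door', 'phone'],
        'sources': {'door': ['door'], 'phone': ['telephone']},
        'actions': {'door': ['slamming'], 'phone': ['ringing']},
    }
    toy_narrative = {'events': [{'event': 'door'}]}
    question, answer = was_there(toy_dataset, toy_narrative, None)
    print(question, '->', answer)  # e.g. Was there a door slamming? -> yes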
def was_there_two_and(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> and [a,an] <S2> <A2>?',
'Have you [heard,listened to] [a,an] <S1> <A1> and [a,an] <S2> <A2>?',
'Did you [hear,listen to] any <S1> <A1> and any <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> and any <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,is] [a,an] <S1> <A1> and a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] a sound that [sounded like,was] [a,an] <S1> <A1> and a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounds like,is] [a,an] <S1> <A1> and a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounded like,was] [a,an] <S1> <A1> and a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Was there [a,an] <S1> <A1> and [a,an] <S2> <A2>?',
'Were there any <S1>s <A1> and any <S2>s <A2>?',
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
action_1 = str(np.random.choice(dataset['actions'][event_1])) # sample action
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
action_2 = str(np.random.choice(dataset['actions'][event_2])) # sample action
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = 'yes' if (event_1 in lst_events and event_2 in lst_events) else 'no'
return question, answer
def was_there_two_or(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> or [a,an] <S2> <A2>?',
'Have you [heard,listened to] [a,an] <S1> <A1> or [a,an] <S2> <A2>?',
'Did you [hear,listen to] any <S1> <A1> or any <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> or any <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,is] [a,an] <S1> <A1> or a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] a sound that [sounded like,was] [a,an] <S1> <A1> or a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounds like,is] [a,an] <S1> <A1> or a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounded like,was] [a,an] <S1> <A1> or a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Was there [a,an] <S1> <A1> or [a,an] <S2> <A2>?',
'Were there any <S1>s <A1> or any <S2>s <A2>?',
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
action_1 = str(np.random.choice(dataset['actions'][event_1])) # sample action
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
action_2 = str(np.random.choice(dataset['actions'][event_2])) # sample action
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = 'yes' if (event_1 in lst_events or event_2 in lst_events) else 'no'
return question, answer
def was_there_source(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S>?',
                 'Have you [heard,listened to] [a,an] <S>?',
'Did you [hear,listen to] any <S>?',
'Have you [heard,listened to] any <S>?',
'Was there a sound [produced,made] by [a,an] <S>?',
'Were there any sounds [produced,made] by [a,an] <S>?',
]
question = str(np.random.choice(questions)) # sample question
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event])) # sample source
question = question.replace('<S>', source) # insert source
question = sanitize_question(question) # correct grammar
answer = 'yes' if source in get_lst_all_sources(dataset, narrative) else 'no'
return question, answer
def was_there_source_two_and(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> and [a,an] <S2>?',
                 'Have you [heard,listened to] [a,an] <S1> and [a,an] <S2>?',
'Did you [hear,listen to] any <S1> and any <S2>?',
'Have you [heard,listened to] any <S1> and any <S2>?',
'Was there a sound [produced,made] by [a,an] <S1> and a sound [produced,made] by [a,an] <S2>?', # noqa: E501
'Were there any sounds [produced,made] by [a,an] <S1> and any sounds [produced,made] by [a,an] <S2>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<S2>', source_2) # insert source
question = sanitize_question(question) # correct grammar
lst_sources = get_lst_all_sources(dataset, narrative)
answer = 'yes' if (source_1 in lst_sources and source_2 in lst_sources) else 'no'
return question, answer
def was_there_source_two_or(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> or [a,an] <S2>?',
                 'Have you [heard,listened to] [a,an] <S1> or [a,an] <S2>?',
'Did you [hear,listen to] any <S1> or any <S2>?',
'Have you [heard,listened to] any <S1> or any <S2>?',
'Was there a sound [produced,made] by [a,an] <S1> or a sound [produced,made] by [a,an] <S2>?', # noqa: E501
'Were there any sounds [produced,made] by [a,an] <S1> or any sounds [produced,made] by [a,an] <S2>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<S2>', source_2) # insert source
question = sanitize_question(question) # correct grammar
lst_sources = get_lst_all_sources(dataset, narrative)
answer = 'yes' if (source_1 in lst_sources or source_2 in lst_sources) else 'no'
return question, answer
def was_there_relative(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> <RO> the <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] [a,an] <S1> <A1> <RO> the <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] any <S1> <A1> <RO> the <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> <RO> the <S2> <A2>?',
'Was there [a,an] <S1> <A1> <RO> the <S2> <A2>?',
'Were there any <S1>s <A1> <RO> the <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1> <RO> the <S2> <A2>?', # noqa: E501
                 '<RO> the <S2> <A2>, did you [hear,listen to] [a,an] <S1> <A1>?', # noqa: E501
'<RO> the <S2> <A2>, did you [hear,listen to] any <S1> <A1>?',
'<RO> the <S2> <A2>, was there [a,an] <S1> <A1>?',
'<RO> the <S2> <A2>, were there any <S1>s <A1>?',
'<RO> the <S2> <A2>, did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(unique_lst_events) > 0, \
'Question (was_there_relative) illposed.'
event_2 = str(np.random.choice(unique_lst_events))
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event_2) == 1, \
'Question (was_there_relative) illposed.'
event_2_idx = lst_events.index(event_2)
if 'before' in preposition:
lst_events = lst_events[:event_2_idx]
elif 'after' in preposition:
lst_events = lst_events[(event_2_idx + 1):]
else:
assert False, 'Preposition illdefined in Question (was_there_relative).'
answer = 'yes' if event_1 in lst_events else 'no'
return question, answer
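# Illustration (added note, not in the original source): in was_there_relative,
# for narrative events [dog, phone, door, car] with event_2 = 'door' (index 2),
# the preposition 'before' restricts the search window to [dog, phone] and
# 'after' restricts it to [car]; the answer is 'yes' only if event_1 occurs in
# that window. event_2 must occur exactly once in the narrative, otherwise the
# question is rejected as ill-posed.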
def was_there_immediate_relative(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> <IO> the <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] [a,an] <S1> <A1> <IO> the <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] any <S1> <A1> <IO> the <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> <IO> the <S2> <A2>?',
'Was there [a,an] <S1> <A1> <IO> the <S2> <A2>?',
'Were there any <S1>s <A1> <IO> the <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1> <IO> the <S2> <A2>?', # noqa: E501
                 '<IO> the <S2> <A2>, did you [hear,listen to] [a,an] <S1> <A1>?', # noqa: E501
'<IO> the <S2> <A2>, did you [hear,listen to] any <S1> <A1>?',
'<IO> the <S2> <A2>, was there [a,an] <S1> <A1>?',
'<IO> the <S2> <A2>, were there any <S1>s <A1>?',
'<IO> the <S2> <A2>, did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
preposition = sample_immediate_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(unique_lst_events) > 0, \
'Question (was_there_immediate_relative) illposed.'
event_2 = str(np.random.choice(unique_lst_events))
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<IO>', preposition) # insert preposition
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event_2) == 1, \
'Question (was_there_immediate_relative) illposed.'
event_2_idx = lst_events.index(event_2)
if 'before' in preposition:
if (event_2_idx - 1) < 0:
target_event = []
else:
target_event = lst_events[event_2_idx - 1]
elif 'after' in preposition:
if (event_2_idx + 1) >= len(lst_events):
target_event = []
else:
target_event = lst_events[event_2_idx + 1]
else:
assert False, \
'Preposition illdefined in Question (was_there_immediate_relative).'
answer = 'yes' if event_1 == target_event else 'no'
return question, answer
def was_there_similar_ordinal(dataset, narrative, _):
questions = ['Were there any similar sounds to the <O> sound?',
'Were there any sounds that were similar to the <O> sound?',
'Was there at least a sound similar to the <O> sound?',
'Was there at least a sound that was similar to the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound similar to the <O> sound?',
'Was there at least [one,a single] sound that was similar to the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
event = lst_events[number - 1]
answer = 'yes' if lst_events.count(event) > 1 else 'no' # 1 for reference
return question, answer
def was_there_similar_loudness(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there any sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
                 'Was there at least a sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
                 'Was there at least [one,a single] sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_similar_loudness) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_similar_loudness) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
evt_loudness = lst_loudness[event_idx]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != event_idx]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_loudness) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 1 else 'no'
return question, answer
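# Numeric illustration (added note, not in the original source): with
# rel_diff = 0.1, a reference sound at loudness 1.0 and other sounds at 0.95
# and 1.5, the relative differences are 0.05 and 0.5. Neither falls in the
# ambiguous band (0.1, 0.2), so the question is kept, and the answer is 'yes'
# because at least one other sound is within 10% of the reference. A sound at
# loudness 1.15 (relative difference 0.15) would make the question ill-posed.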
def was_there_at_least_two_similar_loudness(dataset, narrative, rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_at_least_two_similar_loudness) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_at_least_two_similar_loudness) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
evt_loudness = lst_loudness[event_idx]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != event_idx]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_loudness) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 2 else 'no'
return question, answer
def was_there_similar_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there any sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
evt_loudness = lst_loudness[number - 1]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != (number - 1)]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_loudness_ordinal) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_loudness_ordinal(dataset,
narrative,
rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
evt_loudness = lst_loudness[number - 1]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != (number - 1)]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_loudness_ordinal) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 2 else 'no'
return question, answer
def was_there_similar_duration(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there any sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
                 'Was there at least a sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
                 'Was there at least [one,a single] sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_similar_duration) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_similar_duration) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
evt_duration = lst_durations[event_idx]
x_durations = [j for i, j in enumerate(lst_durations) if i != event_idx]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_duration) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_duration(dataset, narrative, rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_at_least_two_similar_duration) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_at_least_two_similar_duration) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
evt_duration = lst_durations[event_idx]
x_durations = [j for i, j in enumerate(lst_durations) if i != event_idx]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_duration) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 2 else 'no'
return question, answer
def was_there_similar_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there any sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
evt_duration = lst_durations[number - 1]
x_durations = [j for i, j in enumerate(lst_durations) if i != (number - 1)]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_duration_ordinal) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_duration_ordinal(dataset,
narrative,
rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
evt_duration = lst_durations[number - 1]
x_durations = [j for i, j in enumerate(lst_durations) if i != (number - 1)]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_duration_ordinal) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 2 else 'no'
return question, answer
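# Hypothetical usage sketch (added for illustration, not part of the original
# file): exercises the loudness-margin logic above on a toy narrative whose
# loudness values avoid the ambiguous (rel_diff, 2 * rel_diff) band for every
# possible reference event. Field names mirror the ones used in this file;
# all concrete values are invented.
def _demo_was_there_similar_loudness():
    toy_dataset = {
        'sources': {'door': ['door'], 'phone': ['telephone'], 'car': ['car']},
        'actions': {'door': ['slamming'], 'phone': ['ringing'],
                    'car': ['honking']},
    }
    toy_narrative = {'events': [
        {'event': 'door', 'loudness': 1.0},
        {'event': 'phone', 'loudness': 0.95},
        {'event': 'car', 'loudness': 2.0},
    ]}
    question, answer = was_there_similar_loudness(toy_dataset, toy_narrative)
    print(question, '->', answer)  # e.g. ... as loud as the car honking? -> no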
|
daqa-master
|
daqa-gen/qpas/exist.py
|
daqa-master
|
daqa-gen/qpas/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import numpy as np
def a_or_an(q):
a_an_letter = re.findall(r'\[a,an\] \w', q)
for e in a_an_letter:
a_an, letter = e.split(' ')
if letter in ['a', 'e', 'i', 'o', 'u']:
q = q.replace('[a,an]', 'an', 1) # 1 to denote first occurrence
else:
q = q.replace('[a,an]', 'a', 1)
return q
def options(q):
    assert ('[a' not in q) or ('an]' not in q), '[a,an] choice cannot be random.'
opt = re.findall(r'\[(.*?)\]', q)
for o in opt:
q = q.replace('[' + o + ']', np.random.choice(o.split(',')))
return q
def spaces(q):
    q = q.replace('   ', ' ')  # collapse triple spaces left by substitutions
    q = q.replace('  ', ' ')  # collapse double spaces
return q
def sanitize_question(q):
q = a_or_an(q)
q = options(q)
q = spaces(q)
q = q.lower()
q = q.capitalize() # capitalizes only first letter
assert '<' not in q, 'Could not sanitize template: ' + q
assert '>' not in q, 'Could not sanitize template: ' + q
assert '[' not in q, 'Could not sanitize template: ' + q
assert ']' not in q, 'Could not sanitize template: ' + q
return q
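# Hypothetical usage sketch (added for illustration, not part of the original
# file): shows the sanitization passes on a template whose <S>/<A> slots have
# already been filled in; the exact output depends on the random option
# choices.
def _demo_sanitize_question():
    template = 'Did you [hear,listen to] [a,an] owl [hooting,screeching]?'
    print(sanitize_question(template))  # e.g. Did you hear an owl hooting?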
def sample_conjunction():
return str(np.random.choice(['and', 'or']))
def sample_preposition():
return str(np.random.choice(['before', 'after']))
def sample_immediate_preposition():
return '[just,immediately] ' + sample_preposition()
def numbers_to_ordinals(num):
ordinals = {
1: 'first',
2: 'second',
3: 'third',
4: 'fourth',
5: 'fifth',
6: 'sixth',
7: 'seventh',
8: 'eighth',
9: 'ninth',
10: 'tenth',
11: 'eleventh',
        12: 'twelfth',
13: 'thirteenth',
14: 'fourteenth',
15: 'fifteenth',
}
return ordinals[num]
def sample_number(n):
number = int(np.random.randint(1, n + 1, 1)) # human indexing
return number, numbers_to_ordinals(number)
def sample_second_number(n, x_n):
lst_x_n = list(range(1, n + 1)) # human indexing
lst_x_n.remove(x_n)
number = int(np.random.choice(lst_x_n))
return number, numbers_to_ordinals(number)
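# Illustration (added note, not in the original source): for a narrative with
# four events, sample_number(4) returns e.g. (3, 'third'), and
# sample_second_number(4, 3) returns a different position, e.g. (1, 'first').
# Both use 1-based ("human") indexing, so callers subtract 1 before indexing
# into the event lists.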
def sample_loudness():
return str(np.random.choice(['quiet', 'loud']))
def sample_rel_loudness():
return str(np.random.choice(['quieter', 'louder']))
def sample_absolute_loudness():
return str(np.random.choice(['quietest', 'loudest']))
def sample_duration():
return str(np.random.choice(['short', 'long']))
def sample_rel_duration():
return str(np.random.choice(['shorter', 'longer']))
def sample_absolute_duration():
return str(np.random.choice(['shortest', 'longest']))
def get_lst_events(narrative):
le = len(narrative['events'])
return [narrative['events'][e]['event'] for e in range(le)]
def get_lst_sources(narrative):
le = len(narrative['events'])
return [narrative['events'][e]['source'] for e in range(le)]
def get_lst_all_sources(dataset, narrative):
ls = []
for e in range(len(narrative['events'])):
ls += dataset['sources'][narrative['events'][e]['event']]
return ls
def get_lst_actions(narrative):
le = len(narrative['events'])
return [narrative['events'][e]['action'] for e in range(le)]
def get_lst_durations(narrative):
le = len(narrative['events'])
return np.array([narrative['events'][e]['duration'] for e in range(le)])
def get_lst_loudness(narrative):
le = len(narrative['events'])
return np.array([narrative['events'][e]['loudness'] for e in range(le)])
def compute_rel_diff(actual, reference):
return np.abs(actual - reference) / reference
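# Illustration (added note, not in the original source):
# compute_rel_diff(np.array([0.9, 1.5]), np.array(1.0)) is approximately
# [0.1, 0.5]. The question generators use this to reject narratives where an
# answer would hinge on a loudness or duration difference smaller than
# rel_diff.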
def numbers_to_words(n):
numbers = {
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'fifteen',
}
return numbers[n]
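# Hypothetical usage sketch (added for illustration, not part of the original
# file): the narrative structure consumed by the accessors above is a dict
# with an 'events' list whose entries carry at least 'event', 'source',
# 'action', 'duration' and 'loudness'. The values below are invented.
def _demo_narrative_accessors():
    toy_narrative = {'events': [
        {'event': 'door', 'source': 'door', 'action': 'slamming',
         'duration': 1.2, 'loudness': 0.8},
        {'event': 'phone', 'source': 'telephone', 'action': 'ringing',
         'duration': 3.0, 'loudness': 1.4},
    ]}
    print(get_lst_events(toy_narrative))     # ['door', 'phone']
    print(get_lst_durations(toy_narrative))  # numpy array of durations
    print(get_lst_loudness(toy_narrative))   # numpy array of loudness values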
|
daqa-master
|
daqa-gen/qpas/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, sample_duration, sample_loudness,
sample_number, sample_second_number,
sample_rel_duration, sample_rel_loudness,
sanitize_question)
def compare_ordinal(dataset, narrative, _):
questions = ['Was the <O1> [sound event,sound] [the same as,similar to] the <O2> [sound event,sound]?', # noqa: E501
'Was the <O1> [sound event,sound] and <O2> [sound event,sound] [the same,similar]?', # noqa: E501
'Were the <O1> and <O2> [sound events,sounds] [the same,similar]?',
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
answer = 'yes' if lst_events[number_1 - 1] == lst_events[number_2 - 1] \
else 'no'
return question, answer
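# Hypothetical usage sketch (added for illustration, not part of the original
# file): compare_ordinal answers 'yes' exactly when the two randomly chosen
# ordinal positions hold the same event id; it does not read the dataset
# argument. The toy narrative below is invented.
def _demo_compare_ordinal():
    toy_narrative = {'events': [
        {'event': 'door'}, {'event': 'phone'}, {'event': 'door'},
    ]}
    question, answer = compare_ordinal({}, toy_narrative, None)
    print(question, '->', answer)  # e.g. first vs. third sound -> yes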
def compare_ordinal_event(dataset, narrative, _):
questions = ['Was the <O> [sound event,sound] [a,an] <S> <A>?', # noqa: E501
'Did the <O> [sound event,sound] [sound,seem] like [a,an] <S> <A>?', # noqa: E501
'[Listening to,Hearing] the <O> [sound event,sound], was it [a,an] <S> <A>?', # noqa: E501
'[Listening to,Hearing] the <O> [sound event,sound], did it [sound,seem] like [a,an] <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<O>', ordinal) # insert ordinal
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
answer = 'yes' if lst_events[number - 1] == event else 'no'
return question, answer
def compare_loudness(dataset, narrative, rel_diff):
questions = ['Was the <S1> <A1> <RL> than the <S2> <A2>?',
'Was the sound of the <S1> <A1> <RL> than the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S2> <A2> and the sound of the <S1> <A1>, was the latter <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S2> <A2> and the <S1> <A1>, was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, 'Question (compare_loudness) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
rel_loudness = sample_rel_loudness()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_loudness) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_loudness) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_loudness) illposed.'
assert event_1 != event_2, 'Question (compare_loudness) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event_1)]
e_2_loudness = lst_loudness[lst_events.index(event_2)]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, 'Loudness illdefined in Question (compare_loudness).'
return question, answer
def compare_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] <RL> than the <O2> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> [sound event,sound] and the <O1> [sound event,sound], was the latter <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> and <O1> [sound events,sounds], was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
rel_loudness = sample_rel_loudness()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_loudness_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[number_1 - 1]
e_2_loudness = lst_loudness[number_2 - 1]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness_ordinal) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, 'Loudness illdefined in Question (compare_loudness_ordinal).'
return question, answer
def compare_loudness_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> <RL> than the <O> [sound event,sound]?',
'Was the sound of the <S> <A> <RL> than the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_loudness_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_loudness = sample_rel_loudness()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_loudness_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_loudness_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event)]
e_2_loudness = lst_loudness[number - 1]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness_event_ordinal) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, \
'Loudness illdefined in Question (compare_loudness_event_ordinal).'
return question, answer
def compare_loudness_ordinal_event(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O> [sound event,sound] <RL> than the <S> <A>?',
'Was the <O> [sound event,sound] <RL> than the sound of the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_loudness_ordinal_event) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_loudness = sample_rel_loudness()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_loudness_ordinal_event) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_loudness_ordinal_event) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[number - 1]
e_2_loudness = lst_loudness[lst_events.index(event)]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness_ordinal_event) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, \
'Loudness illdefined in Question (compare_loudness_ordinal_event).'
return question, answer
def compare_same_loudness(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S1> <A1> [roughly,approximately] as <L> as the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] as <L> as the sound of the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] the same loudness as the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, did they [roughly,approximately] have the same loudness?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_loudness) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
loudness = sample_loudness()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_same_loudness) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_same_loudness) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_same_loudness) illposed.'
assert event_1 != event_2, 'Question (compare_same_loudness) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event_1)]
e_2_loudness = lst_loudness[lst_events.index(event_2)]
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_loudness) illposed.'
answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
return question, answer
def compare_same_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] [roughly,approximately] as <L> as the <O2> [sound event,sound]?', # noqa: E501
                 'Were the <O1> and <O2> [sound events,sounds] [roughly,approximately] as <L>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], did they have [roughly,approximately] the same loudness?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
loudness = sample_loudness()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_same_loudness_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[number_1 - 1]
e_2_loudness = lst_loudness[number_2 - 1]
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_loudness_ordinal) illposed.'
answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
return question, answer
def compare_same_loudness_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> [roughly,approximately] as <L> as the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same loudness?', # noqa: E501
'Was the <O> [sound event,sound] [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, did they [roughly,approximately] have the same loudness?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_loudness_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
loudness = sample_loudness()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_same_loudness_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_same_loudness_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event)]
e_2_loudness = lst_loudness[number - 1]
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_loudness_event_ordinal) illposed.'
answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
return question, answer
def compare_duration(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S1> <A1> <RD> than the <S2> <A2>?',
'Was the sound of the <S1> <A1> <RD> than the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S2> <A2> and the sound of the <S1> <A1>, was the latter <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S2> <A2> and the <S1> <A1>, was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_duration) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
rel_duration = sample_rel_duration()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_duration) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_duration) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_duration) illposed.'
assert event_1 != event_2, 'Question (compare_duration) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event_1)]
e_2_duration = lst_duration[lst_events.index(event_2)]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration) illposed.'
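# The yes/no answer is keyed off whichever relative-duration word
# ('shorter' or 'longer') was sampled into the question text above.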
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, 'Duration illdefined in Question (compare_duration).'
return question, answer
def compare_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] <RD> than the <O2> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> [sound event,sound] and the <O1> [sound event,sound], was the latter <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> and <O1> [sound events,sounds], was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
rel_duration = sample_rel_duration()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_duration_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[number_1 - 1]
e_2_duration = lst_duration[number_2 - 1]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration_ordinal) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, 'Duration illdefined in Question (compare_duration_ordinal).'
return question, answer
def compare_duration_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> <RD> than the <O> [sound event,sound]?',
'Was the sound of the <S> <A> <RD> than the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_duration_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_duration = sample_rel_duration()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_duration_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_duration_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event)]
e_2_duration = lst_duration[number - 1]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration_event_ordinal) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, \
'Duration illdefined in Question (compare_duration_event_ordinal).'
return question, answer
def compare_duration_ordinal_event(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O> [sound event,sound] <RD> than the <S> <A>?',
'Was the <O> [sound event,sound] <RD> than the sound of the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_duration_ordinal_event) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_duration = sample_rel_duration()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_duration_ordinal_event) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_duration_ordinal_event) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[number - 1]
e_2_duration = lst_duration[lst_events.index(event)]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration_ordinal_event) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, \
'Duration illdefined in Question (compare_duration_ordinal_event).'
return question, answer
def compare_same_duration(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S1> <A1> [roughly,approximately] as <D> as the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] as <D> as the sound of the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] the same duration as the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, did they [roughly,approximately] have the same duration?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_duration) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
duration = sample_duration()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_same_duration) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_same_duration) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_same_duration) illposed.'
assert event_1 != event_2, 'Question (compare_same_duration) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event_1)]
e_2_duration = lst_duration[lst_events.index(event_2)]
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_duration_diff > rel_diff,
rel_duration_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_duration) illposed.'
answer = 'yes' if rel_duration_diff <= rel_diff else 'no'
return question, answer
def compare_same_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] [roughly,approximately] as <D> as the <O2> [sound event,sound]?', # noqa: E501
'Was the <O1> and <O2> [sound events,sounds] [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], did they [roughly,approximately] have the same duration?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
duration = sample_duration()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_same_duration_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[number_1 - 1]
e_2_duration = lst_duration[number_2 - 1]
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_duration_diff > rel_diff,
rel_duration_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_duration_ordinal) illposed.'
answer = 'yes' if rel_duration_diff <= rel_diff else 'no'
return question, answer
def compare_same_duration_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> [roughly,approximately] as <D> as the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
'Was the <O> [sound event,sound] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, did they [roughly,approximately] have the same duration?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_duration_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
duration = sample_duration()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_same_duration_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_same_duration_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event)]
e_2_duration = lst_duration[number - 1]
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_duration_diff > rel_diff,
rel_duration_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_duration_event_ordinal) illposed.'
answer = 'yes' if rel_duration_diff <= rel_diff else 'no'
return question, answer
|
daqa-master
|
daqa-gen/qpas/compare.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, numbers_to_words, sample_duration,
sample_loudness, sample_number, sample_second_number,
sample_preposition, sanitize_question)
def how_many(dataset, narrative, _):
questions = ['How many [sound events,sounds] were there?',
'How many [sound events,sounds] [did,could] you [hear,listen to]?',
'How many [sound events,sounds] have you [heard,listened to]?',
'What is the number of [sound events,sounds]?',
'What is the number of [sound events,sounds] [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] have you [heard,listened to]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = numbers_to_words(len(lst_events))
return question, answer
def how_many_event(dataset, narrative, _):
questions = ['How many times was [a,an] <S> <A>?',
'How many times did you [hear,listen to] [a,an] <S> <A>?',
'How many times have you [heard,listened to] [a,an] <S> <A>?',
'What is the number of times [a,an] <S> <A>?',
'What is the number of times did you [hear,listen to] [a,an] <S> <A>?', # noqa: E501
'What is the number of times you [heard,listened to] [a,an] <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
event = str(np.random.choice(lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
answer = numbers_to_words(lst_events.count(event))
return question, answer
def how_many_ordinal(dataset, narrative, _):
questions = ['How many times did you [hear,listen to] a sound that [sounded,seemed] like the <O> [sound event,sound]?', # noqa: E501
'What is the number of times did you [hear,listen to] a sound that [sounded,seemed] like the <O> [sound event,sound]?', # noqa: E501
'[Hearing,Listening to] the <O> [sound event,sound], how many sounds were [the same,similar]?', # noqa: E501
'[Hearing,Listening to] the <O> [sound event,sound], what is the number of sounds that were [the same,similar]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
event = lst_events[number - 1]
answer = numbers_to_words(lst_events.count(event) - 1) # -1 for base event
return question, answer
def how_many_event_two(dataset, narrative, _):
questions = ['How many times was [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?',
'How many times did you [hear,listen to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'How many times have you [heard,listened to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'What is the number of times [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'What is the number of times did you [hear,listen to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'What is the number of times you [heard,listened to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
x_lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(x_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert event_1 != event_2, 'Question (how_many_event_two) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = numbers_to_words(lst_events.count(event_1)
+ lst_events.count(event_2))
return question, answer
def how_many_event_two_ordinal(dataset, narrative, _):
questions = ['How many times did you [hear,listen to] a sound that [sounded,seemed] like the <O1> [sound event,sound] [or,and] the <O2> [sound event,sound]?', # noqa: E501
'What is the number of times did you [hear,listen to] a sound that [sounded,seemed] like the <O1> [sound event,sound] [or,and] the <O2> [sound event,sound]?', # noqa: E501
'[Hearing,Listening to] the <O1> [sound event,sound] and the <O2> [sound event,sound], how many sounds were [the same,similar]?', # noqa: E501
'[Hearing,Listening to] the <O1> [sound event,sound] and the <O2> [sound event,sound], what is the number of sounds that were [the same,similar]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
event_1 = lst_events[number_1 - 1]
event_2 = lst_events[number_2 - 1]
answer = numbers_to_words((lst_events.count(event_1) - 1) # -1 for base event
+ (lst_events.count(event_2) - 1))
return question, answer
def how_many_sounds_relative(dataset, narrative, _):
questions = ['How many [sound events,sounds] <RO> the <S> <A> were there?',
'How many [sound events,sounds] <RO> the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] <RO> the <S> <A> have you [heard,listened to]?', # noqa: E501
'What is the number of [sound events,sounds] <RO> the <S> <A>?',
'What is the number of [sound events,sounds] <RO> the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] <RO> the <S> <A> have you [heard,listened to]?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] [did,could] you hear <RO>?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] have you heard <RO>?', # noqa: E501
'There is [a,an] <S> <A>; what is the number of [sound events,sounds] [did,could] you hear <RO>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (how_many_sounds_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (how_many_sounds_relative) illposed.'
event_idx = lst_events.index(event)
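# The counting window is chosen from whichever preposition ('before' or
# 'after') was sampled into the question above.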
if 'before' in question:
lst_events_e = lst_events[:event_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (how_many_sounds_relative).'
answer = numbers_to_words(len(lst_events_e))
return question, answer
def how_many_sounds_relative_ordinal(dataset, narrative, _):
questions = ['How many [sound events,sounds] after the <O> [sound event,sound] were there?', # noqa: E501
'How many [sound events,sounds] after the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] after the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
'What is the number of [sound events,sounds] after the <O> [sound event,sound]?', # noqa: E501
'What is the number of [sound events,sounds] after the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] after the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
assert number < (len(lst_events) - 1), \
'Question (how_many_sounds_relative_ordinal) illposed.'
lst_events_e = lst_events[number:]
answer = numbers_to_words(len(lst_events_e))
return question, answer
def how_many_event_relative(dataset, narrative, _):
questions = ['How many <S1>s <A1> <RO> the <S2> <A2> were there?',
'How many <S1>s <A1> <RO> the <S2> <A2> [did,could] you [hear,listen to]?', # noqa: E501
'How many <S1>s <A1> <RO> the <S2> <A2> have you [heard,listened to]?', # noqa: E501
'What is the number of <S1>s <A1> <RO> the <S2> <A2>?',
'What is the number of <S1>s <A1> <RO> the <S2> <A2> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of <S1>s <A1> <RO> the <S2> <A2> have you [heard,listened to]?', # noqa: E501
'There is [a,an] <S2> <A2>; how many <S1>s <A1> [did,could] you hear <RO>?', # noqa: E501
'There is [a,an] <S2> <A2>; how many <S1>s <A1> have you heard <RO>?', # noqa: E501
'There is [a,an] <S2> <A2>; what is the number of <S1>s <A1> [did,could] you hear <RO>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(unique_lst_events) > 0, \
'Question (how_many_event_relative) illposed.'
event_2 = str(np.random.choice(unique_lst_events))
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event_2) == 1, \
'Question (how_many_event_relative) illposed.'
event_2_idx = lst_events.index(event_2)
if 'before' in question:
lst_events_e = lst_events[:event_2_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_2_idx + 1):]
else:
assert False, \
'Relative preposition illdefined in Question (how_many_event_relative).'
answer = numbers_to_words(lst_events_e.count(event_1))
return question, answer
def how_many_event_relative_ordinal(dataset, narrative, _):
questions = ['How many <S>s <A> <RO> the <O> [sound event,sound] were there?',
'How many <S>s <A> <RO> the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'How many <S>s <A> <RO> the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
'What is the number of <S>s <A> <RO> the <O> [sound event,sound]?',
'What is the number of <S>s <A> <RO> the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of <S>s <A> <RO> the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
if 'before' in question:
assert number > 1, 'Question (how_many_event_relative_ordinal) illposed.'
lst_events_e = lst_events[:(number - 1)]
elif 'after' in question:
assert number < (len(lst_events) - 1), \
'Question (how_many_event_relative_ordinal) illposed.'
lst_events_e = lst_events[number:]
else:
assert False, \
'Relative preposition illdefined in Question (how_many_event_relative_ordinal).' # noqa: E501
answer = numbers_to_words(lst_events_e.count(event))
return question, answer
def how_many_sounds_loudness_event(dataset, narrative, rel_diff=0.1):
questions = ['How many [sound events,sounds] [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> have you heard?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] that are [roughly,approximately] as <L>?', # noqa: E501
'There is [a,an] <S> <A>; what is the number of [sound events,sounds] that are [roughly,approximately] as <L>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (how_many_sounds_loudness_event) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (how_many_sounds_loudness_event) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
evt_loudness = lst_loudness[event_idx]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != event_idx]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_loudness_event) illposed.'
answer = numbers_to_words(np.sum(rel_loudness_diff <= rel_diff))
return question, answer
def how_many_sounds_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['How many [sound events,sounds] [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound have you heard?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
evt_loudness = lst_loudness[number - 1]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != (number - 1)]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_loudness_ordinal) illposed.'
answer = numbers_to_words(np.sum(rel_loudness_diff <= rel_diff))
return question, answer
def how_many_sounds_duration_event(dataset, narrative, rel_diff=0.1):
questions = ['How many [sound events,sounds] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> have you heard?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] that are [roughly,approximately] as <D>?', # noqa: E501
'There is [a,an] <S> <A>; what is the number of [sound events,sounds] that are [roughly,approximately] as <D>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (how_many_sounds_duration_event) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (how_many_sounds_duration_event) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
evt_duration = lst_durations[event_idx]
x_durations = [j for i, j in enumerate(lst_durations) if i != event_idx]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_duration_event) illposed.'
answer = numbers_to_words(np.sum(rel_durations_diff <= rel_diff))
return question, answer
def how_many_sounds_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['How many [sound events,sounds] [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound have you heard?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
evt_duration = lst_durations[number - 1]
x_durations = [j for i, j in enumerate(lst_durations) if i != (number - 1)]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_duration_ordinal) illposed.'
answer = numbers_to_words(np.sum(rel_durations_diff <= rel_diff))
return question, answer
|
daqa-master
|
daqa-gen/qpas/count.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import get_lst_events, sanitize_question
def less_than(dataset, narrative, _):
questions = ['Were there fewer <S1>s <A1> than <S2>s <A2>?',
'Was the number of [times,instances,occurrences] [a,an] <S1> <A1> less than the number of [times,instances,occurrences] [a,an] <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of [a,an] <S1> <A1> and [a,an] <S2> <A2>, were there fewer [times,instances,occurrences] of the former?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of [a,an] <S2> <A2> and [a,an] <S1> <A1>, were there fewer [times,instances,occurrences] of the latter?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
event_1 = str(np.random.choice(lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
x_lst_events = [e for e in lst_events if e != event_1]
assert len(x_lst_events) > 0, 'Question (less_than) illposed.'
event_2 = str(np.random.choice(x_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert event_1 != event_2, 'Question (less_than) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
answer = 'yes' \
if lst_events.count(event_1) < lst_events.count(event_2) \
else 'no'
return question, answer
def equal_to(dataset, narrative, _):
questions = ['Was the number of times [a,an] <S1> <A1> equal to the number of times [a,an] <S2> <A2>?', # noqa: E501
'Was the number of times [a,an] <S1> <A1> the same as the number of times [a,an] <S2> <A2>?', # noqa: E501
'Was there an equal number of times [a,an] <S1> <A1> and [a,an] <S2> <A2>?', # noqa: E501
'Was there the same number of <S1> <A1> and <S2> <A2>?',
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
event_1 = str(np.random.choice(lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
x_lst_events = [e for e in lst_events if e != event_1]
assert len(x_lst_events) > 0, 'Question (equal_to) illposed.'
event_2 = str(np.random.choice(x_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert event_1 != event_2, 'Question (equal_to) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
answer = 'yes' \
if lst_events.count(event_1) == lst_events.count(event_2) \
else 'no'
return question, answer
def more_than(dataset, narrative, _):
questions = ['Were there more <S1>s <A1> than <S2>s <A2>?',
'Was the number of [times,instances,occurrences] [a,an] <S1> <A1> more than the number of [times,instances,occurrences] [a,an] <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of [a,an] <S1> <A1> and [a,an] <S2> <A2>, were there more [times,instances,occurrences] of the former?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of [a,an] <S2> <A2> and [a,an] <S1> <A1>, were there more [times,instances,occurrences] of the latter?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
event_1 = str(np.random.choice(lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
x_lst_events = [e for e in lst_events if e != event_1]
assert len(x_lst_events) > 0, 'Question (more_than) illposed.'
event_2 = str(np.random.choice(x_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert event_1 != event_2, 'Question (more_than) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
answer = 'yes' \
if lst_events.count(event_1) > lst_events.count(event_2) \
else 'no'
return question, answer
|
daqa-master
|
daqa-gen/qpas/compare_integer.py
|
from base import AbstractFeatureSelector
import numpy as np
from scipy import stats
from scipy.sparse import issparse
from sklearn.feature_selection import f_classif, SelectFromModel, SelectPercentile
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.utils import check_X_y
from sklearn.utils.extmath import safe_sparse_dot, row_norms
from scipy.linalg import norm
# modified to address the issue of centering sparse matrices with a bit of algebra
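# The trick: explicitly centering a sparse X would densify it, so the centered
# column norms are computed algebraically instead, using
#   ||x - mean(x)||^2 = sum(x**2) - n_samples * mean(x)**2,
# which needs only the raw column norms and the column means.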
def better_f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape (n_samples,)
The target vector.
center : bool, default=True
If True, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples*X_means**2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
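# Minimal usage sketch (illustrative only, not part of the original module): it
# builds a small sparse regression problem and scores every column with
# better_f_regression without densifying X. The function name and data below
# are made up for demonstration purposes.
def _demo_better_f_regression(n_samples=50, n_features=5, seed=0):
    from scipy import sparse
    rng = np.random.RandomState(seed)
    X = sparse.random(n_samples, n_features, density=0.3,
                      random_state=rng, format='csr')
    # target correlated with column 0 plus a little noise
    y = np.asarray(X[:, 0].todense()).ravel() + 0.1 * rng.randn(n_samples)
    F, pv = better_f_regression(X, y, center=True)
    return F, pv  # column 0 should get the largest F value / smallest p-value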
class SelectFromLinearSVC(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'C': [float(x) for x in np.logspace(-2, 5, 100)]
}
def __init__(self, threshold=None, penalty='l1', loss='squared_hinge', dual=False, tol=0.0001, C=1.0, fit_intercept=True, random_state=None, max_iter=1000):
self.threshold = threshold
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.random_state = random_state
self.max_iter = max_iter
def fit(self, X, y):
self.linear_svc = LinearSVC(penalty=self.penalty, loss=self.loss, dual=self.dual, tol=self.tol,
C=self.C, fit_intercept=self.fit_intercept, random_state=self.random_state,
max_iter=self.max_iter)
self.linear_svc.fit(X, y)
self.select_from_model = SelectFromModel(self.linear_svc, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileClassification(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_classif',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_classif': f_classif
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
class SelectFromLasso(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'alpha': [float(x) for x in np.logspace(-5, 2, 100)]
}
def __init__(self, threshold=None, alpha=1.0, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, positive=False, selection='cyclic', random_state=None):
self.threshold = threshold
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.positive = positive
self.selection = selection
self.random_state = random_state
def fit(self, X, y):
# NOTE: y is an ndarray of strings
self.lasso = Lasso(alpha=self.alpha, fit_intercept=self.fit_intercept, normalize=self.normalize,
max_iter=self.max_iter, tol=self.tol, positive=self.positive, selection=self.selection,
random_state=self.random_state)
self.lasso.fit(X, y)
self.select_from_model = SelectFromModel(self.lasso, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileRegression(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_regression',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_regression': better_f_regression
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
def fit(self, X, y):
# NOTE: y is an ndarray of strings
super().fit(X, y)
return self
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/feature_selection.py
|
from base import AbstractEstimator
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier, SGDRegressor
class SGDClassifierEstimator(AbstractEstimator):
param_distributions = {
'loss': ('hinge', 'log', 'squared_hinge', 'perceptron'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False)
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_classifier = SGDClassifier(*self.args, **self.kwargs)
        self.sgd_classifier.fit(X, y)
        return self
def predict(self, X):
return self.sgd_classifier.predict(X)
class SGDRegressorEstimator(AbstractEstimator):
param_distributions = {
'loss': ('squared_loss', 'huber'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False),
'epsilon': [float(x) for x in np.logspace(-2, 0, 5)],
'learning_rate': ('optimal', 'invscaling'),
'eta0': (0.1, 0.01, 0.001),
'power_t': [float(x) for x in np.linspace(0, 1, 5)]
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_regressor = SGDRegressor(*self.args, **self.kwargs)
        self.sgd_regressor.fit(X, y)
        return self
def predict(self, X):
return self.sgd_regressor.predict(X)
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDClassifierEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdclassifier = SGDClassifier(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdclassifier.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdclassifier.predict(self.transform(X))
def decision_function(self, X):
return self.sgdclassifier.decision_function(self.transform(X))
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDRegressorEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdregressor = SGDRegressor(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdregressor.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdregressor.predict(self.transform(X))
# TODO: Add kernel SVM
# TODO: Add kernel ridge regressor
# TODO: Add random forests / xgboost
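# --- Usage sketch (illustrative only) ---
# A minimal, made-up example of the RBF-sampler + SGD combination above. The
# data and hyperparameter values are assumptions for demonstration, not
# settings used by the original model search.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=300, n_features=10, random_state=0)
    clf = RBFSamplerSGDClassifierEstimator(gamma=0.5, n_components=50, random_state=0, loss='hinge')
    clf.fit(X_demo, y_demo)
    print('training accuracy:', (clf.predict(X_demo) == y_demo).mean())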
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/estimation.py
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/__init__.py
|
|
from collections import defaultdict, OrderedDict
import numpy as np
from scipy import signal
from scipy.sparse import csr_matrix, hstack
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer, OneHotEncoder, StandardScaler
from sklearn.utils.validation import check_is_fitted
from base import AbstractFeatureExtractor
class DenseMixedStrategyImputer(BaseEstimator, TransformerMixin):
def __init__(self, missing_values='NaN', strategies=None, add_missing_indicator=True, verbose=False):
self.missing_values = missing_values
if strategies is None:
raise ValueError('Must provide strategy.')
allowed_strategies = ['mean', 'median', 'most_frequent']
if any(s not in allowed_strategies for s in strategies):
raise ValueError('Invalid strategy in list.')
self.strategies = strategies
self.add_missing_indicator = add_missing_indicator
self.verbose = verbose
def fit(self, X, y=None):
n_samples, n_features = X.shape
print('n_features',n_features)
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
self.impute_strategies = list(set(self.strategies))
self.impute_indices = [np.array([i for i, x in enumerate(self.strategies) if x == s]) for s in self.impute_strategies]
self.impute_valid_indices = []
self.imputers = [Imputer(missing_values=self.missing_values, strategy=s, verbose=self.verbose) for s in
self.impute_strategies]
for indices, imputer in zip(self.impute_indices, self.imputers):
imputer.fit(X[:, indices])
valid_mask = np.logical_not(np.isnan(imputer.statistics_))
self.impute_valid_indices.append(indices[valid_mask])
return self
def transform(self, X):
n_samples, n_features = X.shape
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
check_is_fitted(self, 'imputers')
if self.add_missing_indicator:
output_scale = 2
else:
output_scale = 1
X_out = np.zeros((n_samples, output_scale*n_features))
for input_indices, output_indices, imputer in zip(self.impute_indices, self.impute_valid_indices, self.imputers):
X_out[:, output_scale*output_indices] = imputer.transform(X[:, input_indices])
if self.add_missing_indicator:
X_out[:, np.arange(1, 2*n_features, 2)] = np.isnan(X).astype('float', copy=False)
return X_out
class DataFrameCategoricalEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.code_maps = {}
for k in X.columns:
self.code_maps[k] = defaultdict(lambda: np.nan)
self.code_maps[k].update({v: k for k, v in enumerate(X[k].astype('category').cat.categories)})
return self
def transform(self, X):
if set(X.columns) != set(self.code_maps):
raise ValueError('Columns do not match fit model.')
return X.apply(lambda x: x.apply(lambda y: self.code_maps[x.name][y])).as_matrix()
class AnnotatedTabularExtractor(AbstractFeatureExtractor):
param_distributions = {
'normalize_text': [True, False],
'categorize': [True, False],
'numeric_strategy': ['mean', 'median'],
'add_missing_indicator': [True, False]
}
def __init__(self, normalize_text=False, categorize=False, numeric_strategy='mean', add_missing_indicator=True):
self.normalize_text = normalize_text
self.categorize = categorize
self.numeric_strategy = numeric_strategy
self.add_missing_indicator = add_missing_indicator
def set_cols_info(self, cols_info):
self.cols_info = cols_info
def determine_colType(self, column):
variables = self.cols_info
for var in variables:
var_colName = var['colName']
if str(var_colName) != str(column):
continue
var_colType = var['colType']
if var_colType in {'categorical', 'boolean'}:
return 'categorical'
elif var_colType in {'integer', 'real'}:
return 'numeric'
elif var_colType == 'string':
return 'text'
elif var_colType == 'dateTime':
                raise RuntimeError('dateTime not implemented in this feature extractor yet !!')
def fit_transform(self, df, variables):
df = self.copy_normalize_text(df)
self.column_types = OrderedDict()
for column in df:
itype = self.determine_colType(column)
# print('itype',itype)
self.column_types[column] = itype
self.numeric_columns = [column for column, type in self.column_types.items() if type == 'numeric']
self.categorical_columns = [column for column, type in self.column_types.items() if type == 'categorical']
self.text_columns = [column for column, type in self.column_types.items() if type == 'text']
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
self.numeric_imputer = DenseMixedStrategyImputer(
strategies=[self.numeric_strategy]*len(self.numeric_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.numeric_imputer.fit_transform(X)
self.numeric_scaler = StandardScaler()
output_arrays.append(self.numeric_scaler.fit_transform(X))
if len(self.categorical_columns) > 0:
self.categorical_encoder = DataFrameCategoricalEncoder()
X = self.categorical_encoder.fit_transform(df[self.categorical_columns])
self.categorical_imputer = DenseMixedStrategyImputer(
strategies=['most_frequent']*len(self.categorical_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.categorical_imputer.fit_transform(X)
self.one_hot_encoder = OneHotEncoder(
categorical_features=np.arange(len(self.categorical_columns)) * (2 if self.add_missing_indicator else 1)
)
output_arrays.append(self.one_hot_encoder.fit_transform(X))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def transform(self, df):
check_is_fitted(self, 'column_types')
if list(df) != list(self.column_types):
raise ValueError('Data to be transformed does not match fitting data.')
df = self.copy_normalize_text(df)
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
output_arrays.append(self.numeric_scaler.transform(self.numeric_imputer.transform(X)))
if len(self.categorical_columns) > 0:
X = self.categorical_encoder.transform(df[self.categorical_columns])
output_arrays.append(self.one_hot_encoder.transform(self.categorical_imputer.transform(X)))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def copy_normalize_text(self, df):
df = df.copy()
if self.normalize_text:
for column in df:
try:
df[column] = df[column].str.lower().str.strip()
                except AttributeError:
                    # non-string columns have no .str accessor; leave them unchanged
                    pass
return df
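# --- Usage sketch (illustrative only) ---
# A tiny, made-up DataFrame and cols_info list showing how the extractor above
# is driven. In the real pipeline the column metadata comes from datasetDoc.json
# via d3mds, and this sketch assumes the older pandas / scikit-learn APIs the
# module already uses (DataFrame.as_matrix, Imputer, OneHotEncoder(categorical_features=...)).
if __name__ == '__main__':
    demo_df = pd.DataFrame({'age': ['23', '35', None], 'team': ['A', 'B', 'A']})
    demo_cols_info = [
        {'colName': 'age', 'colType': 'integer'},
        {'colName': 'team', 'colType': 'categorical'},
    ]
    extractor = AnnotatedTabularExtractor(numeric_strategy='median')
    extractor.set_cols_info(demo_cols_info)
    X_demo = extractor.fit_transform(demo_df, variables=None)
    print('extracted feature matrix shape:', X_demo.shape)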
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/feature_extraction.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json, sys
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of the learningData file as a DataFrame.
        If view is 'TRAIN' or 'TEST', the full learningData is filtered to return only the rows for that view.
        For view-based filtering, the problem object has to be passed because this method uses the dataSplits from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.loc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='NULL'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
dirname = os.path.basename(os.path.normpath(os.path.dirname(resPath)))
if resType =='table' and dirname=='tables':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
        # if the for loop finishes and the learningData file is not found, raise an error
        raise RuntimeError('could not find the learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the data resource entry (from datasetDoc) corresponding to learningData.csv
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
        # if the for loop finishes and the learningData resource is not found, raise an error
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
        Returns the problem schema version that was used to create this problem
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='NULL'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
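# --- Usage sketch (illustrative only) ---
# Typical driving pattern for the API above; the paths below are placeholders,
# not real dataset locations.
if __name__ == '__main__':
    ds_path = '/path/to/185_baseball_dataset'   # hypothetical path
    pr_path = '/path/to/185_baseball_problem'   # hypothetical path
    d3mds = D3MDS(ds_path, pr_path)
    X_train = d3mds.get_train_data()
    y_train = d3mds.get_train_targets()
    print('train data shape:', X_train.shape, '| train targets shape:', y_train.shape)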
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/d3mds.py
|
import os, sys, json
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score, mean_squared_error
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
from feature_extraction import *
from feature_selection import *
from estimation import *
if __name__ == '__main__':
# get the paths of the dataset and problem
try:
dspath = (sys.argv[1])
    except IndexError:
dspath = input('Enter the path to the dataset: ')
# dspath = os.path.join(here, '..', '..', 'data', '185_baseball_dataset')
assert os.path.exists(dspath)
try:
prpath = (sys.argv[2])
    except IndexError:
prpath = input('Enter the path to the problem: ')
# prpath = os.path.join(here, '..', '..', 'data', '185_baseball_problem')
assert os.path.exists(prpath)
# check the pipeline JSON file
pipe_json = os.path.join(here, 'pipeline.json')
assert os.path.exists(pipe_json)
# read the JSON file
with open(pipe_json) as data_file:
ps = json.load(data_file)
    ## TBD: we need to make a check that the JSON aligns with the dataset and problem
# initialize the API class
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
# get the train and test data
X_train = d3mds.get_train_data()
y_train = d3mds.get_train_targets()
X_test = d3mds.get_test_data()
y_test = d3mds.get_test_targets()
# get columns information
cols_info = d3mds.dataset.get_learning_data_columns()
## instantiate feature extractor
key, fe = ps['feature_extractors'].popitem()
fe_class = fe['feature_extractor']
fe_params = fe['params']
FE = eval(fe_class)(**fe_params)
if isinstance(FE, AnnotatedTabularExtractor):
FE.set_cols_info(cols_info)
## instantiate feature selector
fs = ps['feature_selector']
fs_class = fs['feature_selector']
fs_params = fs['params']
FS = eval(fs_class)(**fs_params)
## instantiate estimator
est = ps['estimator']
est_class = est['estimator']
est_params = est['params']
EST = eval(est_class)(**est_params)
## make a pipeline from the above three components
pipeline = Pipeline([
('vect', FE),
('sel', FS),
('clf', EST),
])
## train the pipeline on train data
pipeline.fit(X_train, y_train)
## predict on test data
y_pred = pipeline.predict(X_test)
targetCols = [col['colName'] for col in d3mds.problem.get_targets()]
y_pred_df = pd.DataFrame(index=X_test.index, data=y_pred, columns=targetCols)
y_pred_df.to_csv(os.path.join('.','predictions.csv'))
## compute the score on test data
metrics = d3mds.problem.get_performance_metrics()
scoresdf = pd.DataFrame(columns=['metric','value'])
for item in metrics:
metric = item['metric']
if metric == 'f1Macro':
score = f1_score(y_test, y_pred, average='macro')
print('f1Macro', score)
scoresdf.loc[len(scoresdf)]=['f1Macro', score]
elif metric == 'meanSquaredError':
score = mean_squared_error(y_test, y_pred)
print('meanSquaredError', score)
scoresdf.loc[len(scoresdf)]=['meanSquaredError', score]
scoresdf.to_csv(os.path.join('.','scores.csv'))
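# --- Reference note (assumption, not a file from the original repo) ---
# pipeline.json itself is not shown here; from the way it is read above, a file
# of roughly this shape would be expected, where each class name must be
# importable from feature_extraction / feature_selection / estimation:
#
#   {
#     "feature_extractors": {"fe0": {"feature_extractor": "AnnotatedTabularExtractor",
#                                    "params": {"numeric_strategy": "mean"}}},
#     "feature_selector": {"feature_selector": "SelectPercentileClassification",
#                          "params": {"score_func": "f_classif", "percentile": 50}},
#     "estimator": {"estimator": "SGDClassifierEstimator",
#                   "params": {"loss": "hinge", "penalty": "elasticnet"}}
#   }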
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/pipeline.py
|
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
from numpy import ndarray
from scipy.sparse import csr_matrix
from pandas import DataFrame
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection.base import SelectorMixin
# https://stackoverflow.com/a/3862957
def get_all_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in get_all_subclasses(s)]
def sample_param_distributions(param_distributions):
try:
return sample_param_distributions_dict(param_distributions)
except AttributeError:
i = np.random.randint(len(param_distributions))
return sample_param_distributions_dict(param_distributions[i])
def sample_param_distributions_dict(param_distributions_dict):
params = {}
for k, v in param_distributions_dict.items():
i = np.random.randint(len(v))
params[k] = v[i]
return params
class AbstractParameterized(ABC):
param_distributions = {}
@classmethod
def get_random_parameters(cls):
return sample_param_distributions(cls.param_distributions)
class AbstractFeatureExtractor(AbstractParameterized, BaseEstimator):
def fit(self, df, variables):
self.fit_transform(df, variables)
return self
@abstractmethod
def fit_transform(self, df, variables):
""" Fits the feature extractor
:param df:
:type df: DataFrame
:param variables:
:type variables: list[D3MVariable]
:return:
:rtype: csr_matrix
"""
pass
@abstractmethod
def transform(self, df):
""" Transforms the data
:param df:
:type df: DataFrame
:return:
:rtype: csr_matrix
"""
pass
class AbstractFeatureSelector(AbstractParameterized, BaseEstimator, SelectorMixin):
pass
class AbstractEstimator(AbstractParameterized, BaseEstimator):
@abstractmethod
def fit(self, X, y):
"""
:param X:
:type X: csr_matrix
:param y:
:type y: ndarray
:return:
:rtype: AbstractEstimator
"""
return self
@abstractmethod
def predict(self, X):
"""
:param X:
:type X: csr_matrix
:return:
:rtype: ndarray
"""
pass
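# --- Usage sketch (illustrative only) ---
# A toy AbstractEstimator subclass showing how param_distributions is sampled;
# DemoLogisticEstimator is hypothetical and exists only for this demonstration.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression

    class DemoLogisticEstimator(AbstractEstimator):
        param_distributions = {
            'C': [float(x) for x in np.logspace(-3, 3, 7)],
            'fit_intercept': (True, False),
        }

        def __init__(self, **kwargs):
            self.kwargs = kwargs

        def fit(self, X, y):
            self.model = LogisticRegression(**self.kwargs)
            self.model.fit(X, y)
            return self

        def predict(self, X):
            return self.model.predict(X)

    # each call draws one random configuration from param_distributions
    print('sampled params:', DemoLogisticEstimator.get_random_parameters())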
|
d3m-model-search-master
|
test_data/185_baseball/185_baseball_solution/src/base.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json, sys
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of the learningData file as a DataFrame.
        If view is 'TRAIN' or 'TEST', the full learningData is filtered to return only the rows for that view.
        For view-based filtering, the problem object has to be passed because this method uses the dataSplits from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.loc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='NULL'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
dirname = os.path.basename(os.path.normpath(os.path.dirname(resPath)))
if resType =='table' and dirname=='tables':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
        # if the for loop finishes and the learningData file is not found, raise an error
        raise RuntimeError('could not find the learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the data resource entry (from datasetDoc) corresponding to learningData.csv
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
        # if the for loop finishes and the learningData resource is not found, raise an error
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
        Returns the problem schema version that was used to create this problem
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='NULL'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
|
d3m-model-search-master
|
test_data/test_cases_only/LL0_acled/LL0_acled_solution/src/d3mds.py
|
# coding: utf-8
import numpy as np
import pandas as pd
import os, json, sys, random
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
if __name__ == "__main__":
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
dspath = os.path.join(here, '..', '..', 'LL0_acled_dataset')
prpath = os.path.join(here, '..', '..', 'LL0_acled_problem')
assert os.path.exists(dspath)
assert os.path.exists(prpath)
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
print('\nLoading train and test data')
X_train = d3mds.get_train_data()
y_train = d3mds.get_train_targets().ravel()
print('X_train shape:', X_train.shape)
print('y_train shape:', y_train.shape)
X_test = d3mds.get_test_data()
y_test = d3mds.get_test_targets().ravel()
print('X_test shape:', X_test.shape)
print('y_test shape:', y_test.shape)
X_train = X_train[['notes']]
X_test = X_test[['notes']]
# Convert categorical labels to integers
le = LabelEncoder()
y_train_encoded = le.fit_transform(y_train)
y_test_encoded = le.transform(y_test)
print('\nBuilding and applying TF-IDF vectorizer')
text_train = X_train['notes'].values
    vectorizer = TfidfVectorizer(token_pattern=r'(?u)\b[^\d\W]+\b')
X_train_vec = vectorizer.fit_transform(text_train)
print('\nTraining Random Forest Classifier')
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X_train_vec, y_train_encoded)
# print('\nEvaluating model on train set')
# X_train_vec = vectorizer.transform(text_train)
# pred_train = clf.predict(X_train_vec)
# accuracy_train = accuracy_score(y_train_encoded, pred_train)
# confusion_mat_train = confusion_matrix(y_train_encoded, pred_train)
# print('Accuracy (train): ', accuracy_train)
# print('Confusion Matrix (train): \n', confusion_mat_train)
print('\nEvaluating model on test set')
text_test = X_test['notes'].values
X_test_vec = vectorizer.transform(text_test)
pred_test = clf.predict(X_test_vec)
accuracy_test = accuracy_score(y_test_encoded, pred_test)
#confusion_mat_test = confusion_matrix(y_test_encoded, pred_test)
print('Accuracy (test): ', accuracy_test)
# print('Confusion Matrix (test): \n', confusion_mat_test)
# Save predictions.csv
target_cols = ([target['colName'] for target in d3mds.problem.get_targets()])
y_predict_df = pd.DataFrame(data=le.inverse_transform(pred_test), index=X_test.index, columns=target_cols)
# y_predict_df = pd.DataFrame(data=pred_test, index=X_test.index, columns=target_cols)
y_predict_df.to_csv(os.path.join(here, '..', 'predictions.csv'))
# Save scores.csv file
df = pd.DataFrame(columns=['metric', 'value'])
df.loc[len(df)] = ['accuracy', accuracy_test]
df.to_csv(os.path.join(here, '..', 'scores.csv'))
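# --- Side note (illustrative only) ---
# The token_pattern used above keeps only purely alphabetic tokens. A quick,
# self-contained check of that behaviour on a made-up sentence:
#
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   v = TfidfVectorizer(token_pattern=r'(?u)\b[^\d\W]+\b')
#   v.fit(['Protesters gathered near 3 checkpoints on 21 March'])
#   print(sorted(v.vocabulary_))   # numeric tokens such as '3' and '21' are dropped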
|
d3m-model-search-master
|
test_data/test_cases_only/LL0_acled/LL0_acled_solution/src/pipeline.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json, sys
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of the learningData file as a DataFrame.
        If view is 'TRAIN' or 'TEST', the full learningData is filtered to return only the rows for that view.
        For view-based filtering, the problem object has to be passed because this method uses the dataSplits from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.loc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='NULL'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
dirname = os.path.basename(os.path.normpath(os.path.dirname(resPath)))
if resType =='table' and dirname=='tables':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
        # if the for loop finishes and the learningData file is not found, raise an error
        raise RuntimeError('could not find the learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the data resource entry (from datasetDoc) corresponding to learningData.csv
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
        # if the for loop finishes and the learningData resource is not found, raise an error
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
        Returns the problem schema version that was used to create this problem
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='NULL'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
|
d3m-model-search-master
|
test_data/test_cases_only/30_personae/30_personae_solution/src/d3mds.py
|
# coding: utf-8
# In[1]:
import nltk, os, glob, sys
import pandas as pd
from normalization import normalize_corpus, tokenize_text
import numpy as np
import codecs
from sklearn.datasets.base import Bunch
from sklearn.cross_validation import train_test_split
from sklearn.model_selection import cross_val_score, ShuffleSplit, KFold
from feature_extractors import bow_extractor, tfidf_extractor
from feature_extractors import averaged_word_vectorizer
from feature_extractors import tfidf_weighted_averaged_word_vectorizer
import nltk
import gensim
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.linear_model import SGDClassifier
import re, json
import warnings
warnings.filterwarnings('ignore')
from collections import OrderedDict
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
# In[2]:
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
dspath = os.path.join(here, '..', '..', '30_personae_dataset')
prpath = os.path.join(here, '..', '..', '30_personae_problem')
solpath = os.path.join(here, '..')
textPath = os.path.join(dspath, 'text')
assert os.path.exists(dspath)
assert os.path.exists(prpath)
TARGET_FIELD = 'extrovert'
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
RANDOM_STATE = 100
# In[3]:
def get_data(whichData='train'):
dataset = Bunch()
dataset.data = np.array([])
dataset.target = np.array([])
if whichData=='train':
data = d3mds.get_train_data()
targets = d3mds.get_train_targets()
elif whichData=='test':
data = d3mds.get_test_data()
targets = d3mds.get_test_targets()
else:
        raise RuntimeError('get_data should be passed either train or test, but got %s' % whichData)
for i, rf in enumerate(data['raw_text_file']):
path = os.path.join(textPath, rf)
raw = open(path, encoding='utf-8').read()
dataset.data = np.append(dataset.data, raw)
dataset.target = targets.ravel()
return dataset
# In[4]:
print('reading training data corpus ...')
dataset = get_data(whichData='train')
corpus, labels = dataset.data, dataset.target
print('normalizing corpus ...')
norm_corpus = normalize_corpus(corpus)
print('creating BOW features ...')
bow_vectorizer, bow_features = bow_extractor(norm_corpus)
# print(bow_features.shape)
print('creating tfidf features ...')
tfidf_vectorizer, tfidf_features = tfidf_extractor(norm_corpus)
# print(tfidf_features.shape)
print('creating averaged word vector features ...')
tokenized_corpus = [nltk.word_tokenize(text) for text in norm_corpus]
model = gensim.models.Word2Vec(tokenized_corpus, size=500, window=100, min_count=30, sample=1e-3)
avg_wv_features = averaged_word_vectorizer(corpus=tokenized_corpus, model=model, num_features=500)
# print(avg_wv_features.shape)
print('creating tfidf weighted averaged word vector features ...')
vocab = tfidf_vectorizer.vocabulary_
tfidf_wv_features = tfidf_weighted_averaged_word_vectorizer(corpus=tokenized_corpus, tfidf_vectors=tfidf_features,
tfidf_vocabulary=vocab,
model=model,
num_features=500)
# print(tfidf_wv_features.shape)
print('initializing RandomForestClassifier(RFC) and SVM classifiers ...')
rfc = RandomForestClassifier(n_estimators=20, max_depth=20, random_state=RANDOM_STATE)
svm = SGDClassifier(loss='hinge', n_iter=100, random_state=RANDOM_STATE)
models=[]
scores=[]
train_performance = OrderedDict()
print('training RFC with BOW features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(rfc, bow_features, labels, cv=cv, scoring='f1')
models.append((rfc, 'bow_features'))
scores.append(cv_scores.mean())
print('training SVM with BOW features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(svm, bow_features, labels, cv=cv, scoring='f1')
models.append((svm, 'bow_features'))
scores.append(cv_scores.mean())
print('training RFC with tfidf features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(rfc, tfidf_features, labels, cv=cv, scoring='f1')
models.append((rfc, 'tfidf_features'))
scores.append(cv_scores.mean())
print('training SVM with tfidf features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(svm, tfidf_features, labels, cv=cv, scoring='f1')
models.append((svm, 'tfidf_features'))
scores.append(cv_scores.mean())
print('training RFC with avg_wv_features features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(rfc, avg_wv_features, labels, cv=cv, scoring='f1')
models.append((rfc, 'avg_wv_features'))
scores.append(cv_scores.mean())
print('training SVM with avg_wv_features features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(svm, avg_wv_features, labels, cv=cv, scoring='f1')
models.append((svm, 'avg_wv_features'))
scores.append(cv_scores.mean())
print('training RFC with tfidf_wv_features features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(rfc, tfidf_wv_features, labels, cv=cv, scoring='f1')
models.append((rfc, 'tfidf_wv_features'))
scores.append(cv_scores.mean())
print('training SVM with tfidf_wv_features features ...')
cv = KFold(n_splits=10, shuffle=True, random_state=RANDOM_STATE)
cv_scores = cross_val_score(svm, tfidf_wv_features, labels, cv=cv, scoring='f1')
models.append((svm, 'tfidf_wv_features'))
scores.append(cv_scores.mean())
print('choosing the best model for baseline...')
baseline = models[np.argmax(scores)]
baselineScore = scores[np.argmax(scores)]
print('baseline model:', str(baseline))
print('baseline performance on 10-fold CV (mean f1):', baselineScore)
# In[22]:
print('training the model on the entire train data...')
baselineMod = baseline[0]
baselineFea = eval(baseline[1])
# print(baselineFea.shape)
baselineMod.fit(baselineFea, labels)
print('=============================================================================================')
## Make prediction on testData
print('making predictions on testData (assuming that testData is available) ...')
dataset = get_data(whichData='test')
corpus, labels = dataset.data, dataset.target
print('normalizing corpus ...')
norm_corpus = normalize_corpus(corpus)
print('creating BOW features ...')
bow_features = bow_vectorizer.transform(norm_corpus)
# print(bow_features.shape)
print('creating tfidf features ...')
tfidf_features = tfidf_vectorizer.transform(norm_corpus)
# print(tfidf_features.shape)
print('creating averaged word vector features ...')
tokenized_corpus = [nltk.word_tokenize(text) for text in norm_corpus]
model = gensim.models.Word2Vec(tokenized_corpus, size=500, window=100, min_count=30, sample=1e-3)
avg_wv_features = averaged_word_vectorizer(corpus=tokenized_corpus, model=model, num_features=500)
# print(avg_wv_features.shape)
print('creating tfidf weighted averaged word vector features ...')
vocab = tfidf_vectorizer.vocabulary_
tfidf_wv_features = tfidf_weighted_averaged_word_vectorizer(corpus=tokenized_corpus, tfidf_vectors=tfidf_features,
tfidf_vocabulary=vocab,
model=model,
num_features=500)
# print(tfidf_wv_features.shape)
test_features = None
if baseline[1] == 'bow_features':
test_features = bow_features
elif baseline[1] == 'tfidf_features':
test_features = tfidf_features
elif baseline[1] == 'avg_wv_features':
test_features = avg_wv_features
elif baseline[1] == 'tfidf_wv_features':
test_features = tfidf_wv_features
print('predicting ...')
y_predict = baselineMod.predict(test_features)
y_truth = labels
f1 = f1_score(y_truth, y_predict)
print('baseline performance on test data (mean f1):', f1)
# save predictions.csv
X_test = d3mds.get_test_data()
target_cols = ([target['colName'] for target in d3mds.problem.get_targets()])
y_predict_df = pd.DataFrame(data=y_predict, index=X_test.index, columns=target_cols)
y_predict_df.to_csv(os.path.join(here, '..', 'predictions.csv'))
# save scores.csv file
df = pd.DataFrame(columns=['metric', 'value'])
df.loc[len(df)] = ['f1', f1]
df.to_csv(os.path.join(here, '..', 'scores.csv'))
|
d3m-model-search-master
|
test_data/test_cases_only/30_personae/30_personae_solution/src/pipeline.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 27 04:03:12 2016
@author: DIP
"""
from sklearn.feature_extraction.text import CountVectorizer
def bow_extractor(corpus, ngram_range=(1,1)):
vectorizer = CountVectorizer(min_df=1, ngram_range=ngram_range)
features = vectorizer.fit_transform(corpus)
return vectorizer, features
from sklearn.feature_extraction.text import TfidfTransformer
def tfidf_transformer(bow_matrix):
transformer = TfidfTransformer(norm='l2',
smooth_idf=True,
use_idf=True)
tfidf_matrix = transformer.fit_transform(bow_matrix)
return transformer, tfidf_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
def tfidf_extractor(corpus, ngram_range=(1,1)):
vectorizer = TfidfVectorizer(min_df=1,
norm='l2',
smooth_idf=True,
use_idf=True,
ngram_range=ngram_range)
features = vectorizer.fit_transform(corpus)
return vectorizer, features
import numpy as np
def average_word_vectors(words, model, vocabulary, num_features):
feature_vector = np.zeros((num_features,),dtype="float64")
nwords = 0.
for word in words:
if word in vocabulary:
nwords = nwords + 1.
feature_vector = np.add(feature_vector, model[word])
if nwords:
feature_vector = np.divide(feature_vector, nwords)
return feature_vector
def averaged_word_vectorizer(corpus, model, num_features):
vocabulary = set(model.wv.index2word)
features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
for tokenized_sentence in corpus]
return np.array(features)
def tfidf_wtd_avg_word_vectors(words, tfidf_vector, tfidf_vocabulary, model, num_features):
    word_tfidfs = [tfidf_vector[0, tfidf_vocabulary.get(word)]
                   if tfidf_vocabulary.get(word) is not None
                   else 0 for word in words]
word_tfidf_map = {word:tfidf_val for word, tfidf_val in zip(words, word_tfidfs)}
feature_vector = np.zeros((num_features,),dtype="float64")
vocabulary = set(model.wv.index2word)
wts = 0.
for word in words:
if word in vocabulary:
word_vector = model[word]
weighted_word_vector = word_tfidf_map[word] * word_vector
wts = wts + word_tfidf_map[word]
feature_vector = np.add(feature_vector, weighted_word_vector)
if wts:
feature_vector = np.divide(feature_vector, wts)
return feature_vector
def tfidf_weighted_averaged_word_vectorizer(corpus, tfidf_vectors,
tfidf_vocabulary, model, num_features):
docs_tfidfs = [(doc, doc_tfidf)
for doc, doc_tfidf
in zip(corpus, tfidf_vectors)]
features = [tfidf_wtd_avg_word_vectors(tokenized_sentence, tfidf, tfidf_vocabulary,
model, num_features)
for tokenized_sentence, tfidf in docs_tfidfs]
return np.array(features)
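# --- Usage sketch (illustrative only) ---
# A minimal example of the averaged word-vector features on a toy corpus. It
# assumes the same gensim 3.x-era API used elsewhere in this solution
# (Word2Vec(size=...), model[word], model.wv.index2word).
if __name__ == '__main__':
    import gensim
    toy_corpus = [['the', 'cat', 'sat'], ['the', 'dog', 'barked']]
    w2v = gensim.models.Word2Vec(toy_corpus, size=10, window=2, min_count=1)
    feats = averaged_word_vectorizer(corpus=toy_corpus, model=w2v, num_features=10)
    print('feature matrix shape:', feats.shape)  # expected: (2, 10)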
|
d3m-model-search-master
|
test_data/test_cases_only/30_personae/30_personae_solution/src/feature_extractors.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 26 20:45:10 2016
@author: DIP
"""
from contractions import CONTRACTION_MAP
import re, os
import nltk
import string
from nltk.stem import WordNetLemmatizer
import pandas as pd
here = os.path.dirname(os.path.abspath(__file__))
#stopword_list = nltk.corpus.stopwords.words('english')
stopword_list = pd.read_csv(os.path.join(here, 'stop.txt'), header=None)[0].as_matrix()
wnl = WordNetLemmatizer()
def tokenize_text(text):
tokens = nltk.word_tokenize(text)
tokens = [token.strip() for token in tokens]
return tokens
def expand_contractions(text, contraction_mapping):
contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),
flags=re.IGNORECASE|re.DOTALL)
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contraction_mapping.get(match)\
if contraction_mapping.get(match)\
else contraction_mapping.get(match.lower())
expanded_contraction = first_char+expanded_contraction[1:]
return expanded_contraction
expanded_text = contractions_pattern.sub(expand_match, text)
expanded_text = re.sub("'", "", expanded_text)
return expanded_text
# from pattern.en import tag
from nltk.corpus import wordnet as wn
# Annotate text tokens with POS tags
def pos_tag_text(text):
def penn_to_wn_tags(pos_tag):
if pos_tag.startswith('J'):
return wn.ADJ
elif pos_tag.startswith('V'):
return wn.VERB
elif pos_tag.startswith('N'):
return wn.NOUN
elif pos_tag.startswith('R'):
return wn.ADV
else:
return None
# tagged_text = tag(text)
    # nltk.pos_tag expects a token list, so tokenize the raw text first
    tagged_text = nltk.pos_tag(tokenize_text(text))
tagged_lower_text = [(word.lower(), penn_to_wn_tags(pos_tag))
for word, pos_tag in
tagged_text]
return tagged_lower_text
# lemmatize text based on POS tags
def lemmatize_text(text):
pos_tagged_text = pos_tag_text(text)
lemmatized_tokens = [wnl.lemmatize(word, pos_tag) if pos_tag
else word
for word, pos_tag in pos_tagged_text]
lemmatized_text = ' '.join(lemmatized_tokens)
return lemmatized_text
def standardize_case(text):
tokens = tokenize_text(text)
lowered_tokens = list(map(str.lower, tokens))
lowered_text = ' '.join(lowered_tokens)
return lowered_text
def remove_special_characters(text):
tokens = tokenize_text(text)
pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))
filtered_tokens = filter(None, [pattern.sub('', token) for token in tokens])
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def remove_stopwords(text):
tokens = tokenize_text(text)
filtered_tokens = [token for token in tokens if token not in stopword_list]
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def normalize_corpus(corpus, tokenize=False):
normalized_corpus = []
for text in corpus:
text = expand_contractions(text, CONTRACTION_MAP)
text = standardize_case(text)
#text = lemmatize_text(text)
text = remove_special_characters(text)
text = remove_stopwords(text)
        if tokenize:
            text = tokenize_text(text)
        normalized_corpus.append(text)
return normalized_corpus
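# Usage sketch for normalize_corpus on a tiny corpus. It assumes the NLTK
# 'punkt' tokenizer data has already been downloaded and that stop.txt sits
# next to this file, since both are required by the helpers above.
if __name__ == '__main__':
    toy_corpus = ["The sky is blue, isn't it?",
                  "Brown foxes don't jump over lazy dogs."]
    print(normalize_corpus(toy_corpus))
    print(normalize_corpus(toy_corpus, tokenize=True))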
|
d3m-model-search-master
|
test_data/test_cases_only/30_personae/30_personae_solution/src/normalization.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 01 01:11:02 2016
@author: DIP
"""
CONTRACTION_MAP = {
"ain't": "is not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'll've": "I will have",
"I'm": "I am",
"I've": "I have",
"i'd": "i would",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it would",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
}
|
d3m-model-search-master
|
test_data/test_cases_only/30_personae/30_personae_solution/src/contractions.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json, sys
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of learningData.csv as a DataFrame.
        If view is 'TRAIN' or 'TEST', then the full learningData is filtered to return learningData only for that view.
        For view-based filtering, the problem object has to be passed because this method uses the splitsData from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.loc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='NULL'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
dirname = os.path.basename(os.path.normpath(os.path.dirname(resPath)))
if resType =='table' and dirname=='tables':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
        # if the for loop completes and learningData is not found, raise an error
        raise RuntimeError('could not find learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the data resource entry that contains learningData.csv
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
        # if the for loop completes and the learningData resource is not found, raise an error
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
Returns the problem schema version that was used to create this dataset
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='NULL'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
        # save problemDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
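# Usage sketch: loading a dataset/problem pair through this API. The paths
# below are placeholders for illustration only; any dataset and problem
# directory pair written against schema version 3.0 should work the same way.
if __name__ == '__main__':
    example_dspath = '/path/to/a_d3m_dataset'   # placeholder path
    example_prpath = '/path/to/a_d3m_problem'   # placeholder path
    d3mds = D3MDS(example_dspath, example_prpath)
    X_train, y_train = d3mds.get_train_data(), d3mds.get_train_targets()
    X_test, y_test = d3mds.get_test_data(), d3mds.get_test_targets()
    print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)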
|
d3m-model-search-master
|
test_data/test_cases_only/uu1_datasmash/uu1_datasmash_solution/src/d3mds.py
|
import os, sys, json, random
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator
import pyflux as pf
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
dspath = os.path.join(here, '..', '..', 'uu1_datasmash_dataset')
prpath = os.path.join(here, '..', '..', 'uu1_datasmash_problem')
solpath = os.path.join(here, '..')
assert os.path.exists(dspath)
assert os.path.exists(prpath)
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
MIN = 50000
if __name__ == '__main__':
# get training and test data
trainData = d3mds.get_train_data()
trainTargets = d3mds.get_train_targets()
testData = d3mds.get_test_data()
testTargets = d3mds.get_test_targets()
featureCounts = set()
def featurize(fileName):
global featureCounts
path = os.path.join(dspath, 'timeseries', fileName)
assert os.path.exists(path)
features = pd.read_csv(path, index_col=0)['val'].tolist()
featureCounts.add(len(features))
return features[:MIN]
if os.path.exists(os.path.join(here, 'X_train.csv')):
print('loading X_train ....')
X_train = pd.read_csv(os.path.join(here, 'X_train.csv'), index_col=0)
print('shape of X_train', X_train.shape)
else:
print('making X_train from trainData ...')
X_train = pd.DataFrame(index=trainData.index, data=trainData['time_series_file'].apply(featurize).tolist())
X_train.to_csv(os.path.join(here, 'X_train.csv'))
print('shape of X_train', X_train.shape)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors.nearest_centroid import NearestCentroid
    clf = NearestCentroid(shrink_threshold=None)
clf.fit(X_train, trainTargets.ravel())
# print('=========================================================')
X_test = pd.DataFrame(index=testData.index, data=testData['time_series_file'].apply(featurize).tolist())
print('shape of X_test', X_test.shape)
y_pred = clf.predict(X_test)
y_truth = testTargets.ravel()
from sklearn.metrics import accuracy_score, f1_score
accuracy = accuracy_score(y_truth, y_pred)
f1 = f1_score(y_truth, y_pred, average='macro')
print('F1 (macro) score on test data', f1)
# saving the predictions.csv file
y_pred_df = pd.DataFrame(index=testData.index, data=y_pred, columns=[target['colName'] for target in d3mds.problem.get_targets()])
y_pred_df.to_csv(os.path.join(solpath, 'predictions.csv'))
# saving the scores.csv file
df = pd.DataFrame(columns=['metric', 'value'])
df.loc[len(df)] = ['f1Macro', f1]
df.to_csv(os.path.join(solpath, 'scores.csv'))
|
d3m-model-search-master
|
test_data/test_cases_only/uu1_datasmash/uu1_datasmash_solution/src/pipeline.py
|
from unittest import TestCase
from base import AbstractFeatureSelector
import numpy as np
from scipy import stats
from scipy.sparse import issparse
from sklearn.feature_selection import f_classif, SelectFromModel, SelectPercentile
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.utils import check_X_y
from sklearn.utils.extmath import safe_sparse_dot, row_norms
from scipy.linalg import norm
# modified to address the issue of centering sparse matrices with a bit of algebra
def better_f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
    y : array of shape(n_samples).
        The target vector.
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples*X_means**2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
class SelectFromLinearSVC(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'C': [float(x) for x in np.logspace(-2, 5, 100)]
}
def __init__(self, threshold=None, penalty='l1', loss='squared_hinge', dual=False, tol=0.0001, C=1.0, fit_intercept=True, random_state=None, max_iter=1000):
self.threshold = threshold
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.random_state = random_state
self.max_iter = max_iter
def fit(self, X, y):
self.linear_svc = LinearSVC(penalty=self.penalty, loss=self.loss, dual=self.dual, tol=self.tol,
fit_intercept=self.fit_intercept, random_state=self.random_state,
max_iter=self.max_iter)
self.linear_svc.fit(X, y)
self.select_from_model = SelectFromModel(self.linear_svc, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileClassification(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_classif',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_classif': f_classif
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
class SelectFromLasso(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'alpha': [float(x) for x in np.logspace(-5, 2, 100)]
}
def __init__(self, threshold=None, alpha=1.0, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, positive=False, selection='cyclic', random_state=None):
self.threshold = threshold
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.positive = positive
self.selection = selection
self.random_state = random_state
def fit(self, X, y):
# NOTE: y is an ndarray of strings
self.lasso = Lasso(alpha=self.alpha, fit_intercept=self.fit_intercept, normalize=self.normalize,
max_iter=self.max_iter, tol=self.tol, positive=self.positive, selection=self.selection,
random_state=self.random_state)
self.lasso.fit(X, y)
self.select_from_model = SelectFromModel(self.lasso, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileRegression(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_regression',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_regression': better_f_regression
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
def fit(self, X, y):
# NOTE: y is an ndarray of strings
super().fit(X, y)
return self
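# Usage sketch: better_f_regression on small random data, dense and sparse,
# to illustrate that the sparse branch reproduces the centered statistics
# without densifying X. Purely illustrative; the data below is random.
if __name__ == '__main__':
    from scipy.sparse import csr_matrix
    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 5)
    y_demo = 2.0 * X_demo[:, 0] + 0.1 * rng.rand(20)
    F_dense, p_dense = better_f_regression(X_demo, y_demo)
    F_sparse, p_sparse = better_f_regression(csr_matrix(X_demo), y_demo)
    print(np.allclose(F_dense, F_sparse))  # the two code paths should agree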
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/feature_selection.py
|
from base import AbstractEstimator
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier, SGDRegressor
class SGDClassifierEstimator(AbstractEstimator):
param_distributions = {
'loss': ('hinge', 'log', 'squared_hinge', 'perceptron'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False)
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_classifier = SGDClassifier(*self.args, **self.kwargs)
self.sgd_classifier.fit(X, y)
def predict(self, X):
return self.sgd_classifier.predict(X)
class SGDRegressorEstimator(AbstractEstimator):
param_distributions = {
'loss': ('squared_loss', 'huber'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False),
'epsilon': [float(x) for x in np.logspace(-2, 0, 5)],
'learning_rate': ('optimal', 'invscaling'),
'eta0': (0.1, 0.01, 0.001),
'power_t': [float(x) for x in np.linspace(0, 1, 5)]
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_regressor = SGDRegressor(*self.args, **self.kwargs)
self.sgd_regressor.fit(X, y)
def predict(self, X):
return self.sgd_regressor.predict(X)
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDClassifierEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdclassifier = SGDClassifier(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdclassifier.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdclassifier.predict(self.transform(X))
def decision_function(self, X):
return self.sgdclassifier.decision_function(self.transform(X))
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDRegressorEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdregressor = SGDRegressor(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdregressor.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdregressor.predict(self.transform(X))
# TODO: Add kernel SVM
# TODO: Add kernel ridge regressor
# TODO: Add random forests / xgboost
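# Usage sketch: fitting the RBF-sampler + SGD classifier wrapper above on toy
# data. The hyperparameter values are arbitrary placeholders, chosen only to
# show the call pattern.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(100, 4)
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1.0).astype(int)
    clf = RBFSamplerSGDClassifierEstimator(gamma=0.5, n_components=50, random_state=0)
    clf.fit(X_demo, y_demo)
    print(clf.predict(X_demo[:5]))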
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/estimation.py
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/__init__.py
|
|
from collections import defaultdict, OrderedDict
import numpy as np
from scipy import signal
from scipy.sparse import csr_matrix, hstack
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer, OneHotEncoder, StandardScaler
from sklearn.utils.validation import check_is_fitted
from base import AbstractFeatureExtractor
class DenseMixedStrategyImputer(BaseEstimator, TransformerMixin):
def __init__(self, missing_values='NaN', strategies=None, add_missing_indicator=True, verbose=False):
self.missing_values = missing_values
if strategies is None:
raise ValueError('Must provide strategy.')
allowed_strategies = ['mean', 'median', 'most_frequent']
if any(s not in allowed_strategies for s in strategies):
raise ValueError('Invalid strategy in list.')
self.strategies = strategies
self.add_missing_indicator = add_missing_indicator
self.verbose = verbose
def fit(self, X, y=None):
n_samples, n_features = X.shape
print('n_features',n_features)
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
self.impute_strategies = list(set(self.strategies))
self.impute_indices = [np.array([i for i, x in enumerate(self.strategies) if x == s]) for s in self.impute_strategies]
self.impute_valid_indices = []
self.imputers = [Imputer(missing_values=self.missing_values, strategy=s, verbose=self.verbose) for s in
self.impute_strategies]
for indices, imputer in zip(self.impute_indices, self.imputers):
imputer.fit(X[:, indices])
valid_mask = np.logical_not(np.isnan(imputer.statistics_))
self.impute_valid_indices.append(indices[valid_mask])
return self
def transform(self, X):
n_samples, n_features = X.shape
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
check_is_fitted(self, 'imputers')
if self.add_missing_indicator:
output_scale = 2
else:
output_scale = 1
X_out = np.zeros((n_samples, output_scale*n_features))
for input_indices, output_indices, imputer in zip(self.impute_indices, self.impute_valid_indices, self.imputers):
X_out[:, output_scale*output_indices] = imputer.transform(X[:, input_indices])
if self.add_missing_indicator:
X_out[:, np.arange(1, 2*n_features, 2)] = np.isnan(X).astype('float', copy=False)
return X_out
class DataFrameCategoricalEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.code_maps = {}
for k in X.columns:
self.code_maps[k] = defaultdict(lambda: np.nan)
self.code_maps[k].update({v: k for k, v in enumerate(X[k].astype('category').cat.categories)})
return self
def transform(self, X):
if set(X.columns) != set(self.code_maps):
raise ValueError('Columns do not match fit model.')
return X.apply(lambda x: x.apply(lambda y: self.code_maps[x.name][y])).as_matrix()
class AnnotatedTabularExtractor(AbstractFeatureExtractor):
param_distributions = {
'normalize_text': [True, False],
'categorize': [True, False],
'numeric_strategy': ['mean', 'median'],
'add_missing_indicator': [True, False]
}
def __init__(self, normalize_text=False, categorize=False, numeric_strategy='mean', add_missing_indicator=True):
self.normalize_text = normalize_text
self.categorize = categorize
self.numeric_strategy = numeric_strategy
self.add_missing_indicator = add_missing_indicator
def set_cols_info(self, cols_info):
self.cols_info = cols_info
def determine_colType(self, column):
variables = self.cols_info
for var in variables:
var_colName = var['colName']
if str(var_colName) != str(column):
continue
var_colType = var['colType']
if var_colType in {'categorical', 'boolean'}:
return 'categorical'
elif var_colType in {'integer', 'real'}:
return 'numeric'
elif var_colType == 'string':
return 'text'
elif var_colType == 'dateTime':
                raise RuntimeError('dateTime not implemented in this feature extractor yet!')
def fit_transform(self, df, variables):
df = self.copy_normalize_text(df)
self.column_types = OrderedDict()
for column in df:
itype = self.determine_colType(column)
# print('itype',itype)
self.column_types[column] = itype
self.numeric_columns = [column for column, type in self.column_types.items() if type == 'numeric']
self.categorical_columns = [column for column, type in self.column_types.items() if type == 'categorical']
self.text_columns = [column for column, type in self.column_types.items() if type == 'text']
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
self.numeric_imputer = DenseMixedStrategyImputer(
strategies=[self.numeric_strategy]*len(self.numeric_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.numeric_imputer.fit_transform(X)
self.numeric_scaler = StandardScaler()
output_arrays.append(self.numeric_scaler.fit_transform(X))
if len(self.categorical_columns) > 0:
self.categorical_encoder = DataFrameCategoricalEncoder()
X = self.categorical_encoder.fit_transform(df[self.categorical_columns])
self.categorical_imputer = DenseMixedStrategyImputer(
strategies=['most_frequent']*len(self.categorical_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.categorical_imputer.fit_transform(X)
self.one_hot_encoder = OneHotEncoder(
categorical_features=np.arange(len(self.categorical_columns)) * (2 if self.add_missing_indicator else 1)
)
output_arrays.append(self.one_hot_encoder.fit_transform(X))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def transform(self, df):
check_is_fitted(self, 'column_types')
if list(df) != list(self.column_types):
raise ValueError('Data to be transformed does not match fitting data.')
df = self.copy_normalize_text(df)
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
output_arrays.append(self.numeric_scaler.transform(self.numeric_imputer.transform(X)))
if len(self.categorical_columns) > 0:
X = self.categorical_encoder.transform(df[self.categorical_columns])
output_arrays.append(self.one_hot_encoder.transform(self.categorical_imputer.transform(X)))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def copy_normalize_text(self, df):
df = df.copy()
if self.normalize_text:
for column in df:
try:
df[column] = df[column].str.lower().str.strip()
except:
df[column] = df[column]
return df
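# Usage sketch: running AnnotatedTabularExtractor on a toy DataFrame with a
# hand-written cols_info list (in real runs cols_info comes from
# d3mds.dataset.get_learning_data_columns()). The extractor relies on the
# older pandas/scikit-learn APIs used above (DataFrame.as_matrix,
# preprocessing.Imputer, OneHotEncoder with categorical_features), so this
# sketch assumes versions where those still exist.
if __name__ == '__main__':
    toy_df = pd.DataFrame({'age': ['23', '31', ''],
                           'color': ['red', 'blue', 'red']})
    toy_cols_info = [{'colName': 'age', 'colType': 'integer'},
                     {'colName': 'color', 'colType': 'categorical'}]
    extractor = AnnotatedTabularExtractor(numeric_strategy='median')
    extractor.set_cols_info(toy_cols_info)
    features = extractor.fit_transform(toy_df, toy_cols_info)
    print(features.shape)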
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/feature_extraction.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json, sys
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of learningData.csv as a DataFrame.
        If view is 'TRAIN' or 'TEST', then the full learningData is filtered to return learningData only for that view.
        For view-based filtering, the problem object has to be passed because this method uses the splitsData from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.loc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='NULL'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
dirname = os.path.basename(os.path.normpath(os.path.dirname(resPath)))
if resType =='table' and dirname=='tables':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
        # if the for loop completes and learningData is not found, raise an error
        raise RuntimeError('could not find learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the data resource entry that contains learningData.csv
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
        # if the for loop completes and the learningData resource is not found, raise an error
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
Returns the problem schema version that was used to create this dataset
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='NULL'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
        # save problemDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/d3mds.py
|
import os, sys, json
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score, mean_squared_error
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
from feature_extraction import *
from feature_selection import *
from estimation import *
if __name__ == '__main__':
# get the paths of the dataset and problem
try:
dspath = (sys.argv[1])
except:
dspath = input('Enter the path to the dataset: ')
# dspath = os.path.join(here, '..', '..', 'data', '185_baseball_dataset')
assert os.path.exists(dspath)
try:
prpath = (sys.argv[2])
except:
prpath = input('Enter the path to the problem: ')
# prpath = os.path.join(here, '..', '..', 'data', '185_baseball_problem')
assert os.path.exists(prpath)
# check the pipeline JSON file
pipe_json = os.path.join(here, 'pipeline.json')
assert os.path.exists(pipe_json)
# read the JSON file
with open(pipe_json) as data_file:
ps = json.load(data_file)
    ## TBD: we need to add a check that the JSON aligns with the dataset and problem
# initialize the API class
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
# get the train and test data
X_train = d3mds.get_train_data()
y_train = d3mds.get_train_targets()
X_test = d3mds.get_test_data()
y_test = d3mds.get_test_targets()
# get columns information
cols_info = d3mds.dataset.get_learning_data_columns()
## instantiate feature extractor
key, fe = ps['feature_extractors'].popitem()
fe_class = fe['feature_extractor']
fe_params = fe['params']
FE = eval(fe_class)(**fe_params)
if isinstance(FE, AnnotatedTabularExtractor):
FE.set_cols_info(cols_info)
## instantiate feature selector
fs = ps['feature_selector']
fs_class = fs['feature_selector']
fs_params = fs['params']
FS = eval(fs_class)(**fs_params)
## instantiate estimator
est = ps['estimator']
est_class = est['estimator']
est_params = est['params']
EST = eval(est_class)(**est_params)
## make a pipeline from the above three components
pipeline = Pipeline([
('vect', FE),
('sel', FS),
('clf', EST),
])
## train the pipeline on train data
pipeline.fit(X_train, y_train)
## predict on test data
y_pred = pipeline.predict(X_test)
targetCols = [col['colName'] for col in d3mds.problem.get_targets()]
y_pred_df = pd.DataFrame(index=X_test.index, data=y_pred, columns=targetCols)
y_pred_df.to_csv(os.path.join('.','predictions.csv'))
## compute the score on test data
metrics = d3mds.problem.get_performance_metrics()
scoresdf = pd.DataFrame(columns=['metric','value'])
for item in metrics:
metric = item['metric']
if metric == 'f1Macro':
score = f1_score(y_test, y_pred, average='macro')
print('f1Macro', score)
scoresdf.loc[len(scoresdf)]=['f1Macro', score]
elif metric == 'meanSquaredError':
score = mean_squared_error(y_test, y_pred)
print('meanSquaredError', score)
scoresdf.loc[len(scoresdf)]=['meanSquaredError', score]
scoresdf.to_csv(os.path.join('.','scores.csv'))
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/pipeline.py
|
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
from numpy import ndarray
from scipy.sparse import csr_matrix
from pandas import DataFrame
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection.base import SelectorMixin
# https://stackoverflow.com/a/3862957
def get_all_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in get_all_subclasses(s)]
def sample_param_distributions(param_distributions):
try:
return sample_param_distributions_dict(param_distributions)
except AttributeError:
i = np.random.randint(len(param_distributions))
return sample_param_distributions_dict(param_distributions[i])
def sample_param_distributions_dict(param_distributions_dict):
params = {}
for k, v in param_distributions_dict.items():
i = np.random.randint(len(v))
params[k] = v[i]
return params
class AbstractParameterized(ABC):
param_distributions = {}
@classmethod
def get_random_parameters(cls):
return sample_param_distributions(cls.param_distributions)
class AbstractFeatureExtractor(AbstractParameterized, BaseEstimator):
def fit(self, df, variables):
self.fit_transform(df, variables)
return self
@abstractmethod
def fit_transform(self, df, variables):
""" Fits the feature extractor
:param df:
:type df: DataFrame
:param variables:
:type variables: list[D3MVariable]
:return:
:rtype: csr_matrix
"""
pass
@abstractmethod
def transform(self, df):
""" Transforms the data
:param df:
:type df: DataFrame
:return:
:rtype: csr_matrix
"""
pass
class AbstractFeatureSelector(AbstractParameterized, BaseEstimator, SelectorMixin):
pass
class AbstractEstimator(AbstractParameterized, BaseEstimator):
@abstractmethod
def fit(self, X, y):
"""
:param X:
:type X: csr_matrix
:param y:
:type y: ndarray
:return:
:rtype: AbstractEstimator
"""
return self
@abstractmethod
def predict(self, X):
"""
:param X:
:type X: csr_matrix
:return:
:rtype: ndarray
"""
pass
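# Usage sketch: a minimal concrete estimator built on the abstract classes
# above, showing how param_distributions and get_random_parameters fit
# together. The class and its parameter grid are illustrative placeholders.
if __name__ == '__main__':
    class MeanBaselineEstimator(AbstractEstimator):
        param_distributions = {'shift': [0.0, 0.5, 1.0]}
        def __init__(self, shift=0.0):
            self.shift = shift
        def fit(self, X, y):
            self.mean_ = np.mean(y)
            return self
        def predict(self, X):
            return np.full(X.shape[0], self.mean_ + self.shift)
    sampled_params = MeanBaselineEstimator.get_random_parameters()
    baseline = MeanBaselineEstimator(**sampled_params)
    baseline.fit(np.zeros((4, 2)), np.array([1.0, 2.0, 3.0, 4.0]))
    print(sampled_params, baseline.predict(np.zeros((2, 2))))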
|
d3m-model-search-master
|
test_data/test_cases_only/185_baseball/185_baseball_solution/src/base.py
|
from unittest import TestCase
from base import AbstractFeatureSelector
import numpy as np
from scipy import stats
from scipy.sparse import issparse
from sklearn.feature_selection import f_classif, SelectFromModel, SelectPercentile
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.utils import check_X_y
from sklearn.utils.extmath import safe_sparse_dot, row_norms
from scipy.linalg import norm
# modified to address the issue of centering sparse matrices with a bit of algebra
def better_f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
    y : array of shape(n_samples).
        The target vector.
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples*X_means**2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
class SelectFromLinearSVC(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'C': [float(x) for x in np.logspace(-2, 5, 100)]
}
def __init__(self, threshold=None, penalty='l1', loss='squared_hinge', dual=False, tol=0.0001, C=1.0, fit_intercept=True, random_state=None, max_iter=1000):
self.threshold = threshold
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.random_state = random_state
self.max_iter = max_iter
def fit(self, X, y):
self.linear_svc = LinearSVC(penalty=self.penalty, loss=self.loss, dual=self.dual, tol=self.tol,
fit_intercept=self.fit_intercept, random_state=self.random_state,
max_iter=self.max_iter)
self.linear_svc.fit(X, y)
self.select_from_model = SelectFromModel(self.linear_svc, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileClassification(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_classif',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_classif': f_classif
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
class SelectFromLasso(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'alpha': [float(x) for x in np.logspace(-5, 2, 100)]
}
def __init__(self, threshold=None, alpha=1.0, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, positive=False, selection='cyclic', random_state=None):
self.threshold = threshold
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.positive = positive
self.selection = selection
self.random_state = random_state
def fit(self, X, y):
# NOTE: y is an ndarray of strings
self.lasso = Lasso(alpha=self.alpha, fit_intercept=self.fit_intercept, normalize=self.normalize,
max_iter=self.max_iter, tol=self.tol, positive=self.positive, selection=self.selection,
random_state=self.random_state)
self.lasso.fit(X, y)
self.select_from_model = SelectFromModel(self.lasso, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileRegression(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_regression',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_regression': better_f_regression
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
def fit(self, X, y):
# NOTE: y is an ndarray of strings
super().fit(X, y)
return self
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/feature_selection.py
|
from base import AbstractEstimator
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier, SGDRegressor
class SGDClassifierEstimator(AbstractEstimator):
param_distributions = {
'loss': ('hinge', 'log', 'squared_hinge', 'perceptron'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False)
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_classifier = SGDClassifier(*self.args, **self.kwargs)
self.sgd_classifier.fit(X, y)
def predict(self, X):
return self.sgd_classifier.predict(X)
class SGDRegressorEstimator(AbstractEstimator):
param_distributions = {
'loss': ('squared_loss', 'huber'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False),
'epsilon': [float(x) for x in np.logspace(-2, 0, 5)],
'learning_rate': ('optimal', 'invscaling'),
'eta0': (0.1, 0.01, 0.001),
'power_t': [float(x) for x in np.linspace(0, 1, 5)]
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_regressor = SGDRegressor(*self.args, **self.kwargs)
self.sgd_regressor.fit(X, y)
def predict(self, X):
return self.sgd_regressor.predict(X)
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDClassifierEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdclassifier = SGDClassifier(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdclassifier.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdclassifier.predict(self.transform(X))
def decision_function(self, X):
return self.sgdclassifier.decision_function(self.transform(X))
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDRegressorEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdregressor = SGDRegressor(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdregressor.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdregressor.predict(self.transform(X))
# TODO: Add kernel SVM
# TODO: Add kernel ridge regressor
# TODO: Add random forests / xgboost
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/estimation.py
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/__init__.py
|
|
from collections import defaultdict, OrderedDict
import numpy as np
from scipy import signal
from scipy.sparse import csr_matrix, hstack
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer, OneHotEncoder, StandardScaler
from sklearn.utils.validation import check_is_fitted
from base import AbstractFeatureExtractor
class DenseMixedStrategyImputer(BaseEstimator, TransformerMixin):
def __init__(self, missing_values='NaN', strategies=None, add_missing_indicator=True, verbose=False):
self.missing_values = missing_values
if strategies is None:
raise ValueError('Must provide strategy.')
allowed_strategies = ['mean', 'median', 'most_frequent']
if any(s not in allowed_strategies for s in strategies):
raise ValueError('Invalid strategy in list.')
self.strategies = strategies
self.add_missing_indicator = add_missing_indicator
self.verbose = verbose
def fit(self, X, y=None):
n_samples, n_features = X.shape
print('n_features',n_features)
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
self.impute_strategies = list(set(self.strategies))
self.impute_indices = [np.array([i for i, x in enumerate(self.strategies) if x == s]) for s in self.impute_strategies]
self.impute_valid_indices = []
self.imputers = [Imputer(missing_values=self.missing_values, strategy=s, verbose=self.verbose) for s in
self.impute_strategies]
for indices, imputer in zip(self.impute_indices, self.imputers):
imputer.fit(X[:, indices])
valid_mask = np.logical_not(np.isnan(imputer.statistics_))
self.impute_valid_indices.append(indices[valid_mask])
return self
def transform(self, X):
n_samples, n_features = X.shape
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
check_is_fitted(self, 'imputers')
if self.add_missing_indicator:
output_scale = 2
else:
output_scale = 1
X_out = np.zeros((n_samples, output_scale*n_features))
for input_indices, output_indices, imputer in zip(self.impute_indices, self.impute_valid_indices, self.imputers):
X_out[:, output_scale*output_indices] = imputer.transform(X[:, input_indices])
if self.add_missing_indicator:
X_out[:, np.arange(1, 2*n_features, 2)] = np.isnan(X).astype('float', copy=False)
return X_out
class DataFrameCategoricalEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.code_maps = {}
for k in X.columns:
self.code_maps[k] = defaultdict(lambda: np.nan)
self.code_maps[k].update({v: k for k, v in enumerate(X[k].astype('category').cat.categories)})
return self
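    # Category values unseen during fit map to np.nan at transform time, because each per-column map
    # is a defaultdict(lambda: np.nan); downstream imputation is expected to handle those NaNs.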
def transform(self, X):
if set(X.columns) != set(self.code_maps):
raise ValueError('Columns do not match fit model.')
return X.apply(lambda x: x.apply(lambda y: self.code_maps[x.name][y])).as_matrix()
class AnnotatedTabularExtractor(AbstractFeatureExtractor):
param_distributions = {
'normalize_text': [True, False],
'categorize': [True, False],
'numeric_strategy': ['mean', 'median'],
'add_missing_indicator': [True, False]
}
def __init__(self, normalize_text=False, categorize=False, numeric_strategy='mean', add_missing_indicator=True):
self.normalize_text = normalize_text
self.categorize = categorize
self.numeric_strategy = numeric_strategy
self.add_missing_indicator = add_missing_indicator
def set_cols_info(self, cols_info):
self.cols_info = cols_info
def determine_colType(self, column):
variables = self.cols_info
for var in variables:
var_colName = var['colName']
if str(var_colName) != str(column):
continue
var_colType = var['colType']
if var_colType in {'categorical', 'boolean'}:
return 'categorical'
elif var_colType in {'integer', 'real'}:
return 'numeric'
elif var_colType == 'string':
return 'text'
elif var_colType == 'dateTime':
                raise RuntimeError('dateTime not implemented in this feature extractor yet !!')
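        # Columns whose colType is not recognized here (or that are missing from cols_info) implicitly
        # return None and are excluded from the numeric/categorical/text groups in fit_transform.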
def fit_transform(self, df, variables):
df = self.copy_normalize_text(df)
self.column_types = OrderedDict()
for column in df:
itype = self.determine_colType(column)
# print('itype',itype)
self.column_types[column] = itype
self.numeric_columns = [column for column, type in self.column_types.items() if type == 'numeric']
self.categorical_columns = [column for column, type in self.column_types.items() if type == 'categorical']
self.text_columns = [column for column, type in self.column_types.items() if type == 'text']
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
self.numeric_imputer = DenseMixedStrategyImputer(
strategies=[self.numeric_strategy]*len(self.numeric_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.numeric_imputer.fit_transform(X)
self.numeric_scaler = StandardScaler()
output_arrays.append(self.numeric_scaler.fit_transform(X))
if len(self.categorical_columns) > 0:
self.categorical_encoder = DataFrameCategoricalEncoder()
X = self.categorical_encoder.fit_transform(df[self.categorical_columns])
self.categorical_imputer = DenseMixedStrategyImputer(
strategies=['most_frequent']*len(self.categorical_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.categorical_imputer.fit_transform(X)
self.one_hot_encoder = OneHotEncoder(
categorical_features=np.arange(len(self.categorical_columns)) * (2 if self.add_missing_indicator else 1)
)
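            # When add_missing_indicator is True the imputer interleaves value/indicator columns, so the
            # categorical codes to one-hot encode sit at even indices (stride 2); otherwise they are
            # contiguous (stride 1).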
output_arrays.append(self.one_hot_encoder.fit_transform(X))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def transform(self, df):
check_is_fitted(self, 'column_types')
if list(df) != list(self.column_types):
raise ValueError('Data to be transformed does not match fitting data.')
df = self.copy_normalize_text(df)
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
output_arrays.append(self.numeric_scaler.transform(self.numeric_imputer.transform(X)))
if len(self.categorical_columns) > 0:
X = self.categorical_encoder.transform(df[self.categorical_columns])
output_arrays.append(self.one_hot_encoder.transform(self.categorical_imputer.transform(X)))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def copy_normalize_text(self, df):
df = df.copy()
if self.normalize_text:
for column in df:
try:
df[column] = df[column].str.lower().str.strip()
except:
df[column] = df[column]
return df
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/feature_extraction.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of the learningData file as a DataFrame.
If view is 'TRAIN' or 'TEST', then the full learningData is filtered to return learningData only for that view.
        For view-based filtering, the problem object has to be passed because this method uses the splitsData from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.iloc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='redacted'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
# if the for loop is over and learningDoc is not found, then return None
        raise RuntimeError('could not find learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the learningData resource entry (metadata dict) from datasetDoc
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
# if the for loop is over and learningDoc is not found, then return None
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
Returns the problem schema version that was used to create this dataset
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='redacted'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
return np.ravel(df[df.columns[target_cols]])
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/d3mds.py
|
import os, sys, json
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score, mean_squared_error
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
from feature_extraction import *
from feature_selection import *
from estimation import *
if __name__ == '__main__':
# get the paths of the dataset and problem
try:
dspath = (sys.argv[1])
except:
dspath = input('Enter the path to the dataset: ')
# dspath = os.path.join(here, '..', '..', 'data', '185_baseball_dataset')
assert os.path.exists(dspath)
try:
prpath = (sys.argv[2])
except:
prpath = input('Enter the path to the problem: ')
# prpath = os.path.join(here, '..', '..', 'data', '185_baseball_problem')
assert os.path.exists(prpath)
# check the pipeline JSON file
pipe_json = os.path.join(here, 'pipeline.json')
assert os.path.exists(pipe_json)
# read the JSON file
with open(pipe_json) as data_file:
ps = json.load(data_file)
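    # Illustrative shape of pipeline.json, inferred from how it is consumed below
    # (the class names and params here are hypothetical examples, not the actual search output):
    # {
    #   "feature_extractors": {"fe0": {"feature_extractor": "AnnotatedTabularExtractor", "params": {}}},
    #   "feature_selector": {"feature_selector": "SelectPercentileClassification", "params": {}},
    #   "estimator": {"estimator": "SGDClassifierEstimator", "params": {}}
    # }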
    ## TBD: we need to add a check that the JSON aligns with the dataset and problem
# initialize the API class
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
# get the train and test data
X_train = d3mds.get_train_data()
y_train = d3mds.get_train_targets()
X_test = d3mds.get_test_data()
y_test = d3mds.get_test_targets()
# get columns information
cols_info = d3mds.dataset.get_learning_data_columns()
## instantiate feature extractor
key, fe = ps['feature_extractors'].popitem()
fe_class = fe['feature_extractor']
fe_params = fe['params']
FE = eval(fe_class)(**fe_params)
if isinstance(FE, AnnotatedTabularExtractor):
FE.set_cols_info(cols_info)
## instantiate feature selector
fs = ps['feature_selector']
fs_class = fs['feature_selector']
fs_params = fs['params']
FS = eval(fs_class)(**fs_params)
## instantiate estimator
est = ps['estimator']
est_class = est['estimator']
est_params = est['params']
EST = eval(est_class)(**est_params)
## make a pipeline from the above three components
pipeline = Pipeline([
('vect', FE),
('sel', FS),
('clf', EST),
])
## train the pipeline on train data
pipeline.fit(X_train, y_train)
## predict on test data
y_pred = pipeline.predict(X_test)
targetCols = [col['colName'] for col in d3mds.problem.get_targets()]
y_pred_df = pd.DataFrame(index=X_test.index, data=y_pred, columns=targetCols)
y_pred_df.to_csv(os.path.join('.','predictions.csv'))
## compute the score on test data
metrics = d3mds.problem.get_performance_metrics()
scoresdf = pd.DataFrame(columns=['metric','value'])
for item in metrics:
metric = item['metric']
if metric == 'f1Macro':
score = f1_score(y_test, y_pred, average='macro')
print('f1Macro', score)
scoresdf.loc[len(scoresdf)]=['f1Macro', score]
elif metric == 'meanSquaredError':
score = mean_squared_error(y_test, y_pred)
print('meanSquaredError', score)
scoresdf.loc[len(scoresdf)]=['meanSquaredError', score]
scoresdf.to_csv(os.path.join('.','scores.csv'))
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/pipeline.py
|
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
from numpy import ndarray
from scipy.sparse import csr_matrix
from pandas import DataFrame
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection.base import SelectorMixin
# https://stackoverflow.com/a/3862957
def get_all_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in get_all_subclasses(s)]
def sample_param_distributions(param_distributions):
try:
return sample_param_distributions_dict(param_distributions)
except AttributeError:
i = np.random.randint(len(param_distributions))
return sample_param_distributions_dict(param_distributions[i])
def sample_param_distributions_dict(param_distributions_dict):
params = {}
for k, v in param_distributions_dict.items():
i = np.random.randint(len(v))
params[k] = v[i]
return params
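# Illustrative use (hypothetical values): with param_distributions = {'alpha': [0.1, 1.0], 'fit_intercept': (True, False)},
# sample_param_distributions picks one value per key uniformly at random, e.g. {'alpha': 1.0, 'fit_intercept': False}.
# Passing a list/tuple of such dicts is also supported: one dict is chosen at random first, then sampled.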
class AbstractParameterized(ABC):
param_distributions = {}
@classmethod
def get_random_parameters(cls):
return sample_param_distributions(cls.param_distributions)
class AbstractFeatureExtractor(AbstractParameterized, BaseEstimator):
def fit(self, df, variables):
self.fit_transform(df, variables)
return self
@abstractmethod
def fit_transform(self, df, variables):
""" Fits the feature extractor
:param df:
:type df: DataFrame
:param variables:
:type variables: list[D3MVariable]
:return:
:rtype: csr_matrix
"""
pass
@abstractmethod
def transform(self, df):
""" Transforms the data
:param df:
:type df: DataFrame
:return:
:rtype: csr_matrix
"""
pass
class AbstractFeatureSelector(AbstractParameterized, BaseEstimator, SelectorMixin):
pass
class AbstractEstimator(AbstractParameterized, BaseEstimator):
@abstractmethod
def fit(self, X, y):
"""
:param X:
:type X: csr_matrix
:param y:
:type y: ndarray
:return:
:rtype: AbstractEstimator
"""
return self
@abstractmethod
def predict(self, X):
"""
:param X:
:type X: csr_matrix
:return:
:rtype: ndarray
"""
pass
|
d3m-model-search-master
|
test_data/test_cases_only/1491_one_hundred_plants_margin/1491_one_hundred_plants_margin_solution/modules/base.py
|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json, sys
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
        Returns the contents of the learningData file as a DataFrame.
If view is 'TRAIN' or 'TEST', then the full learningData is filtered to return learningData only for that view.
        For view-based filtering, the problem object has to be passed because this method uses the splitsData from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.loc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='NULL'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
dirname = os.path.basename(os.path.normpath(os.path.dirname(resPath)))
if resType =='table' and dirname=='tables':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
# if the for loop is over and learningDoc is not found, then return None
        raise RuntimeError('could not find learningData file in the dataset')
def _get_learning_data_resource(self):
"""
        Returns the learningData resource entry (metadata dict) from datasetDoc
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
# if the for loop is over and learningDoc is not found, then return None
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
Returns the problem schema version that was used to create this dataset
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
        Returns the data splits in a dataframe
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='NULL'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
X = df.shape[0]
Y = len(target_cols)
return (df[df.columns[target_cols]]).as_matrix().reshape(X,Y)
# return np.ravel(df[df.columns[target_cols]])
|
d3m-model-search-master
|
test_data/test_cases_only/59_umls/59_umls_solution/src/d3mds.py
|
# coding: utf-8
# In[23]:
import networkx as nx
import numpy as np
from scipy.io.matlab import loadmat
import sktensor, random
import pandas as pd
from scipy.sparse import lil_matrix
from sktensor.rescal import als as rescal_als
from numpy import zeros, dot
from numpy.linalg import norm
from sklearn.metrics import precision_recall_curve, auc, accuracy_score, roc_auc_score, roc_curve
from sklearn.preprocessing import normalize
import os, sys, json
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from collections import OrderedDict
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
dspath = os.path.join(here, '..', '..', '59_umls_dataset')
prpath = os.path.join(here, '..', '..', '59_umls_problem')
rawDataDir = os.path.join(dspath, "graphs")
solpath = os.path.join(here, '..')
assert os.path.exists(dspath)
assert os.path.exists(prpath)
assert os.path.exists(rawDataDir)
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
## LinkPrediction model
def tensorCompletion(T, V=[]):
"""
Complete the tensor by tensor factorization and recomposition (we use Rescal)
"""
def __predict_rescal_als(T, V=[]):
if V==[]:
A, R, _, _, _ = rescal_als(T, 100, init='nvecs', conv=1e-3, lambda_A=10, lambda_R=10)
else:
A, R, _, _, _ = rescal_als(T, 100, attr=[V], init='nvecs', conv=1e-3, lambda_A=10, lambda_R=10)
n = A.shape[0]
P = zeros((n, n, len(R)))
for k in range(len(R)):
P[:, :, k] = dot(A, dot(R[k], A.T))
return P
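    # RESCAL factorizes the adjacency tensor as X_k ~ A R_k A^T; reconstructing P[:, :, k] = A R_k A^T
    # therefore assigns every (entity_i, entity_j, relation_k) triple a continuous score, which is what
    # lets unobserved links be ranked by plausibility.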
def __normalize_predictions(P, e, k):
for a in range(e):
for b in range(e):
nrm = norm(P[a, b, :k])
if nrm != 0:
# round values for faster computation of AUC-PR
P[a, b, :k] = np.round_(P[a, b, :k] / nrm, decimals=3)
return P
e, k = T.shape[0], T.shape[2]
# Convert T into list of sparse matrices as required by Rescal
T = [lil_matrix(T[:, :, i]) for i in range(k)]
Tc = [Ti.copy() for Ti in T]
# call Rescal and normalize
P = __predict_rescal_als(Tc, V)
P = __normalize_predictions(P, e, k)
return P
# In[4]:
class LinkPrediciton():
def __init__(self, G):
"""
G is an instance of nx.MultiGraph
"""
# convert the graph into adjacency tensor
I = len(G.nodes())
J = I
K = len(set(nx.get_edge_attributes(G,'linkType').values()))
shape = (I, J, K)
# print(shape)
self.A = np.zeros(shape=shape)
for i,j,data in G.edges(data=True):
k = (data['linkType'])
self.A[i][j][k] = 1.
# print(self.A.shape)
def fit(self):
# self.A_completed = tensorCompletion(self.A, attrDF.as_matrix())
self.A_completed = tensorCompletion(self.A)
# print(np.amin(self.A_completed))
# print(np.amax(self.A_completed))
def predict(self, X):
"""
X is a DataFrame with columns=[source_nodeID, target_nodeID, linkType]
"""
def __predictLink(row, T):
k = int(row.linkType)
i = int(row.source_nodeID)
j = int(row.target_nodeID)
return int(round(T[i][j][k]))
X['linkExists']=X.apply(__predictLink, T=self.A_completed, axis=1)
return X
# ## Make pipeline
# initializations
random.seed(0)
graph = '%s/graph.gml'%rawDataDir
# In[20]:
# read the graph from gml file
print('read graph ...')
G = nx.read_gml(graph, label='id')
# set aside some edges (10%) for validation of the model
print('setting aside 10% of edges for validation and removing them from the graph ....')
edges_validation=pd.DataFrame(columns=['source_nodeID','target_nodeID','linkType'])
for i, (u,v,key,data) in enumerate(G.edges(data=True, keys=True)):
if random.random() < 0.1:
G.remove_edge(u,v,key=key)
edges_validation.loc[len(edges_validation)] = [u,v,data['linkType']]
print('number of edges set aside for validation:', len(edges_validation))
# In[21]:
# initialize the model
print('initializing the linkPrediction model ...')
lp = LinkPrediciton(G)
# fit the training graph
print('fitting the training graph ...')
lp.fit()
# make predictions on the validation data
print('making predictions on validation edges ...')
edges_prediction=lp.predict(edges_validation)
# compute accuracy on validation data
print('computing accuracy on validation data ...')
accuracy = len(edges_prediction[edges_prediction['linkExists']==1])/len(edges_prediction)
print('model accuracy:', accuracy)
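# Note: every validation row is a true edge that was removed from the graph, so this "accuracy" is
# really the fraction of held-out edges the model recovers (recall on positives); no negative
# (non-edge) pairs are scored here.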
# now train the model on the whole graph
print('training the model on the whole graph ...')
# read the graph from gml file
G = nx.read_gml(graph, label='id')
# initialize the model
lp = LinkPrediciton(G)
# fit the graph
lp.fit()
print('===============================================================================')
## Submit predictions on test data
print('predictions on test data ...')
testData = d3mds.get_test_data()
predictions = lp.predict(testData)
y_pred = pd.DataFrame(predictions['linkExists'])
y_truth = d3mds.get_test_targets().ravel()
score = accuracy_score(y_truth, y_pred)
print('model accuracy on test data:', score)
# saving the predictions.csv file
y_pred_df = pd.DataFrame(index=testData.index, data=y_pred, columns=[target['colName'] for target in d3mds.problem.get_targets()])
y_pred_df.to_csv(os.path.join(solpath, 'predictions.csv'))
# saving the scores.csv file
df = pd.DataFrame(columns=['metric', 'value'])
df.loc[len(df)] = ['accuracy', score]
df.to_csv(os.path.join(solpath, 'scores.csv'))
|
d3m-model-search-master
|
test_data/test_cases_only/59_umls/59_umls_solution/src/pipeline.py
|
from unittest import TestCase
from base import AbstractFeatureSelector
import numpy as np
from scipy import stats
from scipy.sparse import issparse
from sklearn.feature_selection import f_classif, SelectFromModel, SelectPercentile
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.utils import check_X_y
from sklearn.utils.extmath import safe_sparse_dot, row_norms
from scipy.linalg import norm
# modified to address the issue of centering sparse matrices with a bit of algebra
def better_f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target values.
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples*X_means**2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
class SelectFromLinearSVC(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'C': [float(x) for x in np.logspace(-2, 5, 100)]
}
def __init__(self, threshold=None, penalty='l1', loss='squared_hinge', dual=False, tol=0.0001, C=1.0, fit_intercept=True, random_state=None, max_iter=1000):
self.threshold = threshold
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.random_state = random_state
self.max_iter = max_iter
def fit(self, X, y):
        self.linear_svc = LinearSVC(penalty=self.penalty, loss=self.loss, dual=self.dual, tol=self.tol, C=self.C,
                                    fit_intercept=self.fit_intercept, random_state=self.random_state,
                                    max_iter=self.max_iter)
self.linear_svc.fit(X, y)
self.select_from_model = SelectFromModel(self.linear_svc, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileClassification(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_classif',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_classif': f_classif
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
class SelectFromLasso(AbstractFeatureSelector):
param_distributions = {
'threshold': (1e-5,),
'alpha': [float(x) for x in np.logspace(-5, 2, 100)]
}
def __init__(self, threshold=None, alpha=1.0, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, positive=False, selection='cyclic', random_state=None):
self.threshold = threshold
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.positive = positive
self.selection = selection
self.random_state = random_state
def fit(self, X, y):
# NOTE: y is an ndarray of strings
self.lasso = Lasso(alpha=self.alpha, fit_intercept=self.fit_intercept, normalize=self.normalize,
max_iter=self.max_iter, tol=self.tol, positive=self.positive, selection=self.selection,
random_state=self.random_state)
self.lasso.fit(X, y)
self.select_from_model = SelectFromModel(self.lasso, threshold=self.threshold, prefit=True)
return self
def _get_support_mask(self):
return self.select_from_model._get_support_mask()
class SelectPercentileRegression(AbstractFeatureSelector, SelectPercentile):
param_distributions = {
'score_func': ('f_regression',),
'percentile': [int(x) for x in np.linspace(10, 100, 100)]
}
score_funcs = {
'f_regression': better_f_regression
}
def __init__(self, *args, **kwargs):
if 'score_func' in kwargs:
kwargs['score_func'] = self.score_funcs[kwargs['score_func']]
super().__init__(*args, **kwargs)
def fit(self, X, y):
# NOTE: y is an ndarray of strings
super().fit(X, y)
return self
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/feature_selection.py
|
from base import AbstractEstimator
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier, SGDRegressor
class SGDClassifierEstimator(AbstractEstimator):
param_distributions = {
'loss': ('hinge', 'log', 'squared_hinge', 'perceptron'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False)
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
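        # Common scikit-learn heuristic: pick the number of epochs so the total number of SGD updates
        # is on the order of 10**6 (n_iter ~ 10**6 / n_samples), with a floor of 5 epochs.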
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_classifier = SGDClassifier(*self.args, **self.kwargs)
        self.sgd_classifier.fit(X, y)
        return self
def predict(self, X):
return self.sgd_classifier.predict(X)
class SGDRegressorEstimator(AbstractEstimator):
param_distributions = {
'loss': ('squared_loss', 'huber'),
'penalty': ('elasticnet',),
'alpha': [float(x) for x in np.logspace(-9, 0, 10)],
'l1_ratio': [float(x) for x in np.linspace(0, 1, 11)],
'fit_intercept': (True, True, True, False),
'epsilon': [float(x) for x in np.logspace(-2, 0, 5)],
'learning_rate': ('optimal', 'invscaling'),
'eta0': (0.1, 0.01, 0.001),
'power_t': [float(x) for x in np.linspace(0, 1, 5)]
}
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, y):
n_samples = X.shape[0]
self.kwargs['n_iter'] = max(5, int(10**6 / n_samples))
self.sgd_regressor = SGDRegressor(*self.args, **self.kwargs)
        self.sgd_regressor.fit(X, y)
        return self
def predict(self, X):
return self.sgd_regressor.predict(X)
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDClassifierEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdclassifier = SGDClassifier(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdclassifier.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdclassifier.predict(self.transform(X))
def decision_function(self, X):
return self.sgdclassifier.decision_function(self.transform(X))
# TODO: inherit AbstractEstimator, grab param_distributions from cv_setup_map.py in the old slacker,
class RBFSamplerSGDRegressorEstimator(BaseEstimator, TransformerMixin):
def __init__(self, gamma=1.0, n_components=100, random_state=None, **kwargs):
kwargs['random_state'] = random_state
self.rbf_sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=random_state)
self.sgdregressor = SGDRegressor(**kwargs)
def fit(self, X, y):
X = self.rbf_sampler.fit_transform(X)
self.sgdregressor.fit(X, y)
return self
def transform(self, X, y=None):
return np.sqrt(self.rbf_sampler.n_components) / np.sqrt(2.) * self.rbf_sampler.transform(X)
def predict(self, X):
return self.sgdregressor.predict(self.transform(X))
# TODO: Add kernel SVM
# TODO: Add kernel ridge regressor
# TODO: Add random forests / xgboost
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/estimation.py
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/__init__.py
|
|
from collections import defaultdict, OrderedDict
import numpy as np
from scipy import signal
from scipy.sparse import csr_matrix, hstack
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer, OneHotEncoder, StandardScaler
from sklearn.utils.validation import check_is_fitted
from base import AbstractFeatureExtractor
class DenseMixedStrategyImputer(BaseEstimator, TransformerMixin):
def __init__(self, missing_values='NaN', strategies=None, add_missing_indicator=True, verbose=False):
self.missing_values = missing_values
if strategies is None:
raise ValueError('Must provide strategy.')
allowed_strategies = ['mean', 'median', 'most_frequent']
if any(s not in allowed_strategies for s in strategies):
raise ValueError('Invalid strategy in list.')
self.strategies = strategies
self.add_missing_indicator = add_missing_indicator
self.verbose = verbose
def fit(self, X, y=None):
n_samples, n_features = X.shape
print('n_features',n_features)
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
self.impute_strategies = list(set(self.strategies))
self.impute_indices = [np.array([i for i, x in enumerate(self.strategies) if x == s]) for s in self.impute_strategies]
self.impute_valid_indices = []
self.imputers = [Imputer(missing_values=self.missing_values, strategy=s, verbose=self.verbose) for s in
self.impute_strategies]
for indices, imputer in zip(self.impute_indices, self.imputers):
imputer.fit(X[:, indices])
valid_mask = np.logical_not(np.isnan(imputer.statistics_))
self.impute_valid_indices.append(indices[valid_mask])
return self
def transform(self, X):
n_samples, n_features = X.shape
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
check_is_fitted(self, 'imputers')
if self.add_missing_indicator:
output_scale = 2
else:
output_scale = 1
X_out = np.zeros((n_samples, output_scale*n_features))
for input_indices, output_indices, imputer in zip(self.impute_indices, self.impute_valid_indices, self.imputers):
X_out[:, output_scale*output_indices] = imputer.transform(X[:, input_indices])
if self.add_missing_indicator:
X_out[:, np.arange(1, 2*n_features, 2)] = np.isnan(X).astype('float', copy=False)
return X_out
class DataFrameCategoricalEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.code_maps = {}
for k in X.columns:
self.code_maps[k] = defaultdict(lambda: np.nan)
self.code_maps[k].update({v: k for k, v in enumerate(X[k].astype('category').cat.categories)})
return self
def transform(self, X):
if set(X.columns) != set(self.code_maps):
raise ValueError('Columns do not match fit model.')
return X.apply(lambda x: x.apply(lambda y: self.code_maps[x.name][y])).as_matrix()
class AnnotatedTabularExtractor(AbstractFeatureExtractor):
param_distributions = {
'normalize_text': [True, False],
'categorize': [True, False],
'numeric_strategy': ['mean', 'median'],
'add_missing_indicator': [True, False]
}
def __init__(self, normalize_text=False, categorize=False, numeric_strategy='mean', add_missing_indicator=True):
self.normalize_text = normalize_text
self.categorize = categorize
self.numeric_strategy = numeric_strategy
self.add_missing_indicator = add_missing_indicator
def set_cols_info(self, cols_info):
self.cols_info = cols_info
def determine_colType(self, column):
variables = self.cols_info
for var in variables:
var_colName = var['colName']
if str(var_colName) != str(column):
continue
var_colType = var['colType']
if var_colType in {'categorical', 'boolean'}:
return 'categorical'
elif var_colType in {'integer', 'real'}:
return 'numeric'
elif var_colType == 'string':
return 'text'
elif var_colType == 'dateTime':
                raise RuntimeError('dateTime not implemented in this feature extractor yet !!')
def fit_transform(self, df, variables):
df = self.copy_normalize_text(df)
self.column_types = OrderedDict()
for column in df:
itype = self.determine_colType(column)
# print('itype',itype)
self.column_types[column] = itype
self.numeric_columns = [column for column, type in self.column_types.items() if type == 'numeric']
self.categorical_columns = [column for column, type in self.column_types.items() if type == 'categorical']
self.text_columns = [column for column, type in self.column_types.items() if type == 'text']
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
self.numeric_imputer = DenseMixedStrategyImputer(
strategies=[self.numeric_strategy]*len(self.numeric_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.numeric_imputer.fit_transform(X)
self.numeric_scaler = StandardScaler()
output_arrays.append(self.numeric_scaler.fit_transform(X))
if len(self.categorical_columns) > 0:
self.categorical_encoder = DataFrameCategoricalEncoder()
X = self.categorical_encoder.fit_transform(df[self.categorical_columns])
self.categorical_imputer = DenseMixedStrategyImputer(
strategies=['most_frequent']*len(self.categorical_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.categorical_imputer.fit_transform(X)
self.one_hot_encoder = OneHotEncoder(
categorical_features=np.arange(len(self.categorical_columns)) * (2 if self.add_missing_indicator else 1)
)
output_arrays.append(self.one_hot_encoder.fit_transform(X))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def transform(self, df):
check_is_fitted(self, 'column_types')
if list(df) != list(self.column_types):
raise ValueError('Data to be transformed does not match fitting data.')
df = self.copy_normalize_text(df)
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
output_arrays.append(self.numeric_scaler.transform(self.numeric_imputer.transform(X)))
if len(self.categorical_columns) > 0:
X = self.categorical_encoder.transform(df[self.categorical_columns])
output_arrays.append(self.one_hot_encoder.transform(self.categorical_imputer.transform(X)))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def copy_normalize_text(self, df):
df = df.copy()
if self.normalize_text:
for column in df:
try:
df[column] = df[column].str.lower().str.strip()
except:
df[column] = df[column]
return df
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/feature_extraction.py
|