python_code | repo_name | file_path
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
co3d-main
|
co3d/dataset/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import dataclasses
import gzip
import json
from dataclasses import dataclass, Field, MISSING
from typing import Any, cast, Dict, IO, Optional, Tuple, Type, TypeVar, Union
import numpy as np
if sys.version_info >= (3, 8, 0):
from typing import get_args, get_origin
elif sys.version_info >= (3, 7, 0):
def get_origin(cls): # pragma: no cover
return getattr(cls, "__origin__", None)
def get_args(cls): # pragma: no cover
return getattr(cls, "__args__", None)
else:
raise ImportError("This module requires Python 3.7+")
_X = TypeVar("_X")
TF3 = Tuple[float, float, float]
@dataclass
class ImageAnnotation:
# path to jpg file, relative w.r.t. dataset_root
path: str
# H x W
size: Tuple[int, int] # TODO: rename size_hw?
@dataclass
class DepthAnnotation:
# path to png file, relative w.r.t. dataset_root, storing `depth / scale_adjustment`
path: str
# a factor to convert png values to actual depth: `depth = png * scale_adjustment`
scale_adjustment: float
# path to png file, relative w.r.t. dataset_root, storing binary `depth` mask
mask_path: Optional[str]
@dataclass
class MaskAnnotation:
# path to png file storing (Prob(fg | pixel) * 255)
path: str
# (soft) number of pixels in the mask; sum(Prob(fg | pixel))
mass: Optional[float] = None
@dataclass
class ViewpointAnnotation:
# In right-multiply (PyTorch3D) format. X_cam = X_world @ R + T
R: Tuple[TF3, TF3, TF3]
T: TF3
focal_length: Tuple[float, float]
principal_point: Tuple[float, float]
intrinsics_format: str = "ndc_norm_image_bounds"
# Defines the co-ordinate system where focal_length and principal_point live.
# Possible values: ndc_isotropic | ndc_norm_image_bounds (default)
# ndc_norm_image_bounds: legacy PyTorch3D NDC format, where image boundaries
# correspond to [-1, 1] x [-1, 1], and the scale along x and y may differ
# ndc_isotropic: PyTorch3D 0.5+ NDC convention where the shorter side has
# the range [-1, 1], and the longer one has the range [-s, s]; s >= 1,
# where s is the aspect ratio. The scale is same along x and y.
@dataclass
class FrameAnnotation:
"""A dataclass used to load annotations from json."""
# can be used to join with `SequenceAnnotation`
sequence_name: str
# 0-based, continuous frame number within sequence
frame_number: int
# timestamp in seconds from the video start
frame_timestamp: float
image: ImageAnnotation
depth: Optional[DepthAnnotation] = None
mask: Optional[MaskAnnotation] = None
viewpoint: Optional[ViewpointAnnotation] = None
meta: Optional[Dict[str, Any]] = None
@dataclass
class PointCloudAnnotation:
# path to ply file with points only, relative w.r.t. dataset_root
path: str
# the bigger the better
quality_score: float
n_points: Optional[int]
@dataclass
class VideoAnnotation:
# path to the original video file, relative w.r.t. dataset_root
path: str
# length of the video in seconds
length: float
@dataclass
class SequenceAnnotation:
sequence_name: str
category: str
video: Optional[VideoAnnotation] = None
point_cloud: Optional[PointCloudAnnotation] = None
# the bigger the better
viewpoint_quality_score: Optional[float] = None
def dump_dataclass(obj: Any, f: IO, binary: bool = False) -> None:
"""
Args:
f: Either a path to a file, or a file opened for writing.
obj: A @dataclass or collection hierarchy including dataclasses.
binary: Set to True if `f` is a file handle, else False.
"""
if binary:
f.write(json.dumps(_asdict_rec(obj)).encode("utf8"))
else:
json.dump(_asdict_rec(obj), f)
def load_dataclass(f: IO, cls: Type[_X], binary: bool = False) -> _X:
"""
Loads to a @dataclass or collection hierarchy including dataclasses
from a json recursively.
    Call it like load_dataclass(f, typing.List[FrameAnnotation]).
    Raises KeyError if the json contains keys that do not map to the dataclass fields.
    Args:
        f: Either a path to a file, or a file opened for reading.
cls: The class of the loaded dataclass.
binary: Set to True if `f` is a file handle, else False.
"""
if binary:
asdict = json.loads(f.read().decode("utf8"))
else:
asdict = json.load(f)
if isinstance(asdict, list):
# in the list case, run a faster "vectorized" version
cls = get_args(cls)[0]
res = list(_dataclass_list_from_dict_list(asdict, cls))
else:
res = _dataclass_from_dict(asdict, cls)
return res
def _dataclass_list_from_dict_list(dlist, typeannot):
"""
Vectorised version of `_dataclass_from_dict`.
The output should be equivalent to
`[_dataclass_from_dict(d, typeannot) for d in dlist]`.
Args:
dlist: list of objects to convert.
typeannot: type of each of those objects.
Returns:
iterator or list over converted objects of the same length as `dlist`.
Raises:
ValueError: it assumes the objects have None's in consistent places across
objects, otherwise it would ignore some values. This generally holds for
auto-generated annotations, but otherwise use `_dataclass_from_dict`.
"""
cls = get_origin(typeannot) or typeannot
if typeannot is Any:
return dlist
if all(obj is None for obj in dlist): # 1st recursion base: all None nodes
return dlist
if any(obj is None for obj in dlist):
# filter out Nones and recurse on the resulting list
idx_notnone = [(i, obj) for i, obj in enumerate(dlist) if obj is not None]
idx, notnone = zip(*idx_notnone)
converted = _dataclass_list_from_dict_list(notnone, typeannot)
res = [None] * len(dlist)
for i, obj in zip(idx, converted):
res[i] = obj
return res
is_optional, contained_type = _resolve_optional(typeannot)
if is_optional:
return _dataclass_list_from_dict_list(dlist, contained_type)
# otherwise, we dispatch by the type of the provided annotation to convert to
if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple
# For namedtuple, call the function recursively on the lists of corresponding keys
types = cls._field_types.values()
dlist_T = zip(*dlist)
res_T = [
_dataclass_list_from_dict_list(key_list, tp)
for key_list, tp in zip(dlist_T, types)
]
return [cls(*converted_as_tuple) for converted_as_tuple in zip(*res_T)]
elif issubclass(cls, (list, tuple)):
# For list/tuple, call the function recursively on the lists of corresponding positions
types = get_args(typeannot)
if len(types) == 1: # probably List; replicate for all items
types = types * len(dlist[0])
dlist_T = zip(*dlist)
res_T = (
_dataclass_list_from_dict_list(pos_list, tp)
for pos_list, tp in zip(dlist_T, types)
)
if issubclass(cls, tuple):
return list(zip(*res_T))
else:
return [cls(converted_as_tuple) for converted_as_tuple in zip(*res_T)]
elif issubclass(cls, dict):
        # For the dictionary, call the function recursively on concatenated keys and values
key_t, val_t = get_args(typeannot)
all_keys_res = _dataclass_list_from_dict_list(
[k for obj in dlist for k in obj.keys()], key_t
)
all_vals_res = _dataclass_list_from_dict_list(
[k for obj in dlist for k in obj.values()], val_t
)
indices = np.cumsum([len(obj) for obj in dlist])
assert indices[-1] == len(all_keys_res)
keys = np.split(list(all_keys_res), indices[:-1])
# vals = np.split(all_vals_res, indices[:-1])
all_vals_res_iter = iter(all_vals_res)
return [cls(zip(k, all_vals_res_iter)) for k in keys]
elif not dataclasses.is_dataclass(typeannot):
return dlist
# dataclass node: 2nd recursion base; call the function recursively on the lists
# of the corresponding fields
assert dataclasses.is_dataclass(cls)
fieldtypes = {
f.name: (_unwrap_type(f.type), _get_dataclass_field_default(f))
for f in dataclasses.fields(typeannot)
}
# NOTE the default object is shared here
key_lists = (
_dataclass_list_from_dict_list([obj.get(k, default) for obj in dlist], type_)
for k, (type_, default) in fieldtypes.items()
)
transposed = zip(*key_lists)
return [cls(*vals_as_tuple) for vals_as_tuple in transposed]
def _dataclass_from_dict(d, typeannot):
if d is None or typeannot is Any:
return d
is_optional, contained_type = _resolve_optional(typeannot)
if is_optional:
# an Optional not set to None, just use the contents of the Optional.
return _dataclass_from_dict(d, contained_type)
cls = get_origin(typeannot) or typeannot
if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple
types = cls._field_types.values()
return cls(*[_dataclass_from_dict(v, tp) for v, tp in zip(d, types)])
elif issubclass(cls, (list, tuple)):
types = get_args(typeannot)
if len(types) == 1: # probably List; replicate for all items
types = types * len(d)
return cls(_dataclass_from_dict(v, tp) for v, tp in zip(d, types))
elif issubclass(cls, dict):
key_t, val_t = get_args(typeannot)
return cls(
(_dataclass_from_dict(k, key_t), _dataclass_from_dict(v, val_t))
for k, v in d.items()
)
elif not dataclasses.is_dataclass(typeannot):
return d
assert dataclasses.is_dataclass(cls)
fieldtypes = {f.name: _unwrap_type(f.type) for f in dataclasses.fields(typeannot)}
return cls(**{k: _dataclass_from_dict(v, fieldtypes[k]) for k, v in d.items()})
def _unwrap_type(tp):
# strips Optional wrapper, if any
if get_origin(tp) is Union:
args = get_args(tp)
if len(args) == 2 and any(a is type(None) for a in args): # noqa: E721
# this is typing.Optional
return args[0] if args[1] is type(None) else args[1] # noqa: E721
return tp
def _get_dataclass_field_default(field: Field) -> Any:
if field.default_factory is not MISSING:
# pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE,
# dataclasses._DefaultFactory[typing.Any]]` is not a function.
return field.default_factory()
elif field.default is not MISSING:
return field.default
else:
return None
def _asdict_rec(obj):
return dataclasses._asdict_inner(obj, dict)
def dump_dataclass_jgzip(outfile: str, obj: Any) -> None:
"""
Dumps obj to a gzipped json outfile.
Args:
        obj: A @dataclass or collection hierarchy including dataclasses.
outfile: The path to the output file.
"""
with gzip.GzipFile(outfile, "wb") as f:
dump_dataclass(obj, cast(IO, f), binary=True)
def load_dataclass_jgzip(outfile, cls):
"""
Loads a dataclass from a gzipped json outfile.
Args:
outfile: The path to the loaded file.
cls: The type annotation of the loaded dataclass.
Returns:
loaded_dataclass: The loaded dataclass.
"""
with gzip.GzipFile(outfile, "rb") as f:
return load_dataclass(cast(IO, f), cls, binary=True)
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if get_origin(type_) is Union:
args = get_args(type_)
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
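# A minimal usage sketch of the helpers above: loading a category's frame annotations.
# It assumes the CO3Dv2 layout `<dataset_root>/<category>/frame_annotations.jgz`; adjust
# the (hypothetical) path if your copy of the dataset is organised differently.
def _example_load_frame_annotations(dataset_root: str, category: str):
    import os
    from typing import List
    annotation_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
    # load_dataclass_jgzip decompresses the gzipped json and recursively converts it
    # into a list of FrameAnnotation dataclasses (see load_dataclass above)
    return load_dataclass_jgzip(annotation_file, List[FrameAnnotation])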
|
co3d-main
|
co3d/dataset/data_types.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import copy
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from co3d.challenge.data_types import CO3DTask, CO3DSequenceSet
def redact_eval_frame_data(fd: FrameData) -> FrameData:
"""
Redact all information about the test element (1st image)
of the evaluation frame data `fd`.
    This is done by zeroing the first element of each relevant tensor in a copy of `fd`,
    followed by removing the sequence point cloud fields.
"""
fd_redacted = copy.deepcopy(fd)
for redact_field_name in [
"fg_probability",
"image_rgb",
"depth_map",
"mask_crop",
]:
        # zero out the first (test) element of the redacted tensor
        field_val = getattr(fd_redacted, redact_field_name)
field_val[:1] *= 0
# also remove the point cloud info
fd_redacted.sequence_point_cloud_idx = None
fd_redacted.sequence_point_cloud = None
return fd_redacted
def _check_valid_eval_frame_data(
fd: FrameData,
task: CO3DTask,
sequence_set: CO3DSequenceSet,
):
"""
Check that the evaluation batch `fd` is redacted correctly.
"""
is_redacted = torch.stack(
[
getattr(fd, k).abs().sum((1,2,3)) <= 0
for k in ["image_rgb", "depth_map", "fg_probability"]
]
)
if sequence_set==CO3DSequenceSet.TEST:
# first image has to be redacted
assert is_redacted[:, 0].all()
# all depth maps have to be redacted
assert is_redacted[1, :].all()
# no known views should be redacted
assert not is_redacted[:, 1:].all(dim=0).any()
elif sequence_set==CO3DSequenceSet.DEV:
# nothing should be redacted
assert not is_redacted.all(dim=0).any()
else:
raise ValueError(sequence_set)
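# A minimal usage sketch, assuming `eval_frame_data` is a FrameData batch collated with
# FrameData.collate (as in the challenge evaluation loop): first verify that the batch is
# redacted as expected for the given sequence set, then locally redact the target view
# before handing the batch to a model.
def _example_check_and_redact(eval_frame_data: FrameData) -> FrameData:
    _check_valid_eval_frame_data(
        eval_frame_data, CO3DTask.MANY_VIEW, CO3DSequenceSet.TEST
    )
    return redact_eval_frame_data(eval_frame_data)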
|
co3d-main
|
co3d/dataset/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import requests
import functools
import json
import warnings
from argparse import ArgumentParser
from typing import List, Optional
from multiprocessing import Pool
from tqdm import tqdm
from .check_checksum import check_co3d_sha256
def download_dataset(
link_list_file: str,
download_folder: str,
n_download_workers: int = 4,
n_extract_workers: int = 4,
download_categories: Optional[List[str]] = None,
checksum_check: bool = False,
single_sequence_subset: bool = False,
clear_archives_after_unpacking: bool = False,
skip_downloaded_archives: bool = True,
sha256s_file: Optional[str] = None,
):
"""
Downloads and unpacks the dataset in CO3D format.
Note: The script will make a folder `<download_folder>/_in_progress`, which
stores files whose download is in progress. The folder can be safely deleted
    once the download is finished.
Args:
link_list_file: A text file with the list of zip file download links.
download_folder: A local target folder for downloading the
            dataset files.
n_download_workers: The number of parallel workers
for downloading the dataset files.
n_extract_workers: The number of parallel workers
for extracting the dataset files.
download_categories: A list of categories to download.
If `None`, downloads all.
checksum_check: Enable validation of the downloaded file's checksum before
extraction.
single_sequence_subset: Whether the downloaded dataset is the single-sequence
subset of the full dataset.
clear_archives_after_unpacking: Delete the unnecessary downloaded archive files
after unpacking.
        skip_downloaded_archives: Skip re-downloading already downloaded archives.
        sha256s_file: A path to the json file with the expected SHA256 checksums of the
            individual dataset archives.
"""
if checksum_check and not sha256s_file:
raise ValueError(
"checksum_check is requested but ground-truth SHA256 file not provided!"
)
if not os.path.isfile(link_list_file):
raise ValueError(
"Please specify `link_list_file` with a valid path to a json"
" with zip file download links."
" For CO3Dv2, the file is stored in the co3d github:"
" https://github.com/facebookresearch/co3d/blob/main/co3d/links.json"
)
if not os.path.isdir(download_folder):
raise ValueError(
"Please specify `download_folder` with a valid path to a target folder"
+ " for downloading the dataset."
+ f" {download_folder} does not exist."
)
# read the link file
with open(link_list_file, "r") as f:
links = json.load(f)
# get the full dataset links or the single-sequence subset links
links = links["singlesequence"] if single_sequence_subset else links["full"]
# split to data links and the links containing json metadata
metadata_links = []
data_links = []
for category_name, urls in links.items():
for url in urls:
link_name = os.path.split(url)[-1]
if single_sequence_subset:
link_name = link_name.replace("_singlesequence", "")
if category_name.upper() == "METADATA":
metadata_links.append((link_name, url))
else:
data_links.append((category_name, link_name, url))
if download_categories is not None:
co3d_categories = set(l[0] for l in data_links)
not_in_co3d = [c for c in download_categories if c not in co3d_categories]
if len(not_in_co3d) > 0:
raise ValueError(
f"download_categories {str(not_in_co3d)} are not valid"
+ "dataset categories."
)
data_links = [(c, ln, l) for c, ln, l in data_links if c in download_categories]
with Pool(processes=n_download_workers) as download_pool:
print(f"Downloading {len(metadata_links)} dataset metadata files ...")
for _ in tqdm(
download_pool.imap(
functools.partial(_download_metadata_file, download_folder),
metadata_links,
),
total=len(metadata_links),
):
pass
print(f"Downloading {len(data_links)} dataset files ...")
download_ok = {}
for link_name, ok in tqdm(
download_pool.imap(
functools.partial(
_download_category_file,
download_folder,
checksum_check,
single_sequence_subset,
sha256s_file,
skip_downloaded_archives,
),
data_links,
),
total=len(data_links),
):
download_ok[link_name] = ok
if not all(download_ok.values()):
not_ok_links = [n for n, ok in download_ok.items() if not ok]
not_ok_links_str = "\n".join(not_ok_links)
raise AssertionError(
"The SHA256 checksums did not match for some of the downloaded files:\n"
+ not_ok_links_str + "\n"
+ "This is most likely due to a network failure."
+ " Please restart the download script."
)
metadata_links = [ml for ml in metadata_links if ml[1].endswith(".zip")]
print(f"Extracting {len(data_links)} dataset files and {len(metadata_links)} metadata files...")
with Pool(processes=n_extract_workers) as extract_pool:
for _ in tqdm(
extract_pool.imap(
functools.partial(
_unpack_category_file,
download_folder,
clear_archives_after_unpacking,
),
metadata_links + data_links,
),
total=len(metadata_links) + len(data_links),
):
pass
print("Done")
def build_arg_parser(
dataset_name: str,
default_link_list_file: str,
default_sha256_file: str,
) -> ArgumentParser:
parser = ArgumentParser(description=f"Download the {dataset_name} dataset.")
parser.add_argument(
"--download_folder",
type=str,
required=True,
help="A local target folder for downloading the the dataset files.",
)
parser.add_argument(
"--n_download_workers",
type=int,
default=4,
help="The number of parallel workers for downloading the dataset files.",
)
parser.add_argument(
"--n_extract_workers",
type=int,
default=4,
help="The number of parallel workers for extracting the dataset files.",
)
parser.add_argument(
"--download_categories",
type=lambda x: [x_.strip() for x_ in x.split(",")],
default=None,
help=f"A comma-separated list of {dataset_name} categories to download."
+ " Example: 'orange,car' will download only oranges and cars",
)
parser.add_argument(
"--link_list_file",
type=str,
default=default_link_list_file,
help=(
f"The file with html links to the {dataset_name} dataset files."
+ " In most cases the default local file `links.json` should be used."
),
)
parser.add_argument(
"--sha256_file",
type=str,
default=default_sha256_file,
help=(
f"The file with SHA256 hashes of {dataset_name} dataset files."
+ " In most cases the default local file `co3d_sha256.json` should be used."
),
)
parser.add_argument(
"--checksum_check",
action="store_true",
default=True,
help="Check the SHA256 checksum of each downloaded file before extraction.",
)
parser.add_argument(
"--no_checksum_check",
action="store_false",
dest="checksum_check",
default=False,
help="Does not check the SHA256 checksum of each downloaded file before extraction.",
)
parser.set_defaults(checksum_check=True)
parser.add_argument(
"--clear_archives_after_unpacking",
action="store_true",
default=False,
help="Delete the unnecessary downloaded archive files after unpacking.",
)
parser.add_argument(
"--redownload_existing_archives",
action="store_true",
default=False,
help="Redownload the already-downloaded archives.",
)
return parser
def _unpack_category_file(
download_folder: str,
clear_archive: bool,
link: str,
):
*_, link_name, url = link
local_fl = os.path.join(download_folder, link_name)
print(f"Unpacking dataset file {local_fl} ({link_name}) to {download_folder}.")
shutil.unpack_archive(local_fl, download_folder)
if clear_archive:
os.remove(local_fl)
def _download_category_file(
download_folder: str,
checksum_check: bool,
single_sequence_subset: bool,
sha256s_file: Optional[str],
skip_downloaded_files: bool,
link: str,
):
category, link_name, url = link
local_fl_final = os.path.join(download_folder, link_name)
if skip_downloaded_files and os.path.isfile(local_fl_final):
print(f"Skipping {local_fl_final}, already downloaded!")
return link_name, True
in_progress_folder = os.path.join(download_folder, "_in_progress")
os.makedirs(in_progress_folder, exist_ok=True)
local_fl = os.path.join(in_progress_folder, link_name)
print(f"Downloading dataset file {link_name} ({url}) to {local_fl}.")
_download_with_progress_bar(url, local_fl, link_name)
if checksum_check:
print(f"Checking SHA256 for {local_fl}.")
try:
check_co3d_sha256(
local_fl,
sha256s_file=sha256s_file,
single_sequence_subset=single_sequence_subset,
)
except AssertionError:
warnings.warn(
f"Checksums for {local_fl} did not match!"
+ " This is likely due to a network failure,"
+ " please restart the download script."
)
return link_name, False
os.rename(local_fl, local_fl_final)
return link_name, True
def _download_metadata_file(download_folder: str, link: str):
local_fl = os.path.join(download_folder, link[0])
# remove the singlesequence postfix in case we are downloading the s.s. subset
local_fl = local_fl.replace("_singlesequence", "")
print(f"Downloading dataset metadata file {link[1]} ({link[0]}) to {local_fl}.")
_download_with_progress_bar(link[1], local_fl, link[0])
def _download_with_progress_bar(url: str, fname: str, filename: str):
# taken from https://stackoverflow.com/a/62113293/986477
resp = requests.get(url, stream=True)
print(url)
total = int(resp.headers.get("content-length", 0))
with open(fname, "wb") as file, tqdm(
desc=fname,
total=total,
unit="iB",
unit_scale=True,
unit_divisor=1024,
) as bar:
for datai, data in enumerate(resp.iter_content(chunk_size=1024)):
size = file.write(data)
bar.update(size)
if datai % max((max(total // 1024, 1) // 20), 1) == 0:
print(f"{filename}: Downloaded {100.0*(float(bar.n)/max(total, 1)):3.1f}%.")
print(bar)
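# A minimal entry-point sketch wiring `build_arg_parser` to `download_dataset`. The default
# file names below ("links.json", "co3d_sha256.json") mirror the argument help strings;
# the actual download scripts shipped with the repo may pass different defaults.
def _example_download_main():
    parser = build_arg_parser("CO3D", "links.json", "co3d_sha256.json")
    args = parser.parse_args()
    download_dataset(
        str(args.link_list_file),
        str(args.download_folder),
        n_download_workers=int(args.n_download_workers),
        n_extract_workers=int(args.n_extract_workers),
        download_categories=args.download_categories,
        checksum_check=bool(args.checksum_check),
        clear_archives_after_unpacking=bool(args.clear_archives_after_unpacking),
        # the --redownload_existing_archives flag inverts the skip behaviour
        skip_downloaded_archives=not bool(args.redownload_existing_archives),
        sha256s_file=str(args.sha256_file),
    )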
|
co3d-main
|
co3d/dataset/download_dataset_impl.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import hashlib
import json
from typing import Optional
from multiprocessing import Pool
from tqdm import tqdm
DEFAULT_SHA256S_FILE = os.path.join(__file__.rsplit(os.sep, 2)[0], "co3d_sha256.json")
BLOCKSIZE = 65536
def main(
download_folder: str,
sha256s_file: str,
dump: bool = False,
n_sha256_workers: int = 4,
single_sequence_subset: bool = False,
):
if not os.path.isfile(sha256s_file):
raise ValueError(f"The SHA256 file does not exist ({sha256s_file}).")
expected_sha256s = get_expected_sha256s(
sha256s_file=sha256s_file,
single_sequence_subset=single_sequence_subset,
)
zipfiles = sorted(glob.glob(os.path.join(download_folder, "*.zip")))
print(f"Extracting SHA256 hashes for {len(zipfiles)} files in {download_folder}.")
extracted_sha256s_list = []
with Pool(processes=n_sha256_workers) as sha_pool:
for extracted_hash in tqdm(
sha_pool.imap(_sha256_file_and_print, zipfiles),
total=len(zipfiles),
):
extracted_sha256s_list.append(extracted_hash)
pass
extracted_sha256s = dict(
zip([os.path.split(z)[-1] for z in zipfiles], extracted_sha256s_list)
)
if dump:
print(extracted_sha256s)
with open(sha256s_file, "w") as f:
json.dump(extracted_sha256s, f, indent=2)
missing_keys, invalid_keys = [], []
for k in expected_sha256s.keys():
if k not in extracted_sha256s:
print(f"{k} missing!")
missing_keys.append(k)
elif expected_sha256s[k] != extracted_sha256s[k]:
print(
f"'{k}' does not match!"
+ f" ({expected_sha256s[k]} != {extracted_sha256s[k]})"
)
invalid_keys.append(k)
if len(invalid_keys) + len(missing_keys) > 0:
raise ValueError(
f"Checksum checker failed!"
+ f" Non-matching checksums: {str(invalid_keys)};"
+ f" missing files: {str(missing_keys)}."
)
def get_expected_sha256s(
sha256s_file: str,
single_sequence_subset: bool = False,
):
with open(sha256s_file, "r") as f:
expected_sha256s = json.load(f)
if single_sequence_subset:
return expected_sha256s["singlesequence"]
else:
return expected_sha256s["full"]
def check_co3d_sha256(
path: str,
sha256s_file: str,
expected_sha256s: Optional[dict] = None,
single_sequence_subset: bool = False,
do_assertion: bool = True,
):
zipname = os.path.split(path)[-1]
if expected_sha256s is None:
expected_sha256s = get_expected_sha256s(
sha256s_file=sha256s_file,
single_sequence_subset=single_sequence_subset,
)
extracted_hash = sha256_file(path)
if do_assertion:
assert (
extracted_hash == expected_sha256s[zipname]
), f"{zipname}: ({extracted_hash} != {expected_sha256s[zipname]})"
else:
return extracted_hash == expected_sha256s[zipname]
def sha256_file(path: str):
sha256_hash = hashlib.sha256()
with open(path, "rb") as f:
file_buffer = f.read(BLOCKSIZE)
while len(file_buffer) > 0:
sha256_hash.update(file_buffer)
file_buffer = f.read(BLOCKSIZE)
digest_ = sha256_hash.hexdigest()
# print(f"{digest_} {path}")
return digest_
def _sha256_file_and_print(path: str):
digest_ = sha256_file(path)
print(f"{path}: {digest_}")
return digest_
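# A minimal sketch of verifying a single downloaded archive programmatically rather than
# via the CLI below; with do_assertion=False, check_co3d_sha256 returns a boolean.
def _example_verify_archive(zip_path: str, sha256s_file: str = DEFAULT_SHA256S_FILE) -> bool:
    return check_co3d_sha256(
        zip_path,
        sha256s_file=sha256s_file,
        do_assertion=False,
    )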
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check SHA256 hashes of the CO3D dataset."
)
parser.add_argument(
"--download_folder",
type=str,
help="A local target folder for downloading the the dataset files.",
)
parser.add_argument(
"--sha256s_file",
type=str,
help="A local target folder for downloading the the dataset files.",
default=DEFAULT_SHA256S_FILE,
)
parser.add_argument(
"--num_workers",
type=int,
default=4,
help="The number of sha256 extraction workers.",
)
parser.add_argument(
"--dump_sha256s",
action="store_true",
help="Store sha256s hashes.",
)
parser.add_argument(
"--single_sequence_subset",
action="store_true",
default=False,
help="Check the single-sequence subset of the dataset.",
)
args = parser.parse_args()
main(
str(args.download_folder),
dump=bool(args.dump_sha256s),
n_sha256_workers=int(args.num_workers),
single_sequence_subset=bool(args.single_sequence_subset),
sha256s_file=str(args.sha256s_file),
)
|
co3d-main
|
co3d/dataset/check_checksum.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluation of Implicitron models on CO3Dv2 challenge.
"""
import logging
import os
import torch
import json
import warnings
from typing import Optional, Union, Dict, Tuple
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf
import numpy as np
import pytorch3d
from pytorch3d.implicitron.models.generic_model import ImplicitronRender, GenericModel
from pytorch3d.implicitron.tools.config import get_default_args
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
from pytorch3d.implicitron.dataset.json_index_dataset_map_provider_v2 import (
JsonIndexDatasetMapProviderV2
)
from pytorch3d.implicitron.tools.config import expand_args_fields
from pytorch3d.implicitron.tools.model_io import (
parse_epoch_from_model_path,
find_last_checkpoint,
)
from pytorch3d.implicitron.models.renderer.base import (
# BaseRenderer,
EvaluationMode,
# ImplicitFunctionWrapper,
# RendererOutput,
# RenderSamplingMode,
)
from co3d.utils import dbir_utils
from co3d.challenge.co3d_submission import CO3DSubmission
from co3d.challenge.data_types import CO3DTask, CO3DSequenceSet
from co3d.challenge.utils import (
get_co3d_task_from_subset_name,
get_co3d_sequence_set_from_subset_name,
)
from co3d.dataset.utils import redact_eval_frame_data, _check_valid_eval_frame_data
from co3d.challenge.metric_utils import EVAL_METRIC_NAMES
DATASET_ROOT = os.getenv("CO3DV2_DATASET_ROOT")
DATASET_ROOT_HIDDEN = os.getenv("CO3DV2_HIDDEN_DATASET_ROOT")
# HACK: implicitron_trainer is not part of a package; forcing it in the path
_pytorch3d_root = os.path.dirname(os.path.dirname(pytorch3d.__file__))
implicitron_trainer_dir = os.path.join(_pytorch3d_root, "projects", "implicitron_trainer")
# sys.path.insert(0, implicitron_trainer_dir)
from projects.implicitron_trainer.experiment import Experiment
logger = logging.getLogger(__name__)
def evaluate_implicitron_exp_dir_map(
category_subset_implicitron_exp_dirs: Union[Dict[Tuple[str, str], str], str],
task: CO3DTask,
sequence_set: CO3DSequenceSet,
submission_output_folder: str,
num_eval_workers: int = 4,
submit_to_eval_ai: bool = False,
skip_evaluation: bool = False,
fill_results_from_cache: bool = False,
implicitron_exp_dir_submission_output_subfolder: Optional[str] = None,
):
"""
    Evaluates and submits to EvalAI either:
1) all Implicitron class-specific models, or
2) a single model trained for all categories.
Args:
category_subset_implicitron_exp_dirs: Two options:
1) a dict {(category_name, subset_name): implicitron_exp_dir_path} containing
a mapping from each CO3Dv2 category and subset to the path of the
corresponding implicitron model exp dir.
2) a string containing the path to a single model used for reconstructing
all categories.
task: The co3d task - either CO3DTask.MANY_VIEW or CO3DTask.FEW_VIEW.
sequence_set: The sequence set to evaluate on:
            CO3DSequenceSet.DEV for the development set,
            CO3DSequenceSet.TEST for the test set.
submission_output_folder: Directory containing the submission output files.
num_eval_workers: Number of processes that conduct evaluation.
submit_to_eval_ai: If `True`, will automatically submit the exported result
archive to EvalAI using the CLI interface (needs to be installed with
`pip install evalai`). This requires setting the EVAL_AI_PERSONAL_TOKEN
environment variable to your personal EVAL_AI token.
skip_evaluation: Skip the local evaluation.
implicitron_exp_dir_submission_output_subfolder:
If set to a string, loads precomputed results from
```
category_subset_implicitron_exp_dirs[(category, subset)]
/implicitron_exp_dir_submission_output_subfolder
```
for each (category, subset).
            Such precomputed results are typically output by:
            ```
            evaluate_implicitron_exp_dir(
                category_subset_implicitron_exp_dirs[(category, subset)],
                ...
            )
            ```
        fill_results_from_cache: If `True`, skips running the predictions and only
            loads the results already cached in `submission_output_folder`.
    """
submission = CO3DSubmission(
task=task,
sequence_set=sequence_set,
output_folder=submission_output_folder,
dataset_root=DATASET_ROOT,
)
if fill_results_from_cache:
submission.fill_results_from_cache()
else:
if not isinstance(category_subset_implicitron_exp_dirs, str):
            # check that we have all the models in case we were given one model per
# category/subset_name
for category, subset_name in submission.get_eval_batches_map():
if (category, subset_name) not in category_subset_implicitron_exp_dirs:
raise ValueError(
f"Missing implicitron exp dir for {category}/{subset_name}."
)
for category, subset_name in submission.get_eval_batches_map():
if isinstance(category_subset_implicitron_exp_dirs, str):
# a single model that does it all
current_implicitron_exp_dir = category_subset_implicitron_exp_dirs
else:
# subset-specific models
current_implicitron_exp_dir = category_subset_implicitron_exp_dirs[
(category, subset_name)
]
if implicitron_exp_dir_submission_output_subfolder is not None:
submission.link_results_from_existing_output_folder(
os.path.join(
current_implicitron_exp_dir,
implicitron_exp_dir_submission_output_subfolder,
)
)
else:
update_implicitron_submission_with_category_and_subset_predictions(
submission=submission,
implicitron_exp_dir=current_implicitron_exp_dir,
dataset_root=DATASET_ROOT,
category=category,
subset_name=subset_name,
n_known_frames_for_test=9 if task==CO3DTask.MANY_VIEW else 0,
)
    # Locally evaluate the submission in case we don't evaluate on the hidden test set.
if sequence_set != CO3DSequenceSet.TEST and not skip_evaluation:
submission.evaluate(num_workers=num_eval_workers)
if submit_to_eval_ai:
        # Export the submission predictions for submission to the evaluation server.
# This also validates completeness of the produced predictions.
submission.export_results(validate_results=True)
# submit the results to the EvalAI server.
submission.submit_to_eval_ai()
def evaluate_implicitron_exp_dir(
implicitron_exp_dir: str,
task: Optional[CO3DTask] = None,
sequence_set: Optional[CO3DSequenceSet] = None,
subset_name: Optional[str] = None,
category: Optional[str] = None,
result_dump_file: Optional[str] = None,
clear_submission_cache_before_evaluation: bool = False,
clear_submission_cache_after_evaluation: bool = False,
submission_output_folder: Optional[str] = None,
num_eval_workers: int = 4,
):
"""
Run evaluation for an experiment directory of Implicitron.
    Unless overridden by the user, this function automatically parses the
category / subset / task / sequence_set / dataset_root
from the implicitron experiment config stored in implicitron_exp_dir.
Args:
implicitron_exp_dir: The directory of an Implicitron experiment.
task: The co3d task - either CO3DTask.MANY_VIEW or CO3DTask.FEW_VIEW.
sequence_set: The sequence set to evaluate on:
            CO3DSequenceSet.DEV for the development set,
            CO3DSequenceSet.TEST for the test set.
subset_name: The name of the CO3Dv2 subset.
E.g. "manyview_dev_0", "fewview_dev", ...
category: The name of the CO3Dv2 category to evaluate.
result_dump_file: Path to the json file with evaluation results.
clear_submission_cache_before_evaluation: Delete all previous intermediate
submission files before commencing the current evaluation run.
clear_submission_cache_after_evaluation: Delete all intermediate
submission files after the evaluation run.
submission_output_folder: The path to the folder with intermediate
submission files.
num_eval_workers: Number of processes that conduct evaluation.
"""
if result_dump_file is None:
result_dump_file = os.path.join(
implicitron_exp_dir, "results_challenge_eval.json"
)
cfg = load_implicitron_config_from_exp_dir(implicitron_exp_dir)
# assert few config settings
assert (
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type
=="JsonIndexDatasetMapProviderV2"
)
# read the category / subset / task / sequence_set / dataset_root from
# the implicitron config
dataset_provider_args = (
cfg
.data_source_ImplicitronDataSource_args
.dataset_map_provider_JsonIndexDatasetMapProviderV2_args
)
if subset_name is None:
subset_name = dataset_provider_args.subset_name
if category is None:
category = dataset_provider_args.category
if task is None:
task = get_co3d_task_from_subset_name(subset_name)
if sequence_set is None:
sequence_set = get_co3d_sequence_set_from_subset_name(subset_name)
dataset_root = (
DATASET_ROOT
if DATASET_ROOT is not None
else dataset_provider_args.dataset_root
)
logger.info(
f"Evaluating Implicitron model on category {category}; subset {subset_name}"
)
# the folder storing all predictions and results of the submission
if submission_output_folder is None:
submission_output_folder = get_default_implicitron_exp_dir_submission_output_folder(
implicitron_exp_dir,
task,
sequence_set,
)
# create the submission object
submission = CO3DSubmission(
task=task,
sequence_set=sequence_set,
output_folder=submission_output_folder,
dataset_root=DATASET_ROOT,
)
if task==CO3DTask.FEW_VIEW and submission.has_only_single_sequence_subset():
# if only a single-sequence dataset is downloaded, only the many-view task
# is available
raise ValueError(
f"Cannot evaluate the few-view task in {sequence_set.value} when only the"
" singlesequence subset of CO3D is present."
)
if clear_submission_cache_before_evaluation:
submission.clear_files()
# Generate new views for all evaluation examples in category/subset_name.
update_implicitron_submission_with_category_and_subset_predictions(
submission=submission,
implicitron_exp_dir=implicitron_exp_dir,
dataset_root=dataset_root,
category=category,
subset_name=subset_name,
n_known_frames_for_test=9 if task==CO3DTask.MANY_VIEW else 0,
)
    # Locally evaluate the submission in case we don't evaluate on the hidden test set.
if sequence_set == CO3DSequenceSet.TEST:
logger.warning("Cannot evaluate on the hidden test set. Skipping evaluation.")
category_subset_results = {m: 0.0 for m in EVAL_METRIC_NAMES}
else:
results = submission.evaluate(num_workers=num_eval_workers)
category_subset_results = results[(category, subset_name)][0]
# add the eval epoch as well
category_subset_results["eval_epoch"] = parse_epoch_from_model_path(
find_last_checkpoint(implicitron_exp_dir)
)
logger.info("Implicitron model results:")
logger.info(f"category={category} / subset_name={subset_name}")
print_category_subset_results(category_subset_results)
if clear_submission_cache_after_evaluation:
submission.clear_files()
logger.info(f"Dumping challenge eval results to {result_dump_file}.")
with open(result_dump_file, "w") as f:
json.dump(category_subset_results, f)
return category_subset_results
@torch.no_grad()
def update_implicitron_submission_with_category_and_subset_predictions(
submission: CO3DSubmission,
implicitron_exp_dir: str,
dataset_root: str,
category: str,
subset_name: str,
num_workers: int = 12,
n_known_frames_for_test: int = 0,
):
"""
    Updates the CO3DSubmission object `submission` with predictions of an Implicitron
    model extracted for a given category and dataset subset.
Args:
submission: CO3DSubmission object.
implicitron_exp_dir: Implicitron experiment directory to load the model from.
dataset_root: Path to the root dataset folder containing CO3Dv2.
category: A CO3Dv2 category to evaluate.
subset_name: The name of the evaluation subset of the category.
num_workers: Number of processes to use for evaluation.
n_known_frames_for_test: The number of known frames to append to the test batches.
"""
logger.info(
"Runing depth-based image rendering (DBIR) new view synthesis "
f"on category '{category}' subset '{subset_name}'"
)
# Get the evaluation device.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# load the implicitron model
model = load_model_from_implicitron_exp_dir(implicitron_exp_dir)
# Determine the sequence set and the task we are solving
sequence_set = submission.sequence_set
task = submission.task
# Obtain the CO3Dv2 dataset map
dataset_map = get_dataset_map(
dataset_root,
category,
subset_name,
n_known_frames_for_test=n_known_frames_for_test,
)
# The test dataloader simply iterates over test_dataset.eval_batches
# this is done by setting test_dataset.eval_batches as the batch sampler
test_dataset = dataset_map["test"]
eval_batches = test_dataset.get_eval_batches()
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=eval_batches,
num_workers=num_workers,
collate_fn=FrameData.collate,
)
# loop over eval examples
logger.info(
f"Rendering {len(test_dataloader)} test views for {category}/{subset_name}"
)
if sequence_set==CO3DSequenceSet.TEST:
# the test set contains images with redacted foreground masks which cause
# the test dataloader to spam a warning message,
# we suppress this warning with the following line
warnings.filterwarnings("ignore", message="Empty masks_for_bbox.*")
for eval_index, eval_frame_data in enumerate(tqdm(test_dataloader)):
# the first element of eval_frame_data is the actual evaluation image,
        # the remaining (2nd to last) elements are the known source images used for building
# the reconstruction (source images are present only for the few-view task)
# move the eval data to the requested device
eval_frame_data = eval_frame_data.to(device)
# sanity check that the eval frame data has correctly redacted entries
_check_valid_eval_frame_data(eval_frame_data, task, sequence_set)
# Redact the frame data so we are sure we cannot use the data
# from the actual unobserved evaluation sample
eval_frame_data = redact_eval_frame_data(eval_frame_data)
# Obtain the image render. In case dataset_test.box_crop==True,
# we need to paste the render back to the original image bounds.
model_preds = model(
**eval_frame_data,
eval_mode=EvaluationMode.EVALUATION,
)
render_crop = model_preds["implicitron_render"]
# cut the valid part of the render and paste into the original image canvas
render_full_image = dbir_utils.paste_render_to_original_image(
eval_frame_data, render_crop
)
# get the image, mask, depth as numpy arrays for the challenge submission
image, mask, depth = [
getattr(render_full_image, f"{data_type}_render").cpu().numpy()[0]
for data_type in ["image", "mask", "depth"]
]
# clip the rendered image to [0, 1] range
image = image.clip(0.0, 1.0)
# add the results to the submission object
submission.add_result(
category=category,
subset_name=subset_name,
sequence_name=eval_frame_data.sequence_name[0],
frame_number=int(eval_frame_data.frame_number[0]),
image=image,
mask=mask,
depth=depth,
)
# reset all warnings
warnings.simplefilter("always")
def get_default_implicitron_exp_dir_submission_output_folder(
implicitron_exp_dir: str,
task: CO3DTask,
sequence_set: CO3DSequenceSet,
):
return os.path.join(
implicitron_exp_dir,
f"implicitron_submission_output_{task.value}_{sequence_set.value}",
)
def parse_co3d_challenge_settings_from_implicitron_exp_dir(
implicitron_exp_dir: str
) -> Tuple[CO3DSequenceSet, CO3DTask, str, str]:
"""
Reads the config of an implicitron experiment stored in `implicitron_exp_dir` and
returns the configuration of the corresponding challenge entry.
Args:
implicitron_exp_dir: The directory of an Implicitron experiment.
Returns:
sequence_set: CO3D sequence set of the experiment.
task: The CO3D task of the experiment.
category: The category of the experiment.
subset_name: The name of the CO3D subset.
"""
cfg = load_implicitron_config_from_exp_dir(implicitron_exp_dir)
dataset_provider_args = (
cfg
.data_source_ImplicitronDataSource_args
.dataset_map_provider_JsonIndexDatasetMapProviderV2_args
)
subset_name = dataset_provider_args.subset_name
category = dataset_provider_args.category
task = get_co3d_task_from_subset_name(subset_name)
sequence_set = get_co3d_sequence_set_from_subset_name(subset_name)
return sequence_set, task, category, subset_name
def load_implicitron_config_from_exp_dir(implicitron_exp_dir: str):
cfg_filename = os.path.join(implicitron_exp_dir, "expconfig.yaml")
cfg_load = OmegaConf.load(cfg_filename)
cfg_default = get_default_args(Experiment)
cfg = OmegaConf.merge(cfg_default, cfg_load)
cfg.exp_dir = implicitron_exp_dir
return cfg
def load_model_from_implicitron_exp_dir(exp_dir: str) -> GenericModel:
cfg = load_implicitron_config_from_exp_dir(exp_dir)
experiment = Experiment(**cfg)
experiment.model_factory.force_resume = True
model = experiment.model_factory(accelerator=None, exp_dir=exp_dir)
model.cuda()
model.eval()
return model
def get_dataset_map(
dataset_root: str,
category: str,
subset_name: str,
n_known_frames_for_test: int = 0,
) -> DatasetMap:
"""
Obtain the dataset map that contains the train/val/test dataset objects.
"""
expand_args_fields(JsonIndexDatasetMapProviderV2)
dataset_map_provider = JsonIndexDatasetMapProviderV2(
category=category,
subset_name=subset_name,
dataset_root=dataset_root,
test_on_train=False,
only_test_set=False,
load_eval_batches=True,
dataset_JsonIndexDataset_args=DictConfig({"remove_empty_masks": False}),
n_known_frames_for_test=n_known_frames_for_test,
)
return dataset_map_provider.get_dataset_map()
def print_category_subset_results(category_subset_results: Dict[str, float]):
for k, v in category_subset_results.items():
print(f"{k:20s}: {v:1.3f}")
|
co3d-main
|
co3d/utils/evaluate_implicitron_model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import torch
from typing import Tuple
from pytorch3d.renderer.cameras import CamerasBase
from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.structures import Pointclouds
from pytorch3d.implicitron.dataset.json_index_dataset import _get_clamp_bbox
from pytorch3d.implicitron.models.base_model import ImplicitronRender
from pytorch3d.implicitron.dataset.visualize import get_implicitron_sequence_pointcloud
from pytorch3d.implicitron.tools.point_cloud_utils import (
render_point_cloud_pytorch3d,
get_rgbd_point_cloud,
)
def render_point_cloud(
camera: CamerasBase,
render_size: Tuple[int, int],
pointcloud: Pointclouds,
point_radius: float = 0.03,
) -> ImplicitronRender:
"""
Render the point cloud `pointcloud` to the camera `camera` using the
PyTorch3D point cloud renderer.
Args:
camera: Rendering camera.
render_size: 2-tuple of integers denoting the render size (HxW)
pointcloud: The point cloud to render.
point_radius: Radius of the rendered points.
"""
# render the sequence point cloud to each evaluation view
data_rendered, render_mask, depth_rendered = render_point_cloud_pytorch3d(
camera,
pointcloud,
render_size=render_size,
point_radius=point_radius,
topk=10,
eps=1e-2,
bin_size=0,
)
# cast to the implicitron render
return ImplicitronRender(
depth_render=depth_rendered,
image_render=data_rendered,
mask_render=render_mask,
)
def paste_render_to_original_image(
frame_data: FrameData,
render: ImplicitronRender,
) -> ImplicitronRender:
"""
Paste a rendering result `render` into the original image coordinate frame.
Args:
frame_data: The `FrameData` object as returned by the `JsonIndexDataset`.
render: A render to be pasted into the original image coordinates.
"""
# size of the render
render_size = render.image_render.shape[2:]
# estimate render scale w.r.t. the frame_data images
render_scale_factors = [
sr / s for sr, s in zip(render_size, frame_data.image_rgb.shape[2:])
]
assert abs(render_scale_factors[0]-render_scale_factors[1]) <= 1e-2, (
"non-isotropic render rescale"
)
# original image size
orig_size = frame_data.image_size_hw[0].tolist()
# bounding box of the crop in the original image
if frame_data.crop_bbox_xywh is not None:
bbox_xywh = frame_data.crop_bbox_xywh[0]
else:
bbox_xywh = torch.LongTensor([0, 0, orig_size[1], orig_size[0]])
# get the valid part of the render
render_bounds_wh = [None, None]
for axis in [0, 1]:
# resize the mask crop to the size of the render
if render_size != frame_data.mask_crop.shape[2:]:
mask_crop_render_size = torch.nn.functional.interpolate(
frame_data.mask_crop, size=render_size, mode="nearest"
)
else:
mask_crop_render_size = frame_data.mask_crop
        # get the bounds of the mask_crop along dimension = 1-axis
valid_dim_pix = mask_crop_render_size[0, 0].sum(dim=axis).reshape(-1).nonzero()
assert valid_dim_pix.min()==0
render_bounds_wh[axis] = valid_dim_pix.max().item() + 1
render_out = {}
for render_type, render_val in dataclasses.asdict(render).items():
if render_val is None:
continue
# get the valid part of the render
render_valid_ = render_val[..., :render_bounds_wh[1], :render_bounds_wh[0]]
# resize the valid part to the original size
render_resize_ = torch.nn.functional.interpolate(
render_valid_,
size=tuple(reversed(bbox_xywh[2:].tolist())),
mode="bilinear" if render_type=="image_render" else "nearest",
align_corners=False if render_type=="image_render" else None,
)
# paste the original-sized crop to the original image
render_pasted_ = render_resize_.new_zeros(1, render_resize_.shape[1], *orig_size)
render_pasted_[
...,
bbox_xywh[1]:(bbox_xywh[1]+render_resize_.shape[2]),
bbox_xywh[0]:(bbox_xywh[0]+render_resize_.shape[3]),
] = render_resize_
render_out[render_type] = render_pasted_
# if True:
# # debug visualize
# from visdom import Visdom
# viz = Visdom()
# visdom_env = "debug_paste_render_to_original_image"
# viz.image(
# render.image_render[0],
# env=visdom_env,
# win="original",
# )
# viz.image(
# render_out["image_render"][0],
# env=visdom_env,
# win="pasted",
# )
# import pdb; pdb.set_trace()
# pass
return ImplicitronRender(**render_out)
def get_sequence_pointcloud(
dataset: JsonIndexDataset,
sequence_name: str,
num_workers: int = 12,
max_loaded_frames: int = 50,
max_n_points: int = int(1e5),
seed: int = 42,
load_dataset_pointcloud: bool = False,
) -> Pointclouds:
"""
Given a `dataset` object and the name of a sequence in it (`sequence_name`),
generate a 3D pointcloud containing the main foreground object of the scene.
Args:
        dataset: A dataset containing sequence annotations.
sequence_name: The name of the sequence to reconstruct.
        num_workers: Number of cores to use for loading the sequence data.
        max_loaded_frames: The maximum number of sequence frames to load.
        max_n_points: Maximum number of points to keep in the point cloud.
seed: Random seed for reproducibility.
load_dataset_pointcloud: If `True` uses the CO3D ground truth dataset
point cloud, otherwise generates the point cloud by unprojecting
the depth maps of known frames.
"""
with torch.random.fork_rng(): # fork rng for reproducibility
torch.manual_seed(seed)
sequence_pointcloud, _ = get_implicitron_sequence_pointcloud(
dataset,
sequence_name,
mask_points=True,
max_frames=max_loaded_frames,
num_workers=num_workers,
load_dataset_point_cloud=load_dataset_pointcloud,
)
sequence_pointcloud = _subsample_pointcloud(sequence_pointcloud, max_n_points)
return sequence_pointcloud
def get_eval_frame_data_pointcloud(
eval_frame_data: FrameData,
max_n_points: int = int(3e4),
):
"""
Generate a pointcloud by unprojecting the known depth maps of a `FrameData` object
`eval_frame_data`.
Args:
eval_frame_data: `FrameData` to unproject.
max_n_points: Maximum number of points to keep in the point cloud.
"""
batch_size = eval_frame_data.image_rgb.shape[0]
pointcloud = get_rgbd_point_cloud(
eval_frame_data.camera[list(range(1, batch_size))],
eval_frame_data.image_rgb[1:],
eval_frame_data.depth_map[1:],
(eval_frame_data.fg_probability[1:] > 0.5).float(),
mask_points=True,
)
return _subsample_pointcloud(pointcloud, max_n_points)
def _subsample_pointcloud(p: Pointclouds, n: int):
n_points = p.num_points_per_cloud().item()
if n_points > n:
# subsample the point cloud in case it is bigger than max_n_points
subsample_idx = torch.randperm(
n_points,
device=p.points_padded().device,
)[:n]
p = Pointclouds(
points=p.points_padded()[:, subsample_idx],
features=p.features_padded()[:, subsample_idx],
)
return p
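# A minimal DBIR sketch built from the helpers above, assuming `eval_frame_data` is a
# few-view evaluation batch with at least one known source frame: unproject the known
# frames into a point cloud, render it into the target (first) view, and paste the render
# back into the original image bounds.
def _example_dbir_new_view(eval_frame_data: FrameData) -> ImplicitronRender:
    pointcloud = get_eval_frame_data_pointcloud(eval_frame_data)
    render_size = tuple(eval_frame_data.image_rgb.shape[2:])  # (H, W) of the target view
    render_crop = render_point_cloud(
        eval_frame_data.camera[[0]],  # camera of the target (first) view
        render_size,
        pointcloud,
    )
    return paste_render_to_original_image(eval_frame_data, render_crop)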
|
co3d-main
|
co3d/utils/dbir_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import logging
import errno
import pickle
import glob
import hashlib
import time
from tabulate import tabulate
from typing import Optional, Tuple, List
from dataclasses import dataclass
import numpy as np
import csv
from co3d.challenge.metric_utils import EVAL_METRIC_NAMES, EVAL_METRIC_MISSING_VALUE
from .blank_predictions_results import BLANK_PREDICTION_RESULTS
from .utils import evaluate_file_folders, get_result_directory_file_names
from .data_types import RGBDAFrame, CO3DTask, CO3DSequenceSet
from .io import (
load_all_eval_batches,
store_rgbda_frame,
export_result_file_dict_to_hdf5,
make_hdf5_file_links,
link_file_to_db_file,
link_rgbda_frame_files,
)
CO3D_CHALLENGE_ID = 1819
CO3D_PHASE_ID = {
(CO3DTask.MANY_VIEW, CO3DSequenceSet.DEV): 3541,
(CO3DTask.MANY_VIEW, CO3DSequenceSet.TEST): 3542,
(CO3DTask.FEW_VIEW, CO3DSequenceSet.DEV): 3543,
(CO3DTask.FEW_VIEW, CO3DSequenceSet.TEST): 3544,
}
EVAL_AI_PERSONAL_TOKEN = os.getenv("EVAL_AI_PERSONAL_TOKEN")
MAX_EXPORT_ARCHIVE_SIZE_GB = 2.0
logger = logging.getLogger(__file__)
@dataclass
class CO3DSubmissionRender:
"""
Contains information about a single predicted image.
category: The name of the category of the prediction.
    subset_name: The dataset subset of the prediction.
    sequence_name: The name of the sequence of the prediction.
frame_number: The number of the corresponding ground truth frame.
rgbda_frame: The actual render.
"""
category: str
subset_name: str
sequence_name: str
frame_number: int
rgbda_frame: Optional[RGBDAFrame] = None
def get_image_path(self, root_dir: str):
return os.path.join(
CO3DSubmission.get_submission_cache_image_dir(
root_dir,
self.category,
self.subset_name,
),
self.get_image_name(),
)
def get_hash(self):
return (self.category, self.subset_name, self.sequence_name, self.frame_number)
def get_image_name(self):
return get_submission_image_name(
self.category, self.sequence_name, self.frame_number
)
class CO3DSubmission:
"""
    Maintains all data needed for a successful submission to the CO3D Challenge
evaluation server. The class can also locally evaluate predictions if
a local copy of the CO3Dv2 dataset is present.
See https://eval.ai/web/challenges/challenge-page/1819/overview for more details
about the challenge.
In order to create a CO3Dv2 submission, evaluate and submit the results, please follow
these steps:
1) Start by importing the `CO3DSubmission` class and instantiate a submission run.
For example, the following code:
```python
from co3d.challenge.co3d_submission import CO3DSubmission
output_folder = "./co3d_submission_files"
task = CO3DTask.MANY_VIEW
sequence_set = CO3DSequenceSet.TEST
submission = CO3DSubmission(
            task=task,
sequence_set=sequence_set,
output_folder=output_folder,
dataset_root=dataset_root,
)
```
will instantiate a CO3D submission object `submission` that stores (and optionally
evaluates) results of the `manyview` task on the `test` set. All results will be
stored in the `output_folder`. Note that a user has to also specify the local root
folder of the CO3D dataset in `dataset_root`.
2) Obtain the dictionary of evaluation examples `eval_batches_map` from `submission`.
```python
eval_batches_map = submission.get_eval_batches_map()
```
here, `eval_batches_map` is a dictionary of the following form:
```
{(category: str, subset_name: str): eval_batches} # eval_batches_map
```
where `eval_batches` look as follows:
```python
[
[
(sequence_name_0: str, frame_number_0: int),
(sequence_name_0: str, frame_number_1: int),
...
(sequence_name_0: str, frame_number_M_0: int),
],
...
[
(sequence_name_N: str, frame_number_0: int),
(sequence_name_N: str, frame_number_1: int),
...
(sequence_name_N: str, frame_number_M_N: int),
]
] # eval_batches
```
Containing a list of `N` evaluation examples, each consisting of a tuple of
`M_i` frames with numbers `frame_number_j` from a given sequence name `sequence_name_i`.
Note that the mapping between `frame_number` and `sequence_name` to the CO3D data
is stored in the respective `frame_annotations.jgz` and `sequence_annotation.jgz`
files in `<dataset_root>/<sequence_category>`.
    For the <b>Many-view task</b> (`CO3DTask.MANY_VIEW`), each evaluation batch has a single
    (`M_i=1`) frame, which is the target evaluation frame.
    For the <b>Few-view task</b> (`CO3DTask.FEW_VIEW`), each batch has several frames (`M_i>1`),
    where the first frame is the target frame which should be predicted given the knowledge
    of the source frames that correspond to the remaining elements of each batch.
3) Next we iterate over eval_batches, predict new views, and store our predictions
with the `submission` object.
```python
# iterate over evaluation subsets and categories
for (category, subset_name), eval_batches in eval_batches_map.items():
# iterate over all evaluation examples of a given category and subset
for eval_batch in eval_batches:
# parse the evaluation sequence name and target frame number from eval_batch
sequence_name, frame_number = eval_batch[0][:2]
# `predict_new_view` is a user-defined function which generates
# the test view (corresponding to the first element of the eval batch)
image, depth, mask = predict_new_view(eval_batch, ...)
# add the render to the submission
submission.add_result(
category=category,
subset_name=subset_name,
sequence_name=sequence_name,
frame_number=frame_number,
image=image,
mask=mask,
depth=depth,
)
```
4) Export the submission object to a hdf5 file that can be uploaded to the EvalAI server:
```
submission.export_results()
```
5) Submit the submission to the EvalAI server:
```
submission.submit_to_eval_ai()
```
"""
def __init__(
self,
task: CO3DTask,
sequence_set: CO3DSequenceSet,
output_folder: str,
dataset_root: Optional[str] = None,
eval_ai_personal_token: Optional[str] = EVAL_AI_PERSONAL_TOKEN,
export_format: str = "hdf5",
# ---- the following are only for internal use, do not modify ----
on_server: bool = False,
server_data_folder: Optional[str] = None,
max_processing_time: int = -1,
):
"""
Initialize the CO3DSubmission object.
        task: The CO3D task (track):
            `CO3DTask.MANY_VIEW` for the "Many-view" task.
            `CO3DTask.FEW_VIEW` for the "Few-view" task.
        sequence_set: The challenge sequence set:
            `CO3DSequenceSet.DEV` for the development set.
            `CO3DSequenceSet.TEST` for the test set.
output_folder: The folder containing all outputs needed for the challenge submission.
dataset_root: The path to the root folder of a local copy of the CO3Dv2 dataset.
eval_ai_personal_token: A personal eval_ai token. Required for the cli
submission with `self.submit_to_eval_ai`.
export_format: The format of the exported archive. Currently only "hdf5" is supported.
server_data_folder: (Internal-use-only)
on_server: (Internal-use-only)
max_processing_time: (Internal-use-only)
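Example (a minimal sketch; the folder paths are hypothetical):
```python
submission = CO3DSubmission(
    task=CO3DTask.MANY_VIEW,
    sequence_set=CO3DSequenceSet.DEV,
    output_folder="./co3d_submission_output",
    dataset_root="./co3d_dataset_root",
)
```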
"""
self.task = task
self.sequence_set = sequence_set
self.output_folder = output_folder
self.dataset_root = dataset_root
self.server_data_folder = server_data_folder
self.on_server = on_server
self.export_format = export_format
self.eval_ai_personal_token = eval_ai_personal_token
self.max_processing_time = max_processing_time
submission_archive_ext = self.export_format
self.submission_archive = os.path.join(
output_folder, f"submission_{task.value}_{sequence_set.value}.{submission_archive_ext}"
)
self.evaluate_exceptions_file = os.path.join(output_folder, "eval_exceptions.pkl")
self.submission_cache = os.path.join(output_folder, "submission_cache")
os.makedirs(self.submission_cache, exist_ok=True)
self._result_list: List[CO3DSubmissionRender] = []
self._eval_batches_map = None
@staticmethod
def get_submission_cache_image_dir(
output_folder: str,
category: str,
subset_name: str,
):
"""
Get the cache folder containing all predictions for a given category and subset.
Args:
output_folder: The root submission folder.
category: CO3D category name (e.g. "apple", "orange")
subset_name: CO3D subset name (e.g. "manyview_dev_0", "manyview_test_0")
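Example (hypothetical folder): `get_submission_cache_image_dir("./out", "apple", "manyview_dev_0")`
returns `"./out/apple/manyview_dev_0"`.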
"""
return os.path.join(output_folder, category, subset_name)
def has_only_single_sequence_subset(self):
"""
Returns:
has_only_single_sequence: Returns `True` if the present version of the CO3Dv2
dataset contains only single-sequence data. Otherwise returns `False`.
"""
if self.dataset_root is None:
raise ValueError("dataset_root has to be specified.")
eval_batches_map = load_all_eval_batches(self.dataset_root)
if any(
"fewview_" in subset_name for category, subset_name in eval_batches_map.keys()
):
return False
else:
return True
def add_result(
self,
category: str,
subset_name: str,
sequence_name: str,
frame_number: int,
image: np.ndarray,
mask: np.ndarray,
depth: np.ndarray,
) -> None:
"""
Adds a single user-predicted image to the current submission.
Args:
category: The CO3D category of the image (e.g. "apple", "car").
subset_name: The name of the subset which the image comes from
(e.g. "manyview_dev_0", "manyview_test_0").
sequence_name: The name of the sequence which the image comes from.
frame_number: The number of the corresponding ground truth frame.
image: 3xHxW numpy.ndarray containing the RGB image.
The color range is [0-1] and `image` should be of the same size
as the corresponding ground truth image.
mask: 1xHxW numpy.ndarray containing the binary foreground mask of the
rendered object.
The values should be in {0, 1} and `mask` should be of the same size
as the corresponding ground truth image.
depth: 1xHxW numpy.ndarray containing the rendered depth map of the predicted
image.
The depth map should be of the same size as the corresponding
ground truth image.
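Example (a minimal sketch; the sequence name, frame number, and image size are
hypothetical placeholders):
```python
import numpy as np
H, W = 800, 800
submission.add_result(
    category="apple",
    subset_name="manyview_dev_0",
    sequence_name="some_sequence_name",
    frame_number=42,
    image=np.zeros((3, H, W), dtype=np.float32),
    mask=np.zeros((1, H, W), dtype=np.float32),
    depth=np.zeros((1, H, W), dtype=np.float32),
)
```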
"""
res = self._add_result_metadata(
category,
subset_name,
sequence_name,
frame_number,
)
res_file = res.get_image_path(self.submission_cache)
os.makedirs(os.path.dirname(res_file), exist_ok=True)
logger.debug(f"Storing submission files {res_file}.")
store_rgbda_frame(
RGBDAFrame(image=image, mask=mask, depth=depth),
res_file,
)
def _link_existing_render(
self,
render_submission_cache: str,
render: CO3DSubmissionRender,
) -> None:
"""
Link a single stored existing render to the current submission.
Args:
render_submission_cache: The path to the submission cache of the render.
render: The linked render.
"""
res = self._add_result_metadata(
render.category,
render.subset_name,
render.sequence_name,
render.frame_number,
)
rgbda_file_link_src = res.get_image_path(self.submission_cache)
rgbda_file_existing = render.get_image_path(render_submission_cache)
os.makedirs(os.path.dirname(rgbda_file_link_src), exist_ok=True)
logger.debug(
f"Linking submission file {rgbda_file_link_src} to {rgbda_file_existing}."
)
link_rgbda_frame_files(rgbda_file_existing, rgbda_file_link_src)
def _add_result_metadata(
self,
category: str,
subset_name: str,
sequence_name: str,
frame_number: int,
) -> CO3DSubmissionRender:
res = CO3DSubmissionRender(
category=category,
subset_name=subset_name,
sequence_name=sequence_name,
frame_number=frame_number,
rgbda_frame=None,
)
self._result_list.append(res)
# if res.get_hash() in [r.get_hash() for r in self._result_list]:
# logger.warning(
# f"{str(res.get_hash())} already in the result list! Skipping."
# )
# else:
# self._result_list.append(res)
return res
def _get_result_frame_index(self):
return {(res.sequence_name, res.frame_number): res for res in self._result_list}
def get_eval_batches_map(self, only_target_frame: bool = False):
"""
Returns a dictionary of evaluation examples of the following form:
```
{(category: str, subset_name: str): eval_batches} # eval_batches_map
```
where `eval_batches` look as follows:
```
[
[
(sequence_name_0: str, frame_number_0: int),
(sequence_name_0: str, frame_number_1: int),
...
(sequence_name_0: str, frame_number_M: int),
],
...
[
(sequence_name_N: str, frame_number_0: int),
(sequence_name_N: str, frame_number_1: int),
...
(sequence_name_N: str, frame_number_M: int),
]
] # eval_batches
```
Here, `eval_batches` contains a list of `N` evaluation examples,
each consisting of a tuple of frames with numbers `frame_number_j`
from a given sequence `sequence_name_i`.
Note that the mapping from `sequence_name` and `frame_number` to the CO3D
data is stored in the respective `frame_annotations.jgz` and `sequence_annotation.jgz`
files in `<dataset_root>/<category>`.
Args:
only_target_frame: Returns only the first (target evaluation) frame
for each eval batch.
Returns:
eval_batches_map: A dictionary of evaluation examples for each category.
"""
if self._eval_batches_map is None:
self._eval_batches_map = load_all_eval_batches(
self.dataset_root,
self.task,
self.sequence_set,
remove_frame_paths=False,
only_target_frame=False,
)
if only_target_frame:
# take only the first (target evaluation) frame for each eval batch
eval_batches_map = {}
for (category, subset_name), eval_batches in self._eval_batches_map.items():
eval_batches_map[(category, subset_name)] = [
b[0] for b in eval_batches
]
else:
eval_batches_map = self._eval_batches_map
return eval_batches_map
def clear_files(self):
"""
Remove all generated submission files.
"""
if os.path.isdir(self.output_folder):
shutil.rmtree(self.output_folder)
if os.path.isdir(self.submission_cache):
shutil.rmtree(self.submission_cache)
if os.path.isfile(self.submission_archive):
os.remove(self.submission_archive)
def validate_export_results(self):
"""
Validate the submission by checking whether all required prediction files
are present.
"""
if self.dataset_root is None or not os.path.isdir(self.dataset_root):
raise ValueError(
"For validating the results, dataset_root has to be defined"
+ " and has to point to a valid root folder of the CO3D dataset."
)
eval_batches_map = self.get_eval_batches_map(only_target_frame=True)
result_frame_index = self._get_result_frame_index()
valid = True
for (category, subset_name), eval_batches in eval_batches_map.items():
eval_batches_2tuple = [tuple(b[:2]) for b in eval_batches]
missing_preds = [
b for b in eval_batches_2tuple if b not in result_frame_index
]
if len(missing_preds) > 0:
valid = False
logger.info(
f"{category}/{subset_name} is missing predictions."
)
logger.debug(str(missing_preds))
additional_results = [
idx for idx, res in result_frame_index.items() if (
idx not in eval_batches_2tuple
and res.category==category and res.subset_name==subset_name
)
]
if len(additional_results) > 0:
valid = False
logger.info(
f"{category}/{subset_name} has additional results."
)
logger.debug(str(additional_results))
return valid
def submit_to_eval_ai(
self,
challenge_id: int = CO3D_CHALLENGE_ID,
):
"""
Submit the exported results to the EvalAI server.
"""
logger.info(f"Submitting {self.submission_archive} to EvalAI.")
if not os.path.isfile(self.submission_archive):
raise ValueError(
f"Submission archive {self.submission_archive} does not exist."
" Please run submission.export_results() first."
)
try:
import evalai
except ModuleNotFoundError:
raise ValueError(
"Cannot find EvalAI cli package."
" Please install it with pip: `pip install evalai`"
)
if self.eval_ai_personal_token is None or len(self.eval_ai_personal_token)==0:
raise ValueError(
"For EvalAI submission, the personal token"
+" self.eval_ai_personal_token has to be set!"
+" Please obtain it from you EvalAI profile page https://eval.ai/web/profile"
+" by clicking on 'Get your Auth Token' button."
)
# run the evalai imports
from click.testing import CliRunner
from evalai.challenges import challenge
from evalai.add_token import set_token
runner = CliRunner()
# set the eval ai auth token
result = runner.invoke(set_token, [self.eval_ai_personal_token])
if result.exit_code!=0:
raise ValueError("Could not set the eval_ai personal token.")
# get the challenge phase ID
phase_id = CO3D_PHASE_ID[(self.task, self.sequence_set)]
# run the submission script
os.system(
f"evalai challenge {challenge_id} phase {phase_id}"
+ f" submit --file {self.submission_archive} --large"
)
# the following, unfortunately, does not accept keyboard input
# result = runner.invoke(
# challenge, [
# str(challenge_id),
# "phase", str(phase_id),
# "submit",
# "--file", self.submission_archive,
# "--large",
# ],
# input="/n",
# )
# if result.output != 0:
# raise ValueError(
# "Submission failed:"
# + result.output
# )
def export_results(self, validate_results: bool = True):
"""
Export the generated evaluation images for a submission to the EvalAI server.
Args:
validate_results: If `True`, checks whether the added results are valid
before submission. This requires setting `self.dataset_root` to a directory
containing a local copy of the CO3D dataset.
"""
if validate_results:
# optionally check that all results are correct
valid_results = self.validate_export_results()
if not valid_results:
logger.warning(
"The submission results are invalid."
" The evaluation will be incomplete."
)
# zip the directory
logger.info(f"Archiving {self.submission_cache} to {self.submission_archive}.")
if self.export_format=="zip":
raise ValueError(
f"Please export the data using the 'hdf5' format."
f"'zip' is no longer supported."
)
# First we need to remove all links to the ground truth directories
# that were potentially created during a call to self.evaluate().
self._clear_gt_links()
shutil.make_archive(
base_name=self.submission_archive.replace(".zip", ""),
format="zip",
root_dir=self.submission_cache,
base_dir=".",
)
elif self.export_format=="hdf5":
self._export_results_to_hdf5()
else:
raise ValueError(f"Unknown export format {self.export_format}.")
exported_file_size = os.path.getsize(self.submission_archive) / 1e9
if exported_file_size > MAX_EXPORT_ARCHIVE_SIZE_GB:
logger.warning(
f"The exported result file {self.submission_archive} is bigger"
f" than {exported_file_size} GB! Please ensure that your submission file"
f" is smaller to prevent submission upload failures."
)
# finally export the result
logger.warning(
f"Exported result file ({exported_file_size:.2f} GB):"
f"\n\n ===> {self.submission_archive} <==="
f"\n\nYou can now submit the file to the EvalAI server:"
f" In order to do so, run submission.submit_to_eval_ai() to directly"
f" submit the results file using EvalAI-cli (command line interface)."
f" For the latter, make sure to `pip install evalai` and to set"
f" the EVAL_AI_PERSONAL_TOKEN env. variable to your EvalAI Auth token."
f"\n\nAlternativelly, you can submit the file using the submission webpage:"
f" https://eval.ai/web/challenges/challenge-page/{CO3D_CHALLENGE_ID}/submission"
f" ('{self.task.value}-{self.sequence_set.value}' track)\n"
f"Please note a submission using the 'Upload file' option will fail"
f" due the large size of the file. Use the 'File URL' option instead."
)
def _clear_gt_links(self):
gt_folders = glob.glob(os.path.join(self.submission_cache, "*", "GT_*"))
for gt_folder in gt_folders:
logger.debug(f"Clearing GT link directory {gt_folder}.")
shutil.rmtree(gt_folder)
def _export_results_to_hdf5(self):
# get all fls in the submission cache
all_fls = sorted(glob.glob(os.path.join(self.submission_cache, "*", "*", "*.png")))
result_dict = {
os.path.join(*(os.path.normpath(f).split(os.path.sep)[-3:])): f
for f in all_fls
if not os.path.split(os.path.dirname(f))[-1].startswith("GT_")
}
export_result_file_dict_to_hdf5(self.submission_archive, result_dict)
def link_results_from_existing_output_folder(self, output_folder: str) -> None:
"""
Link all results stored in a different output folder to the current
submission object.
Args:
output_folder: The output folder containing all results that will be
linked to the current submission object.
"""
other = CO3DSubmission(
task=self.task,
sequence_set=self.sequence_set,
output_folder=output_folder,
)
other.fill_results_from_cache()
for other_res in other._result_list:
self._link_existing_render(
os.path.join(output_folder, "submission_cache"),
other_res,
)
def fill_results_from_cache(self):
"""
Analyze the results already stored in self.submission_cache and register them
with the submission object.
"""
if not os.path.isdir(self.submission_cache):
logger.info(f"{self.submission_cache} folder does not exist.")
return
categories = os.listdir(self.submission_cache)
for category in categories:
cat_dir = os.path.join(self.submission_cache, category)
if not os.path.isdir(cat_dir):
continue
subset_names = os.listdir(cat_dir)
for subset_name in subset_names:
if subset_name.startswith("GT_"):
continue
submission_dir = os.path.join(cat_dir, subset_name)
submission_files = get_result_directory_file_names(submission_dir)
logger.info(
f"Adding {len(submission_files)} cached results"
f" from {category}/{subset_name}"
)
for submission_file in submission_files:
category_, sequence_name, frame_number = (
_submision_file_to_category_sequence_name_frame_number(
submission_file
)
)
assert category_==category
self._add_result_metadata(
category,
subset_name,
sequence_name,
frame_number,
)
def _fill_cache_from_hdf5(self, archive_path: str):
make_hdf5_file_links(archive_path, self.submission_cache)
def _is_timed_out(self):
if self.max_processing_time > 0:
return (time.time() - self._eval_start_time) > self.max_processing_time
else:
return False
def _get_remaining_submission_time(self):
if self.max_processing_time > 0:
return self.max_processing_time - (time.time() - self._eval_start_time)
else:
return float("Inf")
def evaluate_archive_file(
self,
archive_path: str,
num_workers: int = 0,
print_per_example_results: bool = False,
):
"""
Extract a file with exported results `archive_path` and evaluate.
Args:
archive_path: A path to the archive file containing exported results.
Such archive file can be exported using `self.export_results`.
"""
os.makedirs(self.submission_cache, exist_ok=True)
logger.info(f"Extracting {archive_path} into {self.submission_cache}.")
if self.export_format=="zip":
shutil.unpack_archive(archive_path, self.submission_cache, "zip")
elif self.export_format=="hdf5":
self._fill_cache_from_hdf5(archive_path)
else:
raise ValueError(f"Unknown export format {self.export_format}")
logger.info(f"Filling results from cache {self.submission_cache}.")
self.fill_results_from_cache()
return self.evaluate(
num_workers=num_workers,
print_per_example_results=print_per_example_results,
)
def evaluate(
self,
num_workers: int = 0,
print_per_example_results: bool = False,
):
"""
Locally evaluate the submission. Please note that this is only possible
on the unredacted development set.
"""
if not self.on_server:
if not os.path.isdir(self.dataset_root):
raise ValueError("For evaluation dataset_root has to be specified.")
if self.sequence_set == CO3DSequenceSet.TEST:
raise ValueError("Cannot evaluate on the hidden test set!")
else:
# server-side evaluation, do not use
if (
self.server_data_folder is not None
and os.path.isfile(self.server_data_folder)
and self.server_data_folder.endswith(".hdf5")
):
# this is ok, we allow hdf5 files here
logger.info(f"Server folder {self.server_data_folder} is a HDF5 file!")
# with open(self.server_data_folder,'rb') as f:
# md5hash = hashlib.md5(f.read()).hexdigest()
# logger.info(f"HDF5 file hash = {md5hash}")
elif (
self.server_data_folder is not None
and self.server_data_folder.endswith(".dbm")
):
logger.info(f"Server folder {self.server_data_folder} is a DBM file!")
for pfix in [".dat", ".dir"]:
if not os.path.isfile(self.server_data_folder + pfix):
raise ValueError(
f"The DBM {pfix} file for {self.server_data_folder} is missing!"
)
# ok again dbm is good
pass
elif (
self.server_data_folder is None
or not os.path.isdir(self.server_data_folder)
):
raise ValueError(
"For evaluation on the server server_data_folder has to be specified."
)
self._eval_start_time = time.time()
eval_batches_map = self.get_eval_batches_map(only_target_frame=True)
# buffers for results and exceptions
eval_exceptions = {}
eval_results = {}
for subset_i, ((category, subset_name), eval_batches) in enumerate(
eval_batches_map.items()
):
subset_eval_start = time.time()
logger.info(
f"Evaluating {category}/{subset_name} ({subset_i}/{len(eval_batches_map)})."
)
if self.max_processing_time > 0:
logger.info(
f"Remaining submission time: {self._get_remaining_submission_time():1.2f}."
)
pred_category_subset_dir = CO3DSubmission.get_submission_cache_image_dir(
self.submission_cache,
category,
subset_name,
)
# The case with no predicted results, or timed-out eval
if (
(not os.path.isdir(pred_category_subset_dir))
or (len(os.listdir(pred_category_subset_dir))==0)
or self._is_timed_out()
):
if self._is_timed_out():
logger.warning(f"Evaluation timed-out for {category}/{subset_name}!")
else:
logger.info(f"No evaluation predictions for {category}/{subset_name}")
eval_results[(category, subset_name)] = (None, None)
eval_exceptions[(category, subset_name)] = None
continue
# Make a temporary GT folder with symlinks to GT data based on eval batches
gt_category_subset_dir = CO3DSubmission.get_submission_cache_image_dir(
self.submission_cache,
category,
"GT_" + subset_name,
)
for b in eval_batches:
if self.on_server:
_link_eval_batch_data_from_server_db_to_gt_tempdir(
self.server_data_folder,
gt_category_subset_dir,
category,
b,
)
else:
_link_eval_batch_data_from_dataset_root_to_gt_tempdir(
self.dataset_root,
gt_category_subset_dir,
category,
b,
)
# Evaluate and catch any exceptions.
try:
eval_results[(category, subset_name)] = evaluate_file_folders(
pred_category_subset_dir,
gt_category_subset_dir,
num_workers=num_workers,
remaining_time=self._get_remaining_submission_time(),
print_per_example_results=print_per_example_results,
)
except Exception as exc:
logger.warning(f"Evaluation of {category}/{subset_name} failed!", exc_info=True)
eval_results[(category, subset_name)] = (None, None)
eval_exceptions[(category, subset_name)] = exc
if eval_results[(category, subset_name)][0] is not None:
# Print the current subset result
eval_result_string = " ".join([
f"{k}={v:.3f}"
for k, v in eval_results[(category, subset_name)][0].items()
])
logger.info(f"{category}/{subset_name} result: {eval_result_string}")
subset_eval_time = time.time() - subset_eval_start
logger.info(f"Evaluated {category}/{subset_name} in {subset_eval_time:.1f} sec")
# fill in missing eval results with blank prediction results
for (category, subset_name), (eval_result, _) in eval_results.items():
if eval_result is None:
logger.info(
f"Replacing metrics in {category}/{subset_name}"
+" with a blank prediction result."
)
eval_result_ = {}
for m in EVAL_METRIC_NAMES:
blank_render_metric_val = BLANK_PREDICTION_RESULTS[
(self.task, self.sequence_set)
][(category, subset_name)][m]
# eval_result_[m] = _get_missing_metric_val(m)
eval_result_[m] = blank_render_metric_val
eval_results[(category, subset_name)] = eval_result_, None
# Get the average results.
average_results = {}
for m in EVAL_METRIC_NAMES:
average_results[m] = sum(
eval_result[m] for eval_result, _ in eval_results.values()
) / len(eval_results)
eval_results[("MEAN", "-")] = average_results, None
# Generate a nice table and print.
tab_rows = []
for (category, subset_name), (eval_result, _) in eval_results.items():
tab_row = [category, subset_name]
tab_row.extend([eval_result[k] for k in EVAL_METRIC_NAMES])
tab_rows.append(tab_row)
table_str = tabulate(
tab_rows, headers=["Category", "Subset name", *EVAL_METRIC_NAMES]
)
logger.info("\n"+table_str)
# Store the human-readable table
table_txt_file = os.path.join(self.output_folder, "results.csv")
logger.info(f"Dumping the results table to {table_txt_file}.")
header=["Category", "Subset name", *EVAL_METRIC_NAMES]
with open(table_txt_file, 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(tab_rows)
# Store the recorded exceptions in the submissions folder.
with open(self.evaluate_exceptions_file, "wb") as f:
pickle.dump(eval_exceptions, f)
return eval_results
def _get_missing_metric_val(m: str):
return EVAL_METRIC_MISSING_VALUE[m]
def get_submission_image_name(category: str, sequence_name: str, frame_number: int):
return f"{category}_{sequence_name}_{frame_number}"
def _link_eval_batch_data_from_dataset_root_to_gt_tempdir(
dataset_root: str,
temp_dir: str,
category: str,
frame_index: Tuple[str, int, str],
):
sequence_name, frame_number, gt_image_path = frame_index
image_name = get_submission_image_name(category, sequence_name, frame_number)
os.makedirs(temp_dir, exist_ok=True)
for data_type in ["image", "depth", "mask", "depth_mask"]:
gt_data_path = gt_image_path.replace("/images/", f"/{data_type}s/")
if data_type=="depth":
gt_data_path = gt_data_path.replace(".jpg", ".jpg.geometric.png")
elif data_type in ("mask", "depth_mask"):
gt_data_path = gt_data_path.replace(".jpg", ".png")
tgt_image_name = f"{image_name}_{data_type}.png"
src = os.path.join(dataset_root, gt_data_path)
dst = os.path.join(temp_dir, tgt_image_name)
logger.debug(f"{src} <--- {dst}")
_symlink_force(src, dst)
def _link_eval_batch_data_from_server_db_to_gt_tempdir(
server_folder: str,
temp_dir: str,
category: str,
frame_index: Tuple[str, int, str],
):
sequence_name, frame_number, _ = frame_index
image_name = get_submission_image_name(category, sequence_name, frame_number)
os.makedirs(temp_dir, exist_ok=True)
for data_type in ["image", "depth", "mask", "depth_mask"]:
image_name_postfixed = image_name + f"_{data_type}.png"
dst = os.path.join(temp_dir, image_name_postfixed)
if server_folder.endswith(".hdf5") or server_folder.endswith(".dbm"):
# the folder is in fact an hdf5/dbm file
# so we just make a symlink pointing from the `dst` file
# to the hdf5/dbm database
db_file = server_folder
logger.debug(f"{dst}<---HDF5/DBM file path: {server_folder}")
link_file_to_db_file(db_file, dst)
else:
src = os.path.join(server_folder, image_name_postfixed)
logger.debug(f"{src}<---{dst}")
_symlink_force(src, dst)
def _submision_file_to_category_sequence_name_frame_number(file: str):
toks = os.path.split(file)[-1].split("_")
category = toks[0]
frame_number = int(toks[-1])
sequence_name = "_".join(toks[1:-1])
return category, sequence_name, frame_number
def _symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
|
co3d-main
|
co3d/challenge/co3d_submission.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import logging
import numpy as np
import dbm
import functools
import h5py
from io import BytesIO
from PIL import Image
from typing import Optional, Callable, Dict, Union
from tqdm import tqdm
from .data_types import CO3DSequenceSet, CO3DTask, RGBDAFrame
logger = logging.getLogger(__file__)
def store_rgbda_frame(rgbda_frame: RGBDAFrame, fl: str):
assert np.isfinite(rgbda_frame.depth).all()
store_mask(rgbda_frame.mask[0], fl + "_mask.png")
store_depth(rgbda_frame.depth[0], fl + "_depth.png")
store_image(rgbda_frame.image, fl + "_image.png")
if rgbda_frame.depth_mask is not None:
store_1bit_png_mask(rgbda_frame.depth_mask[0], fl + "_depth_mask.png")
def link_rgbda_frame_files(fl_existing: str, fl_src_link: str):
for pfix in ["_mask.png", "_depth.png", "_image.png", "_depth_mask.png"]:
link_tgt = fl_existing+pfix
link_src = fl_src_link+pfix
if os.path.islink(link_src):
os.remove(link_src)
elif os.path.isfile(link_src):
raise ValueError(f"Link source {link_src} is an actual file (not a link).")
if not os.path.isfile(link_tgt):
if pfix=="_depth_mask.png":
pass
else:
raise ValueError(f"Target file {link_tgt} does not exist!")
else:
if os.path.islink(link_src):
os.remove(link_src)
os.symlink(link_tgt, link_src)
def load_rgbda_frame(fl: str, check_for_depth_mask: bool = False) -> RGBDAFrame:
f = RGBDAFrame(
mask=load_mask(fl + "_mask.png")[None],
depth=load_depth(fl + "_depth.png")[None],
image=load_image(fl + "_image.png"),
)
if not np.isfinite(f.depth).all():
f.depth[~np.isfinite(f.depth)] = 0.0 # chuck the infs in depth
if check_for_depth_mask:
depth_mask_path = fl + "_depth_mask.png"
if os.path.isfile(depth_mask_path):
f.depth_mask = load_1bit_png_mask(depth_mask_path)[None]
return f
def store_1bit_png_mask(mask: np.ndarray, fl: str):
"""
mask: HxW
"""
Image.fromarray((mask*255).astype('u1'), mode='L').convert('1').save(fl, "PNG")
def load_1bit_png_mask(file: str) -> np.ndarray:
with Image.open(_handle_db_file(file)) as pil_im:
mask = (np.array(pil_im.convert("L")) > 0.0).astype(np.float32)
return mask
def load_mask(fl: str):
return np.array(Image.open(_handle_db_file(fl))).astype(np.float32) / 255.0
def store_mask(mask: np.ndarray, fl: str, mode: str = "L"):
"""
mask: HxW
"""
assert mask.ndim == 2
if mode == "L":
mpil = Image.fromarray((mask * 255.0).astype(np.uint8), mode="L").convert("L")
elif mode == "I;16":
mpil = Image.fromarray((mask * 255.0).astype(np.uint8), mode="I;16").convert(
"I;16"
)
else:
raise ValueError(mode)
mpil.save(fl, "PNG")
def load_depth(fl: str):
depth_pil = Image.open(_handle_db_file(fl))
depth = (
np.frombuffer(np.array(depth_pil, dtype=np.uint16), dtype=np.float16)
.astype(np.float32)
.reshape((depth_pil.size[1], depth_pil.size[0]))
)
assert depth.ndim == 2
return depth
def store_depth(depth: np.ndarray, fl: str):
assert depth.ndim == 2
depth_uint16 = np.frombuffer(depth.astype(np.float16), dtype=np.uint16).reshape(
depth.shape
)
Image.fromarray(depth_uint16).save(fl)
def load_image(fl: str):
return np.array(Image.open(_handle_db_file(fl))).astype(np.float32).transpose(2, 0, 1) / 255.0
def store_image(image: np.ndarray, fl: str):
assert image.ndim == 3
Image.fromarray((image.transpose(1, 2, 0) * 255.0).astype(np.uint8)).save(fl)
def _handle_db_file(fl_or_db_link: str):
"""
In case `fl_or_db_link` is a symlink pointing at an .hdf5 or .dbm database file,
this function returns a BytesIO object yielding the underlying file's binary data.
Otherwise, the function simply returns `fl_or_db_link`.
"""
fl_or_bytes_io = fl_or_db_link
for db_format, data_load_fun in (
(".hdf5", _get_image_data_from_h5),
(".dbm", _get_image_data_from_dbm),
):
fl_or_bytes_io = _maybe_get_db_image_data_bytes_io_from_file(
fl_or_db_link,
db_format,
data_load_fun,
)
if not isinstance(fl_or_bytes_io, str):
# logger.info(f"{fl} is {db_format}!")
break
return fl_or_bytes_io
def _maybe_get_db_image_data_bytes_io_from_file(
fl_or_db_link: str,
db_format: str,
data_load_fun: Callable,
) -> Union[str, BytesIO]:
"""
In case `fl_or_db_link` is a symlink pointing at a database file of type
`db_format`, this function calls `data_load_fun` with the database path and the
link path in order to retrieve a BytesIO object yielding the linked file's binary data.
Otherwise, the function simply returns `fl_or_db_link`.
"""
if os.path.islink(fl_or_db_link):
realpath = os.readlink(fl_or_db_link)
if not realpath.endswith(db_format):
return fl_or_db_link
db_path = fl_or_db_link
else:
return fl_or_db_link
return data_load_fun(realpath, db_path)
@functools.lru_cache(maxsize=1)
def _cached_dbm_open_for_read(dbmpath: str):
db = dbm.open(dbmpath, "r")
return db
def _get_image_data_from_dbm(dbmpath: str, fl: str):
flname = os.path.split(fl)[-1]
db = _cached_dbm_open_for_read(dbmpath)
# with dbm.open(dbmpath, "r") as db:
bin_data = db[flname]
return BytesIO(bin_data)
def _get_image_data_from_h5(h5path: str, fl: str):
with h5py.File(h5path, "r") as f:
flname = os.path.split(fl)[-1]
file_index = f["binary_data"].attrs
if flname not in file_index:
raise IndexError(f"{flname} not in {h5path}!")
idx = file_index[flname]
bin_data = f["binary_data"][idx]
return BytesIO(bin_data)
def get_category_to_subset_name_list(
dataset_root: str,
task: Optional[CO3DTask] = None,
sequence_set: Optional[CO3DSequenceSet] = None,
):
"""
Get the mapping from categories to existing subset names.
Args:
dataset_root: The dataset root folder.
task: CO3D Challenge task.
sequence_set: CO3D Challenge sequence_set.
Returns:
category_to_subset_name_list: A dict of the following form:
{
category: [subset_name_0, subset_name_1, ...],
...
}
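Example (a sketch; the dataset root is hypothetical and the exact subset names
depend on the local dataset version):
get_category_to_subset_name_list(
    "./co3d_dataset_root", task=CO3DTask.FEW_VIEW, sequence_set=CO3DSequenceSet.DEV
)
-> {"apple": ["fewview_dev"], "backpack": ["fewview_dev"], ...}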
"""
json_file = os.path.join(dataset_root, "category_to_subset_name_list.json")
with open(json_file, "r") as f:
category_to_subset_name_list = json.load(f)
# filter per-category subset lists by the selected task
if task is not None:
category_to_subset_name_list = {
category: [
subset_name
for subset_name in subset_name_list
if subset_name.startswith(task.value)
]
for category, subset_name_list in category_to_subset_name_list.items()
}
# filter per-category subset lists by the selected sequence set
if sequence_set is not None:
category_to_subset_name_list = {
category: [
subset_name
for subset_name in subset_name_list
if f"_{sequence_set.value}" in subset_name
]
for category, subset_name_list in category_to_subset_name_list.items()
}
# remove the categories with completely empty subset_name_lists
category_to_subset_name_list = {
c: l for c, l in category_to_subset_name_list.items() if len(l) > 0
}
# sort by category
category_to_subset_name_list = dict(sorted(category_to_subset_name_list.items()))
return category_to_subset_name_list
def load_all_eval_batches(
dataset_root: str,
task: Optional[CO3DTask] = None,
sequence_set: Optional[CO3DSequenceSet] = None,
remove_frame_paths: bool = False,
only_target_frame: bool = True,
):
"""
Load eval batches files stored in dataset_root into a dictionary:
{
(category, subset_name): eval_batches_index,
...
}
Args:
dataset_root: The root of the CO3DV2 dataset.
task: CO3D challenge task.
sequence_set: CO3D challenge sequence set.
remove_frame_paths: If `True`, removes the paths to frames from the loaded
dataset index.
only_target_frame: Loads only the first (evaluation) frame from each eval batch.
Returns:
eval_batches_dict: Output dictionary.
"""
category_to_subset_name_list = get_category_to_subset_name_list(
dataset_root,
task=task,
sequence_set=sequence_set,
)
eval_batches_dict = {}
for category, subset_name_list in category_to_subset_name_list.items():
for subset_name in subset_name_list:
# load the subset eval batches
eval_batches_dict[(category, subset_name)] = _load_eval_batches_file(
dataset_root,
category,
subset_name,
remove_frame_paths=remove_frame_paths,
only_target_frame=only_target_frame,
)
return eval_batches_dict
def _load_eval_batches_file(
dataset_root: str,
category: str,
subset_name: str,
remove_frame_paths: bool = True,
only_target_frame: bool = True,
):
eval_batches_fl = os.path.join(
dataset_root,
category,
"eval_batches",
f"eval_batches_{subset_name}.json",
)
with open(eval_batches_fl, "r") as f:
eval_batches = json.load(f)
if only_target_frame:
eval_batches = [
b[0] for b in eval_batches
] # take only the first (target evaluation) frame
if remove_frame_paths:
eval_batches = [b[:2] for b in eval_batches]
return eval_batches
def export_result_file_dict_to_hdf5(h5path: str, filedict: Dict[str, str]):
"""
Export the result files to an hdf5 file that will be sent to the EvalAI server:
Args:
h5path: Target hdf5 file path.
filedict: Dict in form {relative_file_path: absolute_file_path}
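Example of `filedict` (a sketch with hypothetical paths):
{"apple/manyview_dev_0/xxx_image.png": "/tmp/cache/apple/manyview_dev_0/xxx_image.png"}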
"""
logger.info(f"Exporting {len(filedict)} files to HDF5 file {h5path}.")
if len(filedict)==0:
raise ValueError("No data to export!")
assert h5path.endswith(".hdf5")
if os.path.isfile(h5path):
os.remove(h5path)
os.makedirs(os.path.dirname(h5path), exist_ok=True)
with h5py.File(h5path, "w", libver='latest') as fh5:
dt = h5py.special_dtype(vlen=np.dtype('uint8'))
max_path_len = max(len(p) for p in filedict.keys())
dset = fh5.create_dataset(
'binary_data', (len(filedict), ), dtype=dt, compression="gzip"
)
filepath_dset = fh5.create_dataset(
'filepaths',
(len(filedict), ),
dtype=h5py.string_dtype('utf-8', max_path_len),
# dtype=np.dtype(f'U{max_path_len}'),
compression="gzip"
)
index = {}
for idx, (rel_path, store_file) in enumerate(tqdm(filedict.items(), total=len(filedict))):
_store_binary_file_data_to_hd5_dataset(dset, store_file, idx)
flname = os.path.split(rel_path)[-1]
assert flname not in index, "Duplicate filenames!"
index[flname] = idx
filepath_dset[idx] = rel_path
logger.info(f"Updating index of {h5path}.")
dset.attrs.update(index)
def make_hdf5_file_links(h5path: str, root: str):
"""
Link all files whose binary data are stored in an HDF5 file `h5path` to
files under the root folder.
Args:
h5path: HDF5 file.
root: The root folder for exporting symlinks.
"""
logger.info(f"Making file links in {root} to DB data in {h5path}.")
assert h5path.endswith(".hdf5")
with h5py.File(h5path, "r") as fh5:
filepaths = [f.decode("UTF-8") for f in np.array(fh5["filepaths"])]
file_name_to_tgt_file = {
os.path.split(p)[-1]: os.path.join(root, p) for p in filepaths
}
dset = fh5["binary_data"]
index = dset.attrs
all_dirs = set(os.path.dirname(p) for p in file_name_to_tgt_file.values())
for dir_ in all_dirs:
os.makedirs(dir_, exist_ok=True)
for flname, _ in tqdm(index.items(), total=len(index)):
tgt_file = file_name_to_tgt_file[flname]
link_file_to_db_file(h5path, tgt_file)
def link_file_to_db_file(db_file: str, file: str, overwrite: bool = True):
"""
Make a symlink file->db_file
"""
if db_file.endswith(".hdf5"):
token = "__HDF5__:"
elif db_file.endswith(".dbm"):
token = "__DBM__:"
else:
raise ValueError(db_file)
if overwrite and (os.path.isfile(file) or os.path.islink(file)):
os.remove(file)
os.symlink(db_file, file)
# symlinks are cleaner ... do not use this anymore:
# with open(file, "w") as f:
# f.write(token+os.path.normpath(os.path.abspath(db_file)))
def _store_binary_file_data_to_hd5_dataset(dset, fl: str, idx: int):
with open(fl, "rb") as fin:
binary_data = fin.read()
dset[idx] = np.frombuffer(binary_data, dtype='uint8')
|
co3d-main
|
co3d/challenge/io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
co3d-main
|
co3d/challenge/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import logging
import time
from typing import Optional
from typing import Tuple
from .data_types import RGBDAFrame
EVAL_METRIC_NAMES = ["psnr_masked", "psnr_fg", "psnr_full_image", "depth_abs_fg", "iou"]
EVAL_METRIC_MISSING_VALUE = {
"psnr_masked": 0.0,
"psnr_fg": 0.0,
"psnr_full_image": 0.0,
"depth_abs_fg": 100000.0,
"iou": 0.0,
}
logger = logging.getLogger(__file__)
def eval_one(
pred: RGBDAFrame,
target: RGBDAFrame,
):
return eval_one_rgbda(
pred.image,
pred.depth,
pred.mask,
target.image,
target.depth,
target.mask,
gt_depth_mask=target.depth_mask,
)
def eval_one_rgbda(
image_rgb: np.ndarray,
depth_map: np.ndarray,
fg_mask: np.ndarray,
gt_image_rgb: np.ndarray,
gt_depth_map: np.ndarray,
gt_fg_mask: np.ndarray,
gt_depth_mask: Optional[np.ndarray] = None,
crop_around_fg_mask: bool = False,
gt_fg_mask_threshold: Optional[float] = 0.5,
):
"""
Args:
image_rgb: 3xHxW, black background
depth_map: 1xHxW
fg_mask: 1xHxW in {0, 1}
gt_image_rgb: 3xHxW, black background
gt_depth_map: 1xHxW
gt_fg_mask: 1xHxW in {0, 1}
gt_depth_mask: 1xHxW in {0, 1}
Returns:
eval_result: a dictionary {metric_name: str: metric_value: float}
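Example (a sketch with random inputs of matching shapes, not real data):
H, W = 128, 128
rand = lambda c: np.random.rand(c, H, W).astype(np.float32)
metrics = eval_one_rgbda(
    rand(3), rand(1) + 0.1, (rand(1) > 0.5).astype(np.float32),
    rand(3), rand(1) + 0.1, (rand(1) > 0.5).astype(np.float32),
)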
"""
# with Timer("start"):
for xn, x in zip(
("image_rgb", "fg_mask", "depth_map"),
(image_rgb, fg_mask, depth_map),
):
if not np.isfinite(x).all():
raise ValueError(f"Non-finite element in {xn}")
if gt_fg_mask_threshold is not None:
# threshold the gt mask if not done before
gt_fg_mask = (gt_fg_mask > gt_fg_mask_threshold).astype(np.float32)
# chuck non-finite depth
gt_depth_map[~np.isfinite(gt_depth_map)] = 0
if gt_depth_mask is not None:
gt_depth_map = gt_depth_map * gt_depth_mask
if crop_around_fg_mask:
raise NotImplementedError("")
fg_mask_box_xxyy = _get_bbox_from_mask(gt_fg_mask[0])
[
image_rgb,
depth_map,
fg_mask,
gt_image_rgb,
gt_depth_map,
gt_fg_mask,
gt_depth_mask,
] = [
x[
:,
fg_mask_box_xxyy[2]:fg_mask_box_xxyy[3],
fg_mask_box_xxyy[0]:fg_mask_box_xxyy[1],
] for x in [
image_rgb,
depth_map,
fg_mask,
gt_image_rgb,
gt_depth_map,
gt_fg_mask,
gt_depth_mask,
]
]
gt_image_rgb_masked = gt_image_rgb * gt_fg_mask
# with Timer("psnrs"):
psnr_masked = calc_psnr(image_rgb, gt_image_rgb_masked)
psnr_full_image = calc_psnr(image_rgb, gt_image_rgb)
psnr_fg = calc_psnr(image_rgb, gt_image_rgb_masked, mask=gt_fg_mask)
# with Timer("depth"):
mse_depth, abs_depth, aux_depth = calc_mse_abs_depth(
depth_map,
gt_depth_map,
gt_fg_mask,
crop=5,
)
# with Timer("iou"):
iou = calc_iou(fg_mask, gt_fg_mask)
return {
"psnr_masked": psnr_masked,
"psnr_fg": psnr_fg,
"psnr_full_image": psnr_full_image,
"depth_abs_fg": abs_depth,
"iou": iou,
}
def calc_psnr(
x: np.ndarray,
y: np.ndarray,
mask: Optional[np.ndarray] = None,
) -> np.float32:
"""
Calculates the Peak-signal-to-noise ratio between tensors `x` and `y`.
"""
mse = calc_mse(x, y, mask=mask)
psnr = np.log10(np.clip(mse, 1e-10, None)) * (-10.0)
return psnr
def calc_mse(
x: np.ndarray,
y: np.ndarray,
mask: Optional[np.ndarray] = None,
) -> np.float32:
"""
Calculates the mean square error between tensors `x` and `y`.
"""
if mask is None:
return np.mean((x - y) ** 2)
else:
mask_expand = np.broadcast_to(mask, x.shape)
return (((x - y) ** 2) * mask_expand).sum() / np.clip(
mask_expand.sum(), 1e-5, None
)
def rgb_l1(
pred: np.ndarray, target: np.ndarray, mask: Optional[np.ndarray] = None
) -> np.float32:
"""
Calculates the mean absolute error between the predicted colors `pred`
and ground truth colors `target`.
"""
if mask is None:
mask = np.ones_like(pred[:1])
return (np.abs(pred - target) * mask).sum() / np.clip(mask.sum(), 1, None)
def calc_mse_abs_depth(
pred: np.ndarray,
target: np.ndarray,
mask: np.ndarray,
crop: int,
get_best_scale: bool = True,
best_scale_clamp_thr: float = 1e-4,
) -> np.float32:
# crop
if crop > 0:
target = target[:, crop:-crop, crop:-crop]
pred = pred[:, crop:-crop, crop:-crop]
mask = mask[:, crop:-crop, crop:-crop]
target = target * mask
dmask = (target > 0.0).astype(np.float32)
dmask_mass = np.clip(dmask.sum(), 1e-4, None)
scale_l1 = scale_l2 = None
for l_norm in ["l1", "l2"]:
if get_best_scale:
# mult preds by a scalar "scale_best"
# s.t. we get best possible mse error
_optimal_scale = {
"l1": _optimal_l1_scale,
"l2": _optimal_l2_scale,
}[l_norm]
scale_best = _optimal_scale(
pred * dmask, target * dmask, best_scale_clamp_thr
)
pred_scaled = pred * scale_best
if l_norm=="l1":
scale_l1 = scale_best
elif l_norm=="l2":
scale_l2 = scale_best
else:
raise ValueError(l_norm)
else:
pred_scaled = pred
df = target - pred_scaled
if l_norm=="l1":
abs_depth = (dmask * np.abs(df)).sum() / dmask_mass
elif l_norm=="l2":
mse_depth = (dmask * (df ** 2)).sum() / dmask_mass
else:
raise ValueError(l_norm)
return mse_depth, abs_depth, {"scale_l1": scale_l1, "scale_l2": scale_l2}
def _optimal_l2_scale(pred, gt, clamp_thr):
"""
Return the scale s that minimizes ||gt - s pred||^2.
The inverse scale is clamped to [eps, Inf]
"""
xy = pred * gt
xx = pred * pred
scale_best = xy.mean() / np.clip(xx.mean(), clamp_thr, None)
return scale_best
def _optimal_l1_scale(pred, gt, clamp_thr):
"""
Return the scale s that minimizes |gt - s pred|_1.
The scale is clamped in [-max_scale, max_scale].
The optimal scale is computed over the flattened inputs.
"""
max_scale = 1 / clamp_thr
x, y = pred.reshape(-1), gt.reshape(-1)
pivots = y / np.clip(x, 1e-10, None)
perm = np.argsort(pivots)
pivots = pivots[perm]
x_sorted = x[perm]
score = -np.abs(x).sum() + 2 * np.cumsum(np.abs(x_sorted))
# find the index of first positive score
i = (score <= 0).astype(np.float32).sum().astype(np.int64)
# i = torch.unsqueeze(i, dim)
if i >= len(pivots.reshape(-1)):
# logger.warning("Scale outside of bounds!")
return 1.0
else:
scale = pivots[i]
scale = np.clip(scale, -max_scale, max_scale)
# scale = torch.take_along_dim(pivots, i, dim=dim)
# scale = torch.clip(scale, min=-max_scale, max=max_scale)
# outshape = [s for si, s in enumerate(y.shape) if si != dim]
# scale = scale.view(outshape)
return float(scale)
def calc_iou(
predict: np.ndarray,
target: np.ndarray,
mask: Optional[np.ndarray] = None,
threshold: Optional[float] = 0.5,
) -> np.float32:
"""
Computes the intersection-over-union (IoU), which emphasizes the active
regions of the prediction and the target.
"""
if threshold is not None:
predict = (predict >= threshold).astype(np.float32)
target = (target >= threshold).astype(np.float32)
if mask is not None:
predict = predict * mask
target = target * mask
intersect = (predict * target).sum()
union = (predict + target - predict * target).sum() + 1e-4
return intersect / union
def _get_bbox_from_mask(
mask: np.ndarray,
box_crop_context: float = 0.1,
thr: float = 0.5,
decrease_quant: float = 0.05,
) -> Tuple[int, int, int, int]:
# returns the bounding box bounds in xxyy format: (x0, x1, y0, y1)
masks_for_box = np.zeros_like(mask)
while masks_for_box.sum() <= 1.0:
masks_for_box = (mask > thr).astype(np.float32)
thr -= decrease_quant
assert thr > 0.0
x0, x1 = _get_1d_bounds(masks_for_box.sum(axis=-2))
y0, y1 = _get_1d_bounds(masks_for_box.sum(axis=-1))
h, w = y1 - y0 + 1, x1 - x0 + 1
if box_crop_context > 0.0:
c = box_crop_context
x0 -= w * c / 2
y0 -= h * c / 2
h += h * c
w += w * c
x1 = x0 + w
y1 = y0 + h
x0, x1 = [np.clip(x_, 0, mask.shape[1]) for x_ in [x0, x1]]
y0, y1 = [np.clip(y_, 0, mask.shape[0]) for y_ in [y0, y1]]
return np.round(np.array([x0, x1, y0, y1])).astype(int).tolist()
def _get_1d_bounds(arr: np.ndarray) -> Tuple[int, int]:
nz = np.flatnonzero(arr)
return nz[0], nz[-1]
class Timer:
def __init__(self, name=None):
self.name = name if name is not None else "timer"
def __enter__(self):
self.start = time.time()
def __exit__(self, exc_type, exc_value, traceback):
logger.info(f"{self.name} - {time.time() - self.start:.3e} sec")
|
co3d-main
|
co3d/challenge/metric_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import numpy as np
from dataclasses import dataclass
from typing import Optional
@dataclass
class RGBDAFrame:
image: np.ndarray
mask: np.ndarray
depth: np.ndarray
depth_mask: Optional[np.ndarray] = None
class CO3DTask(Enum):
MANY_VIEW = "manyview"
FEW_VIEW = "fewview"
class CO3DSequenceSet(Enum):
TRAIN = "train"
DEV = "dev"
TEST = "test"
|
co3d-main
|
co3d/challenge/data_types.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import zipfile
import glob
import logging
import multiprocessing
import numpy as np
import time
from tqdm import tqdm
from collections import defaultdict
from typing import List, Dict, Tuple
from .data_types import CO3DSequenceSet, CO3DTask, RGBDAFrame
from .metric_utils import eval_one, EVAL_METRIC_NAMES, Timer
from .io import load_rgbda_frame
logger = logging.getLogger(__file__)
def get_co3d_task_from_subset_name(subset_name: str) -> CO3DTask:
if subset_name.startswith("manyview"):
return CO3DTask.MANY_VIEW
elif subset_name.startswith("fewview"):
return CO3DTask.FEW_VIEW
else:
raise ValueError(f"Invalid subset name {subset_name}!")
def get_co3d_sequence_set_from_subset_name(subset_name: str) -> CO3DSequenceSet:
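# e.g. "manyview_dev_0" -> CO3DSequenceSet.DEV (the sequence set is the 2nd "_"-separated token)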
return CO3DSequenceSet(subset_name.split("_")[1])
def unzip(file_path: str, output_dir: str):
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(output_dir)
def check_user_submission_file_paths(
ground_truth_files: Dict[str, str],
user_submission_files: Dict[str, str],
):
missing_gt_examples = [
gt_example_name
for gt_example_name in ground_truth_files
if gt_example_name not in user_submission_files
]
if len(missing_gt_examples) > 0:
raise ValueError(
f"There are missing evaluation examples: {str(missing_gt_examples)}"
)
additional_user_examples = [
user_example
for user_example in user_submission_files
if user_example not in ground_truth_files
]
if len(additional_user_examples) > 0:
raise ValueError(
f"Unexpected submitted evaluation examples {str(additional_user_examples)}"
)
def get_data_type_postfix(data_type: str):
assert data_type in ["image", "mask", "depth", "depth_mask"]
return f"_{data_type}.png"
def get_result_directory_file_names(
result_dir: str, has_depth_masks: bool = False,
) -> Dict[str, str]:
"""
Result directory structure:
<test_example_name>_image.png
<test_example_name>_mask.png
<test_example_name>_depth.png
...
Returns:
result_files: dict {test_example_name_i: root_path_i}
"""
result_type_files = {}
for result_type in ("image", "mask", "depth"):
postfix = get_data_type_postfix(result_type)
matching_files = sorted(glob.glob(os.path.join(result_dir, f"*{postfix}")))
if has_depth_masks and result_type=="mask":
matching_files = [
f for f in matching_files
if not f.endswith(get_data_type_postfix("depth_mask"))
]
result_type_files[result_type] = {
os.path.split(f)[-1][: -len(postfix)]: f for f in matching_files
}
example_names = sorted(
list(
set(
[
n
for t in ("image", "mask", "depth")
for n in result_type_files[t].keys()
]
)
)
)
missing_examples = defaultdict(list)
for example_name in example_names:
for result_type in ("image", "mask", "depth"):
if example_name not in result_type_files[result_type]:
missing_examples[example_name].append(result_type)
if len(missing_examples) > 0:
msg = "\n".join(
[f" {k} missing {str(v)}" for k, v in missing_examples.items()]
)
raise ValueError(
f"Some evaluation examples in {result_dir} are incomplete:\n"
+ msg
)
result_files = {
example_name: result_type_files["image"][example_name][: -len("_image.png")]
for example_name in example_names
}
return result_files
def _evaluate_pred_gt_pair(args: Tuple[str, str, str, float, bool]):
gt_example, gt_file, pred_file, max_time, print_status = args
cur_time = time.time()
if cur_time > max_time:
raise ValueError(
" @@@@@@@@@@@@@@@@@@@@@\n"
" Evaluation timed out!\n"
" @@@@@@@@@@@@@@@@@@@@@"
)
# with Timer("io"):
gt_rgbda = load_rgbda_frame(gt_file, check_for_depth_mask=True)
pred_rgbda = load_rgbda_frame(pred_file)
# with Timer("check"):
check_same_rgbda_sizes(gt_rgbda, pred_rgbda, gt_example)
# with Timer("eval"):
eval_result_one = eval_one(pred_rgbda, gt_rgbda)
for k, v in eval_result_one.items():
if not np.isfinite(v):
raise ValueError(f"{gt_example} - {k} is does not have a finite value.")
if print_status:
msg = "; ".join([f"{k}={v:.3f}" for k, v in eval_result_one.items()])
sz = str(list(gt_rgbda.image.shape[-2:])).replace(" ", "")
logger.info(
f"eval_one({gt_example}-[{sz}]): {msg}; {max_time-cur_time:.1f} sec left"
)
return eval_result_one
def evaluate_file_folders(
pred_folder: str,
gt_folder: str,
num_workers: int = 0,
remaining_time: float = float("Inf"),
print_per_example_results: bool = True,
):
# determine how much time we have left for the evaluation
max_time = time.time() + remaining_time
user_submission_files = get_result_directory_file_names(pred_folder)
ground_truth_files = get_result_directory_file_names(gt_folder, has_depth_masks=True)
logger.info(f"Evaluating folders: prediction={pred_folder}; gt={gt_folder}")
check_user_submission_file_paths(
ground_truth_files,
user_submission_files,
)
# At this point we are sure that ground_truth_files contain the same
# examples as user_submission_files.
if num_workers <= 0:
# Iterate over the gt examples:
per_example_results = [
_evaluate_pred_gt_pair(
(
gt_example,
ground_truth_files[gt_example],
user_submission_files[gt_example],
max_time,
print_per_example_results,
)
) for gt_example in tqdm(list(ground_truth_files))
]
# gt_rgbda = load_rgbda_frame(ground_truth_files[gt_example], check_for_depth_mask=True)
# pred_rgbda = load_rgbda_frame(user_submission_files[gt_example])
# check_same_rgbda_sizes(gt_rgbda, pred_rgbda, gt_example)
# per_example_results.append(eval_one(pred_rgbda, gt_rgbda))
else:
# parallel processing
arg_list = [
(
gt_example,
ground_truth_files[gt_example],
user_submission_files[gt_example],
max_time,
print_per_example_results,
) for gt_example in list(ground_truth_files)
]
pool = multiprocessing.Pool(num_workers)
per_example_results = [
result for result in tqdm(
pool.imap(_evaluate_pred_gt_pair, arg_list),
total=len(arg_list),
)
]
pool.terminate()
result = {
metric: (sum(r[metric] for r in per_example_results) / len(per_example_results))
for metric in EVAL_METRIC_NAMES
}
return result, per_example_results
def check_same_rgbda_sizes(gt: RGBDAFrame, pred: RGBDAFrame, example_name: str):
for data_type in ("image", "mask", "depth"):
gt_size, pred_size = [getattr(x, data_type).shape for x in [gt, pred]]
if gt_size != pred_size:
raise ValueError(
f"{example_name}'s size does not match the ground truth."
f"{data_type} size: {str(gt_size)} != {str(pred_size)}"
" (ground-truth vs. prediction)."
)
return True
def get_annotations_folder(phase_codename: str):
assert phase_codename in {"dev", "test"}
return os.path.join("annotations", phase_codename)
|
co3d-main
|
co3d/challenge/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import StringIO
import os
import csv
from typing import List, Any
from .data_types import CO3DTask, CO3DSequenceSet
BLANK_PREDICTION_RESULTS = {}
def _read_result_csv(s: str):
# with open(fl, "r") as f:
f = StringIO(s)
csvreader = csv.reader(f)
rows = [row for row in csvreader]
rows = rows[1:]
header = rows[0]
data = rows[1:-1]
def _getcol(col_name: str, row: List[Any]) -> Any:
c = row[header.index(col_name)]
try:
return float(c)
except:
return c
parsed = {
(_getcol("Category", r), _getcol("Subset name", r)): {
k: _getcol(k, r) for k in header
} for r in data
}
return parsed
CSVs = {
"fewview_dev": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,fewview_dev,18.40938866633708,6.884780900276403,5.732459292886711,0.48950375965076004,0.0
backpack,fewview_dev,18.375179837644755,11.884768822089297,5.492699127831022,0.580444590643848,0.0
ball,fewview_dev,15.65596825167019,5.697924649467918,5.391241119316918,0.43991856992712286,0.0
banana,fewview_dev,18.461971791362227,6.118058719441003,5.8697287026999625,0.5312080518960041,0.0
baseballbat,fewview_dev,20.451565072548348,6.7702838462526325,6.133595679990426,0.787964382936369,0.0
baseballglove,fewview_dev,15.899123723379235,8.491206359449485,5.952075366998026,0.5044438522210485,0.0
bench,fewview_dev,13.835660454286623,6.1021708060060185,5.338972434739994,0.8728473659927769,0.0
bicycle,fewview_dev,14.85079899106894,7.178515383648441,5.4468849723020165,0.7596495667817377,0.0
book,fewview_dev,13.526778301589218,5.929520397898452,6.1038428839075625,0.7119168557685552,0.0
bottle,fewview_dev,17.756936987543572,7.695879675777415,5.792669536453962,1.1126274259151023,0.0
bowl,fewview_dev,12.117324340446702,3.522034136500667,6.132690804727037,0.548212652825193,0.0
broccoli,fewview_dev,17.60270342882336,8.135587140185267,5.636059385848195,0.48109570750702163,0.0
cake,fewview_dev,14.831394456777907,6.641730746137352,5.778288244687103,0.4713467452914664,0.0
car,fewview_dev,12.199833440326447,6.2695458065545955,5.7147062915561,0.6731242096715442,0.0
carrot,fewview_dev,18.42032666772822,6.336027619876071,5.2655157144357,0.7425826445279987,0.0
cellphone,fewview_dev,18.54815997270957,9.132949039155196,5.920507132031587,0.7256476083461838,0.0
chair,fewview_dev,14.254104990224922,6.8885175096457525,5.42230365019509,0.8701949198272996,0.0
couch,fewview_dev,12.096141908081652,8.498063614467037,6.839693292778098,0.6672055849897333,0.0
cup,fewview_dev,16.30300593190912,6.263725950094426,5.419278138684526,1.109737605178693,0.0
donut,fewview_dev,17.760249549810045,7.19401090262162,5.406775287613137,0.5831024075924244,0.0
frisbee,fewview_dev,13.006974807290442,5.348851057119092,6.081314892526941,0.6282357528069842,0.0
hairdryer,fewview_dev,18.307693246477385,7.653327373043194,5.796698293526376,0.5692578716769887,0.0
handbag,fewview_dev,16.863888776603684,9.668777191048893,5.885582988575421,0.6140305534695657,0.0
hotdog,fewview_dev,16.576000201780598,6.7813353163227275,6.479828364566311,0.5515738226619902,0.0
hydrant,fewview_dev,14.35863704229326,5.557106534568748,5.486735221682155,0.7370800150837736,0.0
keyboard,fewview_dev,18.319239151881423,10.9398173290579,5.471888028766401,0.591969625411462,0.0
kite,fewview_dev,13.759580600059902,6.095096560743659,5.5797533716568335,0.3686704352187232,0.0
laptop,fewview_dev,17.958107529829775,10.58932076091378,5.9870485037655365,0.6760399403943799,0.0
microwave,fewview_dev,12.641232654595555,7.5579894876019935,5.7736075695959785,0.7816656712123962,0.0
motorcycle,fewview_dev,13.902730964332383,7.837737363341203,5.6993349939287,0.8026270041676278,0.0
mouse,fewview_dev,22.139654039699753,11.380540045528843,5.26534717648027,0.6258851366555073,0.0
orange,fewview_dev,16.965398815565717,5.392140191707388,5.868309801114943,0.45518186645635506,0.0
parkingmeter,fewview_dev,17.969579417828633,8.303453741571293,5.550653705252322,2.7703986799279625,0.0
pizza,fewview_dev,14.044388259713267,6.467125499434811,6.349638051827558,0.5445261030741094,0.0
plant,fewview_dev,15.912698636112678,8.209728015160032,5.41847542705161,0.9729385734872266,0.0
remote,fewview_dev,18.901389746835065,8.809855001539868,5.6508358729724995,0.5809070430213752,0.0
sandwich,fewview_dev,14.961081916655587,5.359419050654777,6.486182655727676,0.5273259918746086,0.0
skateboard,fewview_dev,15.12940600031295,6.633805444460857,6.075841409914119,0.5708218125938797,0.0
stopsign,fewview_dev,18.52676122564753,6.61671306856769,5.412139613407474,6.290707304470178,0.0
suitcase,fewview_dev,16.493029339685542,10.757954804495968,6.232275999259873,0.5967537541074001,0.0
teddybear,fewview_dev,12.49373038673622,5.562061567728542,5.8834174182726855,0.6012993745910462,0.0
toaster,fewview_dev,15.590308176317933,8.571510283192422,5.8223530170835565,0.7087675899756055,0.0
toilet,fewview_dev,11.053325723237059,3.745954412389449,5.831752233322646,0.7324808735388084,0.0
toybus,fewview_dev,15.74397288343334,5.87386919966778,5.694742423634763,0.644572040998336,0.0
toyplane,fewview_dev,15.271423476084475,4.920347774565625,5.603913746917713,0.5686183372913356,0.0
toytrain,fewview_dev,19.250492955217194,8.365187557837626,5.5957012947860445,0.6429103676877059,0.0
toytruck,fewview_dev,15.813126824200825,7.367196186168707,5.59287438907558,0.5748745851615271,0.0
tv,fewview_dev,18.455985344741848,11.821412211957313,5.87636504861574,0.6193668766022515,0.0
umbrella,fewview_dev,13.388214509185625,6.669691460242465,5.398996667950242,0.5547154568934756,0.0
vase,fewview_dev,17.385895374160103,7.695607020715037,5.667400967410725,1.0544596567185702,0.0
wineglass,fewview_dev,14.92593215613611,5.489494483032894,5.883318241506832,2.09036588666451,0.0
MEAN,-,16.028754842096472,7.3270142749005025,5.768476753918801,0.8374863237526772,0.0
""",
"fewview_test": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,fewview_test,18.51983235506069,6.710896207691665,5.622396257710374,0.45868530307683764,0.0
backpack,fewview_test,15.329616295156082,9.704246779430184,6.021398266902823,0.5274631579925675,0.0
ball,fewview_test,16.999140797902346,6.393148333684946,6.167099298585788,0.42074640466733093,0.0
banana,fewview_test,17.20449002482513,6.2347690237546765,5.337301584435467,0.5906480660508107,0.0
baseballbat,fewview_test,20.598735999896142,6.724621984421882,5.929346230877072,0.46383516633969724,0.0
baseballglove,fewview_test,16.250018316676424,8.485414452103313,5.35050821728197,0.5755057054113818,0.0
bench,fewview_test,13.380691505741307,6.217615311139159,5.389882231932645,0.8591881917970259,0.0
bicycle,fewview_test,15.804150486121728,8.539006404409536,7.293404052140095,0.7740858337090635,0.0
book,fewview_test,14.350489743207989,5.356299926470255,5.138131270946916,0.6249600811612394,0.0
bottle,fewview_test,17.257503711230473,7.332068784914889,5.825424785199224,1.0062512850600411,0.0
bowl,fewview_test,12.7586871865527,5.952472495887487,7.350451995400975,0.7734948803009338,0.0
broccoli,fewview_test,17.69069033947863,8.250871950138103,5.718669980890903,0.5437043438960382,0.0
cake,fewview_test,14.809462963950144,6.142164342026519,6.145654847812541,0.45489466623242036,0.0
car,fewview_test,11.914391205648087,6.5335541836879925,5.90360267479956,0.9021454444786102,0.0
carrot,fewview_test,20.060924545297425,6.219697054467009,5.261149123525815,0.7081597814658059,0.0
cellphone,fewview_test,21.520117285013956,10.847631110964242,5.41747877060995,1.0517241006106035,0.0
chair,fewview_test,14.691657730804202,8.959579180137167,6.878377818012938,0.8045192519054911,0.0
couch,fewview_test,11.545670382508696,8.419983656626247,6.902446179473004,0.6761085327114593,0.0
cup,fewview_test,17.79448614165711,6.495705819546957,5.5050360165654855,0.8834131631626546,0.0
donut,fewview_test,18.596152225400257,6.892531195772306,6.240000810567556,0.5443665622620474,0.0
frisbee,fewview_test,14.370690470903668,6.048295011020775,6.136056575421687,0.4830201400666513,0.0
hairdryer,fewview_test,18.47390481689051,7.494774772300304,5.743646634555602,0.5239972887128962,0.0
handbag,fewview_test,13.87987101022776,8.280409779606966,6.572322491579377,0.6866448922525301,0.0
hotdog,fewview_test,18.436410464732152,7.713564800659037,5.859372904290447,0.5873852722036716,0.0
hydrant,fewview_test,14.768617799865435,5.67036284794227,5.71565321761019,0.9328092564314482,0.0
keyboard,fewview_test,18.875163364703024,10.97846088231997,5.392007807994692,0.42114457863505195,0.0
kite,fewview_test,12.882975207164943,6.079375329369365,5.243720977367847,0.571440938913041,0.0
laptop,fewview_test,16.68965246676936,9.765618650745138,6.127183977142236,0.8968296529628422,0.0
microwave,fewview_test,13.859058432153368,8.649172226048128,6.809269971869398,0.8740670698190732,0.0
motorcycle,fewview_test,12.922201328542098,7.659321482648036,5.3469570020173816,0.7923491167407205,0.0
mouse,fewview_test,25.03083236821661,10.870194079196883,5.61381320415904,0.5803283306516662,0.0
orange,fewview_test,17.906264108511905,5.863058031859002,5.902648030774557,0.4927651700044394,0.0
parkingmeter,fewview_test,24.486359595107576,10.777998512312754,4.875545759481984,3.9189161735406275,0.0
pizza,fewview_test,15.25053153218815,6.195657831341678,5.888809317232928,0.5366542850357786,0.0
plant,fewview_test,14.533347345876026,8.213483475587314,5.9657101837783895,0.8745105580745663,0.0
remote,fewview_test,18.685696193857062,9.167126712684974,5.283444994288521,0.5784209284648094,0.0
sandwich,fewview_test,14.954638830523134,5.489779040424508,6.203690658497073,0.582476274688696,0.0
skateboard,fewview_test,18.921604245076754,8.111335322871586,4.540996792864179,0.8144729054641098,0.0
stopsign,fewview_test,20.83021952727707,7.7066182145576425,5.596606825038416,6.195708155269956,0.0
suitcase,fewview_test,14.568523293458965,8.872585021337093,5.526936386940414,0.5437482494754128,0.0
teddybear,fewview_test,13.184137897313038,5.667378086474551,5.638538121962938,0.6289599526865502,0.0
toaster,fewview_test,15.398766247640951,8.138341096517484,6.073562974743127,0.7335666912630792,0.0
toilet,fewview_test,10.138714105703048,3.8756171226863025,5.85450160774978,0.7892172212095283,0.0
toybus,fewview_test,15.925097991923954,6.517829456639026,5.691133527297476,0.6022958688384993,0.0
toyplane,fewview_test,16.703705769834098,5.323541429433026,5.46165954412417,0.5639341931778066,0.0
toytrain,fewview_test,17.859279914562713,7.8933999002371715,5.604032948369101,0.6932112812874591,0.0
toytruck,fewview_test,16.971557700694344,7.745719186191729,5.794916102483104,0.564653671235697,0.0
tv,fewview_test,18.037750946556894,13.741247943038163,8.747561838523023,0.5162819237405952,0.0
umbrella,fewview_test,13.092407842058238,6.756963662911218,5.447907114523638,0.534506784839016,0.0
vase,fewview_test,18.54297573271471,8.090029952142554,5.668374190385807,0.84122947818443,0.0
wineglass,fewview_test,16.386668940524114,5.5524702294978345,5.735686759902533,1.4353355366647544,0.0
MEAN,-,16.463618328111792,7.555333495840728,5.871765271698825,0.8516623875064206,0.0
""",
"manyview_dev": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,manyview_dev_0,18.264030492114536,8.350223131127144,4.366539721003419,0.4195637484678012,0.0
apple,manyview_dev_1,14.137138507072345,6.6045994842301345,6.240087240624211,0.43567804409070654,0.0
ball,manyview_dev_0,14.673712693605873,6.091306495279248,5.217217027846326,0.35927968102112323,0.0
ball,manyview_dev_1,11.090845071075146,4.64095367064294,2.463653189968876,0.30228020972164427,0.0
bench,manyview_dev_0,13.333540945296608,4.137188797564715,3.844656341335867,0.8008696769825814,0.0
bench,manyview_dev_1,11.474174975542255,3.892151505117967,4.14563643434561,0.8577265682977291,0.0
book,manyview_dev_0,13.964168705937992,5.302433873449493,5.950633752149304,0.668803861808978,0.0
book,manyview_dev_1,12.398406799192342,4.119572830245314,6.039375672561894,0.8608240982086351,0.0
bowl,manyview_dev_0,16.958798002755774,4.9461020198227335,5.578702964374623,0.6690737351712432,0.0
bowl,manyview_dev_1,12.420483353954074,5.756645234213993,6.069489156010504,0.5819949787763078,0.0
broccoli,manyview_dev_0,19.630737300870244,9.406282525085935,6.402535226376115,0.7907156923061898,0.0
broccoli,manyview_dev_1,18.781287064441447,8.09672300742875,4.67134680549106,0.4626196557341922,0.0
cake,manyview_dev_0,14.799043006158593,5.867235047104056,5.7329760554862945,0.5205964759006821,0.0
cake,manyview_dev_1,17.84162321617,9.41822453353167,3.7158681607815254,0.3612821873000541,0.0
donut,manyview_dev_0,19.315033141413654,9.455566547834058,3.910254156226572,0.5413953368124613,0.0
donut,manyview_dev_1,22.26734997183049,10.174649831308487,4.199195894665875,0.5521516658527057,0.0
hydrant,manyview_dev_0,14.599159376924849,5.655154414726878,5.289620369144585,0.9737327772204973,0.0
hydrant,manyview_dev_1,14.544431000855953,5.876377992594626,4.506377178812374,1.0210153410111495,0.0
mouse,manyview_dev_0,22.553107676356586,12.793445604091437,5.927286492328659,0.5816200334131308,0.0
mouse,manyview_dev_1,17.89414321396086,8.956320087603723,7.097351162295129,0.5222896946353802,0.0
orange,manyview_dev_0,13.732343455171254,5.052956697685929,5.679024711561304,0.40213060027513875,0.0
orange,manyview_dev_1,14.71190574360874,4.956667990371484,5.836996460679712,0.43328379232231895,0.0
plant,manyview_dev_0,17.56722473025224,10.851111767732277,6.940102616941581,0.9601928359930311,0.0
plant,manyview_dev_1,18.62091024389777,11.114146143571679,8.919832772445316,0.845715675126882,0.0
remote,manyview_dev_0,12.004470911615606,2.3372367853347664,5.928692360063941,0.6355222400483482,0.0
remote,manyview_dev_1,13.035720177392095,4.368321832863184,3.7645273565115303,0.6257342864206513,0.0
skateboard,manyview_dev_0,14.087374862144243,6.183930758291541,7.7026533167035085,0.7381270587952287,0.0
skateboard,manyview_dev_1,15.24606555170737,6.935641480347134,6.728247832458047,0.6846367731825937,0.0
suitcase,manyview_dev_0,13.819257223346327,5.727869083939035,5.9663188950446795,0.42728104332046707,0.0
suitcase,manyview_dev_1,23.33527836247522,12.70130752964975,5.440617175698944,0.7376517524662343,0.0
teddybear,manyview_dev_0,15.310590723595963,7.5183318102880765,5.187722505560557,0.6132311702409632,0.0
teddybear,manyview_dev_1,19.00287693135702,11.380410989980264,5.372428296399181,0.655451568067443,0.0
toaster,manyview_dev_0,16.09490094737935,7.357336873218335,5.733018822009381,0.6335824697011363,0.0
toaster,manyview_dev_1,13.391233953784758,6.32606222531527,6.035255066975607,0.7543408733149064,0.0
toytrain,manyview_dev_0,14.60365232137707,8.252354438191217,7.28055045581793,0.5177963318470418,0.0
toytrain,manyview_dev_1,20.508004149463403,10.310151926704073,8.745624247957407,0.4164560185628414,0.0
toytruck,manyview_dev_0,18.495843812347488,9.077851138541167,4.742593752879244,0.8234759152694971,0.0
toytruck,manyview_dev_1,12.550467820571148,5.368998580430165,6.689171662380995,0.581289871598415,0.0
vase,manyview_dev_0,18.188943183563104,9.441252383753767,3.3505357321672142,0.7542355580664746,0.0
vase,manyview_dev_1,18.434184156563,9.303826519080554,6.071437833814365,0.9019223769623579,0.0
MEAN,-,16.092061594428568,7.352673089707325,5.58710387189748,0.635639291857879,0.0
""",
"manyview_test": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,manyview_test_0,16.22478731544839,6.660985912339718,8.662890866941595,0.5735152991789598,0.0
backpack,manyview_test_0,18.664239087697137,12.092836660079621,3.9911394799946835,0.7187691122198704,0.0
ball,manyview_test_0,17.053273275949497,11.47813547143793,5.494760070704971,0.24760313752451854,0.0
banana,manyview_test_0,19.09250116156104,5.624412642679121,4.915562631182255,0.6388887597635459,0.0
baseballbat,manyview_test_0,17.662719299079523,3.56448996833759,6.856655466723437,0.5858372717711078,0.0
baseballglove,manyview_test_0,15.822024491958919,9.008496845518556,4.958078518403922,0.517665349356982,0.0
bench,manyview_test_0,16.177405149477067,5.64144135201049,6.639758049666188,0.9396015318702626,0.0
bicycle,manyview_test_0,18.929300038845177,8.384269505927424,4.978158575183426,0.7192708133061682,0.0
book,manyview_test_0,14.243260388807064,6.680398318324483,5.9082871869853735,0.9097958583065434,0.0
bottle,manyview_test_0,14.627587579689477,5.485474059329347,5.806882899714011,1.2365226740951725,0.0
bowl,manyview_test_0,12.58297015755071,4.721445807873399,6.174942733659999,0.5651215302382757,0.0
broccoli,manyview_test_0,15.348378477682894,9.138928269423888,6.406522886996562,0.46622630548488525,0.0
cake,manyview_test_0,12.406031259153915,9.13497199802905,6.954300602123617,0.7135451548332193,0.0
car,manyview_test_0,10.536444455719398,6.3033794761422826,5.589254154468083,0.6075981188742273,0.0
carrot,manyview_test_0,15.052122330808963,5.001683408210913,6.975324034802911,0.6913476205193215,0.0
cellphone,manyview_test_0,18.548592045129272,5.477199696294225,5.405821575968376,0.8925134146832333,0.0
chair,manyview_test_0,9.288750627933801,5.559044610507649,5.063084903423689,0.5832447059416495,0.0
couch,manyview_test_0,15.542901771081734,10.090205474555033,7.091879909602398,0.530379736402723,0.0
cup,manyview_test_0,14.565042555686277,4.3989084024686305,5.8416712646107225,0.9809843195171222,0.0
donut,manyview_test_0,15.455254561260311,7.186638190791148,6.08943365801032,0.42916104004956795,0.0
frisbee,manyview_test_0,16.030436839496698,8.25580372425949,3.6125508386557295,0.7820506512812717,0.0
hairdryer,manyview_test_0,22.640570140053246,11.702523731191262,4.159711019086314,0.616971255937149,0.0
handbag,manyview_test_0,24.14781075331437,15.091930028917984,5.223221264801334,0.562664145074455,0.0
hotdog,manyview_test_0,12.244917262623947,4.72460505473762,6.9914703226785,0.5147290560374835,0.0
hydrant,manyview_test_0,16.892200853920816,6.5057584631969645,6.307555495359107,0.8690763104982895,0.0
keyboard,manyview_test_0,14.937059706035933,10.816605585432766,4.857196169187754,0.5188802050007122,0.0
kite,manyview_test_0,15.068337896849323,6.205118297721433,5.276287557112783,0.7494832801627337,0.0
laptop,manyview_test_0,14.59345603707514,7.090074167371421,6.2162237610589814,0.7413216109605885,0.0
motorcycle,manyview_test_0,14.442903913583953,8.56222345535462,6.50899995433291,0.7010114811016933,0.0
mouse,manyview_test_0,29.8885518296015,14.145685466149715,5.406173914859613,0.5942925002348606,0.0
orange,manyview_test_0,11.525661011646141,5.745001890928845,5.983235030110308,0.327592487953461,0.0
parkingmeter,manyview_test_0,18.046203929985666,6.461002560728408,5.027716754597319,1.5829406195750064,0.0
pizza,manyview_test_0,15.152783189315754,6.578112135320982,7.482842326935612,0.7078538179251567,0.0
plant,manyview_test_0,20.369369422864448,11.73336728848978,5.490938199184393,0.5563616188902266,0.0
remote,manyview_test_0,21.93996425442841,9.915599775483262,3.2277628694594647,0.8952884887902877,0.0
sandwich,manyview_test_0,14.156122339232516,4.782614236412581,5.172885855269289,0.4726663784145917,0.0
skateboard,manyview_test_0,17.199716318802558,9.3986630162228,6.582697215433262,0.7526901207787688,0.0
suitcase,manyview_test_0,20.5543872349586,15.449636313939182,6.392103915747007,0.5623042520735794,0.0
teddybear,manyview_test_0,15.056483227336162,6.023824258666201,2.385989674021068,0.6859612539860361,0.0
toaster,manyview_test_0,17.538889427176077,10.389092700641873,7.350896986214959,0.6917412312874205,0.0
toilet,manyview_test_0,8.581683038527455,4.304701570881858,5.715072710684154,0.5228074506396895,0.0
toybus,manyview_test_0,13.421701717928093,5.104459961535013,7.832131890256459,0.5177220835646305,0.0
toyplane,manyview_test_0,25.939823270757692,11.015747754038403,5.005751206904976,0.5705696772343116,0.0
toytrain,manyview_test_0,17.831418296523193,7.494011795501741,4.629191510823262,0.6318052729776739,0.0
toytruck,manyview_test_0,20.369297725379987,9.285414438061778,4.844672681479939,0.48828556766453685,0.0
umbrella,manyview_test_0,12.752391495654509,6.657169727823324,2.556125460617257,0.428359657679186,0.0
vase,manyview_test_0,20.277671704818363,6.07655429478755,4.941408622390838,0.8391219139438616,0.0
wineglass,manyview_test_0,19.455250191811363,7.197566433072046,6.442702595780869,3.173690609010777,0.0
MEAN,-,16.64330518875463,7.882212795773946,5.6547484431710435,0.7209548906794958,0.0
"""
}
for task in [CO3DTask.FEW_VIEW, CO3DTask.MANY_VIEW]:
for seq_set in [CO3DSequenceSet.DEV, CO3DSequenceSet.TEST]:
BLANK_PREDICTION_RESULTS[(task, seq_set)] = _read_result_csv(
CSVs[f"{task.value}_{seq_set.value}"]
)
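# Usage sketch (not part of the original file): once the loop above has populated
# BLANK_PREDICTION_RESULTS at import time, the blank-prediction baseline metrics can be
# looked up by (task, sequence set) pair, e.g.:
#   from co3d.challenge.blank_predictions_results import BLANK_PREDICTION_RESULTS
#   baseline = BLANK_PREDICTION_RESULTS[(CO3DTask.MANY_VIEW, CO3DSequenceSet.TEST)]
# The structure of each entry is whatever _read_result_csv (defined earlier in this
# file) returns for the embedded CSV.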
|
co3d-main
|
co3d/challenge/blank_predictions_results.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
setup(
name="cc_net",
version="1.0.0",
packages=["cc_net"],
# metadata to display on PyPI
author="Guillaume Wenzek",
author_email="guw@fb.com",
description="Tools to download and clean Common Crawl",
keywords="common crawl dataset",
url="https://github.com/facebookresearch/cc_net",
license="CC-BY-NC-4.0",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
project_urls={
"Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
"Source Code": "https://github.com/facebookresearch/cc_net",
},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
install_requires=[
"beautifulsoup4>=4.7.1",
"pandas>=0.23.4",
"requests>=2.22.0",
"fasttext>=0.9.1",
"sentencepiece>=0.1.82",
"kenlm @ git+https://github.com/kpu/kenlm.git@master",
"func_argparse>=1.1.1",
"psutil>=5.6.3",
"sacremoses",
"submitit>=1.0.0",
"typing_extensions",
],
extras_require={
"dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
# To use scripts inside cc_net/tools
"tools": ["lxml", "sentence_splitter"],
# Memory-efficient hashset.
# This fork only compiles the kind of dict used by cc_net.
# Full version is at https://github.com/atom-moyer/getpy
"getpy": ["getpy @ git+https://github.com/gwenzek/getpy.git@v0.9.10-subset"],
},
package_data={"cc_net": ["data/*"]},
)
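# Install sketch (not part of the original setup.py), assuming a local checkout of the
# repository; the optional extras come from extras_require above:
#   pip install -e .
#   pip install -e ".[dev,tools]"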
|
cc_net-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net.execution import Executor
# Constant
FILE_DIR = Path(__file__).parent
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
DEFAULT_PIPELINE = [
"dedup",
"lid",
"keep_lang",
"sp",
"lm",
"pp_bucket",
"drop",
"split_by_lang",
]
class Config(NamedTuple):
"""
Mine Common Crawl with the given settings.
config_name
dump: CC dump id
output_dir: working directory
mined_dir: name of the destination folder, full path will be {output_dir}/{mined_dir}/{dump_id}
execution: choose how to parallelize the execution
num_shards: number of shards to split the dump
num_segments_per_shard: allows downloading a small portion of CC (e.g. for tests)
min_len: remove documents shorter than this (in chars)
hash_in_mem: number of shards' hashes to use for dedup
lang_whitelist: only treat those languages
lang_blacklist: ignore those languages
lang_threshold: remove docs whose top language score is lower than this
keep_bucket: keep only the chosen perplexity buckets (one of: head, middle, tail, all)
lm_dir: folder containing LMs
lm_languages: only use LMs for the following languages
cutoff: cutoff file to use for split in head/middle/tail
mine_num_processes: number of processes to use for mining
target_size: size of the final files produced during the `regroup` stage
cleanup_after_regroup: delete intermediary files after regroup
task_parallelism: max number of tasks to run in parallel
pipeline: restricts the mining pipeline to the given steps. Order is important!
experiments: (HACK) enable specific experiments in the code
"""
config_name: str = "base"
dump: str = "2017-51"
output_dir: Path = Path("data")
mined_dir: str = "mined"
execution: str = "auto"
num_shards: int = 1600
num_segments_per_shard: int = -1
metadata: Optional[str] = None
min_len: int = 300
hash_in_mem: int = 50
lang_whitelist: Sequence[str] = []
lang_blacklist: Sequence[str] = []
lang_threshold: float = 0.5
keep_bucket: Sequence[str] = []
lm_dir: Path = Path("data/lm_sp")
cutoff: Path = CUTOFF_CSV
lm_languages: Optional[Sequence[str]] = None
mine_num_processes: int = 16
target_size: str = "4G"
cleanup_after_regroup: bool = True
task_parallelism: int = -1
pipeline: Sequence[str] = DEFAULT_PIPELINE
experiments: Sequence[str] = []
cache_dir: Optional[Path] = None
def get_executor(
self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
) -> Executor:
name = "_".join((name, self.config_name, *self.experiments))
return execution.get_executor(
name,
self.output_dir / "logs",
self.execution,
timeout_hour=timeout_hour,
mem_gb=mem_gb,
cpus=cpus,
task_parallelism=self.task_parallelism,
)
def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
dump_cache: Optional[Path] = None
if self.cache_dir:
self.cache_dir.mkdir(exist_ok=True)
dump_cache = self.cache_dir / self.dump
dump_cache.mkdir(exist_ok=True)
return process_wet_file.CCShardReader(
self.dump,
shard=shard,
num_shards=self.num_shards,
num_segments_per_shard=self.num_segments_per_shard,
min_len=self.min_len,
cache_dir=dump_cache,
)
@classmethod
def from_json(cls, json_file: Path) -> "Config":
raw_lines = json_file.read_text().splitlines()
raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
json_config = json.loads("".join(raw_lines))
path_keys = ["cache_dir", "lm_dir", "output_dir"]
for key in path_keys:
if key in json_config:
json_config[key] = Path(json_config[key])
return Config(**json_config)
@property
def will_split(self) -> bool:
return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
def get_lm_languages(self) -> Sequence[str]:
if self.lm_languages is not None:
return self.lm_languages
if self.lang_whitelist:
return self.lang_whitelist
languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
if self.lang_blacklist:
languages = [l for l in languages if l not in self.lang_blacklist]
return languages
def get_mined_dir(self, regroup: bool = False) -> Path:
if self.will_split and not regroup:
return self.output_dir / f"{self.mined_dir}_split" / self.dump
return self.output_dir / self.mined_dir / self.dump
BASE_CONFIG = Config()
BYLANG_CONFIG = Config(
config_name="by_lang",
mined_dir="mined_by_lang",
pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)
REPRODUCE_CONFIG = Config(
config_name="reproduce",
dump="2019-09",
mined_dir="reproduce",
pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
# Optional filtering:
# It won't change much the execution speed, but decreases the disk requirement.
# Restrict languages
lang_whitelist=["fr"],
# Restrict perplexity buckets
# Top languages have been split in perplexity buckets according
# to a Wikipedia trained LM.
# The buckets from low perplexity (good) to high (bad) are:
# ["head", "middle", "tail"]
# Languages without a LM have only one bucket "all".
# It won't change much the execution speed, but decreases the disk requirement.
keep_bucket=["head", "all"],
mine_num_processes=1,
)
TEST_CONFIG = BASE_CONFIG._replace(
config_name="test",
dump="2019-09",
output_dir=Path("test_data"),
execution="local",
num_shards=4,
num_segments_per_shard=1,
hash_in_mem=2,
mine_num_processes=2,
lang_whitelist=["de", "it", "fr"],
target_size="32M",
cleanup_after_regroup=False,
cache_dir=Path("test_data/wet_cache"),
)
PREDEF_CONFIGS = {
"base": BASE_CONFIG,
"by_lang": BYLANG_CONFIG,
"test": TEST_CONFIG,
"test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
"debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
"reproduce": REPRODUCE_CONFIG,
"augment": BASE_CONFIG._replace(
config_name="augment", dump="2019-13", lang_blacklist=["en"]
),
}
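# Sketch (not part of the original file): main() below resolves --config either to one
# of the predefined configs above or to a JSON file parsed by Config.from_json, and any
# other CLI flag overrides the corresponding Config field. For example (the file name
# is hypothetical):
#   python -m cc_net.mine --config my_config.json --num_shards 8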
def tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def finalize(tmp_output: Path, output: Path) -> None:
if not tmp_output.exists():
warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
return
tmp_index = tmp_output.parent / (tmp_output.name + ".index")
tmp_output.rename(output)
if tmp_index.exists():
tmp_index.rename(output.parent / (output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
assert len(row) == n, f"Found tuple of len({len(row)}), expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
def hashes(conf: Config) -> List[Path]:
"""Computes hashes for each shard."""
hashes_dir = conf.output_dir / "hashes" / conf.dump
outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if not missing_outputs:
return outputs
hashes_dir.mkdir(parents=True, exist_ok=True)
# With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
# overhead due to how the dynamic allocation works.
ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
# Wait a bit so that files appear on the disk.
time.sleep(20)
assert all(o.exists() for o in outputs)
return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
tmp_output = tmp(output)
jsonql.run_pipes(
dedup.HashesCollector(field="raw_content", output=tmp_output),
inputs=conf.get_cc_shard(shard),
)
finalize(tmp_output, output)
return f"Hashed {output}"
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
"""Remove dups, run LID and LMs, and split by lang and quality."""
mined_dir = conf.get_mined_dir()
if conf.will_split:
# Give directories when splitting
outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
else:
# Files otherwise
outputs = [
mined_dir / f"{shard:04d}.json.gz" for shard in range(conf.num_shards)
]
if "mini_again" in conf.experiments:
mined_dir = conf.output_dir / "mini_again" / conf.dump
outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
# TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
mem_gb = 60 + 1 * conf.hash_in_mem
timeout_hour = 5
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
outputs = [
conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
for h in HASHES_IN_MEM
]
mem_gb = int(max(HASHES_IN_MEM) * 1.2)
timeout_hour = 8
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if "mini_again" in conf.experiments:
missing_outputs = [
(shard, o)
for shard, o in enumerate(outputs)
if shard in [5, 139] and not o.exists()
]
if not missing_outputs:
return outputs
mined_dir.mkdir(parents=True, exist_ok=True)
ex = conf.get_executor(
f"mine_{conf.dump}",
mem_gb=mem_gb,
timeout_hour=timeout_hour,
cpus=conf.mine_num_processes + 1,
)
# Compute hashes first.
if "dedup" in conf.pipeline:
hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
hashes_files: Iterable[List[Path]] = [
hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
]
else:
hashes_files = repeat([])
ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
assert all(o.exists() for o in outputs)
return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
assert conf.pipeline
tmp_output = tmp(output)
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
hashes_in_mem = shard
hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
shard = 0
cc_shard = conf.get_cc_shard(shard)
steps: Dict[str, Optional[jsonql.Transformer]] = {}
lang_id = Path("bin") / "lid.bin"
steps["lid_before_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
)
steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
steps["lid"] = split_by_lang.Classifier(
model=lang_id,
field="raw_content",
out_field="language",
top=1,
threshold=conf.lang_threshold,
)
steps["lid_after_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
)
if conf.lang_blacklist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
)
elif conf.lang_whitelist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") in set(conf.lang_whitelist)]
)
else:
steps["keep_lang"] = None
tok_field = "tokenized"
steps["sp"] = perplexity.MultiSentencePiece(
{l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
field="raw_content",
output_field=tok_field,
normalize=True,
)
steps["lm"] = perplexity.DocLM(
{l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
field=tok_field,
output_field="perplexity",
normalize=False, # Normalization is done before SentencePiece
# load_method=kenlm.LoadMethod.PARALLEL_READ,
)
steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
steps["drop"] = perplexity.DropKeys(tok_field)
steps["keep_bucket"] = None
if conf.keep_bucket:
steps["keep_bucket"] = jsonql.where(
[lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
)
if "fetch_metadata" in conf.pipeline:
# TODO: better default
assert conf.metadata is not None
steps["fetch_metadata"] = minify.MetadataFetcher(
f"{conf.metadata}/{conf.dump}/"
)
steps["minify"] = minify.Minifier()
pattern = str(tmp_output / "{language}_{bucket}.json.gz")
steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)
steps["split_by_segment"] = jsonql.split(
split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
)
pipeline = filter(None, (steps[s] for s in conf.pipeline))
jsonql.run_pipes(
*pipeline,
inputs=cc_shard,
processes=conf.mine_num_processes,
chunksize=100,
# The splitter takes care of writing to files.
output=tmp_output if not conf.will_split else None,
)
finalize(tmp_output, output)
return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs
all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
if not all_files:
print(f"No .json.gz file found in {all_dirs[0]}")
splits: Dict[str, List[Path]] = defaultdict(list)
for f in all_files:
split = f.name.split(".")[0]
splits[split].append(f)
print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
inputs: List[List[Path]] = []
outputs: List[Path] = []
target_size = jsonql.parse_size(conf.target_size)
for split, files in splits.items():
cuts = list(regroup_module.determine_groups(files, target_size=target_size))
if not cuts:
continue
pattern = f"{split}_????.json.gz"
existing_outputs = sorted(regroup_dir.glob(pattern))
if not conf.cleanup_after_regroup:
# We still have all the inputs so it is safe to overwrite existing outputs.
assert len(existing_outputs) <= len(cuts)
existing_outputs = []
if len(existing_outputs) > 0 and len(cuts) == 1:
# append to existing file if size allows it.
new_size = (
sum(f.stat().st_size for f in cuts[0])
+ existing_outputs[-1].stat().st_size
)
if new_size < target_size:
print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
cuts[0].insert(0, existing_outputs.pop(-1))
n_existing = len(existing_outputs)
for i, cut in enumerate(cuts):
# avoid overwriting existing files.
j = i + n_existing
output = regroup_dir / f"{split}_{j:04}.json.gz"
inputs.append(cut)
outputs.append(output)
print(
str(regroup_dir / pattern),
"->",
len(cuts),
f"shards ({n_existing} already there).",
)
ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
ex(_regroup, repeat(conf), inputs, outputs)
return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
output.parent.mkdir(parents=True, exist_ok=True)
regroup_module.fast_reshard(
inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
)
return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
# check that mining is over.
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs, "Received no dirs to move"
assert all(
d.is_dir() for d in all_dirs
), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."
regroup_dir.parent.mkdir(exist_ok=True)
regroup_dir.mkdir(exist_ok=True)
ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)
def _move_segments(subdir: Path, regroup_dir: Path) -> str:
n = 0
for f in subdir.iterdir():
if not f.is_file() or f.is_symlink():
continue
n += f.name.endswith(".json.gz")
new_name = regroup_dir / f.name
target = new_name.resolve()
assert f.resolve() != target
# this makes the job idempotent.
f.rename(new_name)
f.symlink_to(target)
if n == 0:
return ""
return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
ex(_move_segments, all_dirs, repeat(regroup_dir))
print(f"Results are in {regroup_dir}")
return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
stats: Dict[str, dict] = {}
for file in sorted(output_dir.glob("*.json.gz")):
fname = "/".join((file.parent.name, file.name))
# The order of documents is not guaranteed inside a shard,
lines = sorted(jsonql.open_read(file))
content = "\n".join(lines)
size = len(content)
checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
# first_document = json.loads(lines[0])
stats[fname] = {"size": size, "checksum": checksum}
def dump(x):
return json.dumps(x, indent=2, ensure_ascii=False)
print("*** Stats ***")
stats_raw = dump(stats)
stats_file = FILE_DIR / "data" / "test_stats.json"
if generate:
print("Saving stats to", stats_file)
stats_file.write_text(stats_raw)
return
expected_stats: Dict[str, dict] = {}
if stats_file.exists():
expected_stats = json.loads(stats_file.read_text())
if expected_stats == stats:
print("Everything looks good !")
return
stats_file.with_suffix(".actual.json").write_text(stats_raw)
print("*** Expected Stats ***")
print(dump(expected_stats))
print("*** Diff ***")
for fname in sorted(expected_stats.keys()):
print(fname)
assert fname in stats, "missing file " + fname
if expected_stats[fname]["size"] != stats[fname]["size"]:
print(
" - Expected size",
expected_stats[fname]["size"],
", size",
stats[fname]["size"],
)
if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
print(
" - Expected checksum",
expected_stats[fname]["checksum"],
", checksum",
stats[fname]["checksum"],
)
def get_main_parser() -> ArgumentParser:
# Generates the 'main' parser by patching a 'Config' parser
p = func_argparse.func_argparser(Config)
# Override defaults value to None, so we know what was set by the user.
# Note that it will keep the original default values in the help message.
p.set_defaults(**{f: None for f in Config._fields})
p.add_argument("--config", type=str, default="base")
p.set_defaults(__command=main)
return p
def main(config: str = "base", **config_as_dict: Any) -> None:
# Use the given 'config' as default value.
config_base = config
if config_base in PREDEF_CONFIGS:
conf = PREDEF_CONFIGS[config_base]
elif Path(config_base).exists():
conf = Config.from_json(Path(config_base))
else:
raise ValueError(
f"Invalid value {config_base} for --config. "
f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
)
conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
print(f"Will run cc_net.mine.main with the following config:", conf)
all_files = mine(conf)
if conf.will_split:
assert all_files
assert all(d.is_dir() for d in all_files)
all_dirs = all_files
if "split_by_lang" in conf.pipeline:
# Only try regrouping if we split the shards.
regroup(conf, all_dirs)
elif "split_by_segment" in conf.pipeline:
# If we split by segment then regrouping is trivial, since segments appear in only one shard.
move_segments(conf, all_dirs)
if conf.config_name == "test":
_validate_test(conf, conf.get_mined_dir(regroup=True))
if __name__ == "__main__":
func_argparse.parse_and_call(get_main_parser())
|
cc_net-main
|
cc_net/mine.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates a monolingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
return file.parent / ("tmp." + file.name)
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
"""Will dump the tokenized opening text of the given Wikipedia.
Args:
- file: File containing the Wikipedia dump.
- output: Output file.
- n_docs: How many docs to parse
"""
assert file.exists()
jsonql.run_pipes(
functools.partial(extract_opening_text, n_docs=n_docs),
file=file,
output=tmp(output) if output else None,
)
if output:
tmp(output).replace(output)
def extract_opening_text(source, n_docs: int = 10_000):
i = 0
for doc in jsonql.read_jsons(source):
if not doc:
continue
text = doc.get("opening_text")
if not text:
continue
yield text_normalizer.normalize(text)
i += 1
if i >= n_docs:
break
def dl(lang: str, output_dir: Path, date: str = None):
"""Download the cirrus extract for the given lang.
See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
Args:
- lang: The Wikipedia code for the language.
- output_dir: Output directory. File will be `{lang}.json.gz`
- date: Date of a specific Cirrus dump.
"""
urls = get_cirrus_urls(date)
assert (
lang in urls
), f"--lang {lang} not found. Available languages are: {urls.keys()}"
assert output_dir, "--output_dir folder needed."
output_dir.mkdir(exist_ok=True)
output = output_dir / (lang + ".json.gz")
print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
wget(urls[lang], output)
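# Usage sketch (not part of the original file): dl() and opening() are exposed as
# sub-commands through func_argparse at the bottom of this module; the paths below are
# only examples:
#   python -m cc_net.get_wiki_cirrus dl --lang fr --output_dir data/cirrus
#   python -m cc_net.get_wiki_cirrus opening --file data/cirrus/fr.json.gz --output data/cirrus/fr.opening.txt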
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
if date is None:
cirrus_page = BeautifulSoup(
urllib.request.urlopen(CIRRUS_URL), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
dumps.remove("..")
dumps.remove("current")
# We take the oldest dump since the most recent might be incomplete.
# The page only links to the N latest dumps so the dump won't be too old.
date = min(dumps)
cirrus_url = "/".join((CIRRUS_URL, date))
print("Will use the Wikipedia dump from:", date, cirrus_url)
cirrus_page = BeautifulSoup(
urllib.request.urlopen(cirrus_url), features="html.parser"
)
urls = {}
for link in cirrus_page.findAll("a"):
match = CIRRUS_DUMP_RE.match(link.get("href"))
if not match:
continue
urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
assert urls, f"No valid download urls found at {cirrus_url}"
return urls
def wget(url: str, output: Path):
subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
tmp(output).replace(output)
assert (
output.stat().st_size > 10_000
), f"File {output} downloaded from {url} looks too small"
if __name__ == "__main__":
func_argparse.main(dl, opening)
|
cc_net-main
|
cc_net/get_wiki_cirrus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
NEWLINE = " N3WL1N3 "
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
"""Parser shared by all commands to get input/output files."""
parser = argparse.ArgumentParser(add_help=False)
file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
parser.add_argument("-o", "--output", type=Path, default="-")
parser.add_argument("--processes", type=int, default=1)
return parser
def get_parser():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them"
)
subparsers = parser.add_subparsers()
def add_subparser(function, arguments):
doc = function.__doc__.split("\n")[0]
p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
p.set_defaults(command=function)
for k, v in arguments.items():
p.add_argument(k, **v)
add_subparser(
select,
{
"columns": dict(nargs="+", help="Extract the value of the given fields"),
"--skip_empty": dict(
action="store_true", help="Skip lines without the requested fields"
),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE,
help="Replace newlines found in the text by the given string",
),
},
)
add_subparser(
where,
{
"clauses": dict(nargs="+", help=""),
"--requires": dict(
action="append", help="Python module required by the clauses code."
),
},
)
add_subparser(
merge,
{
"columns": dict(nargs="+", help=""),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE, help="Replace the given string by actual newlines"
),
},
)
add_subparser(
describe,
{
"columns": dict(nargs="*", help=""),
"--bins": dict(
default="auto", help="Number of bins for computing the histograms"
),
"--cumulative": dict(
action="store_true", help="Compute cumulative histograms"
),
"--weights": dict(type=str, help="Column used to weight histograms"),
},
)
add_subparser(split, {"--pattern": dict(type=str)})
add_subparser(shard, {})
return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
def main(raw_args):
parser = get_parser()
pipeline = []
file = "-"
output = "-"
processes = 1
for args_group in _split_array(raw_args, "--"):
args = vars(parser.parse_args(args_group))
command = args.pop("command")
file = args.pop("file") or file
output = args.pop("output") or output
processes = args.pop("processes") or processes
pipeline.append(as_pipe(command, args))
if not pipeline:
parser.print_help()
return
run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
class Transformer:
"""
Wrapper around functions transforming documents.
This allows `run_pipes` to automatically parallelize the pipeline.
Provides:
* Automatic logging. Logging can be changed with the `summary` method.
Logging frequency is set with _log_freq (in seconds) or the $JSONQL_LOG_FREQ env variable.
* Automatic parallelization without pickling. The transformers are shared
across processes, and the object is usually not pickled.
* Basic pickling / unpickling in case it's still needed.
By default will only pickle the arguments passed to the constructor.
* Delayed initialization. Internal state which is not picklable should be set
inside the `_prepare` function.
"""
parallelisable: bool = True
expect_json: bool = False
warn_when_pickling: bool = False
ready: bool = False
def __init_subclass__(cls, expect_json: bool = None):
"""Detects if the subclass expects json as input."""
spec = inspect.getfullargspec(cls.do)
if expect_json is None:
expect_json = spec.annotations.get(spec.args[1], None) == dict
cls.expect_json = expect_json
def __new__(cls, *args, **kwargs):
"""Creates the transformer and save the arguments passed to the constructor."""
t = super().__new__(cls)
Transformer.__init__(t, args, kwargs)
return t
def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
"""
Init the transformer counters.
If state_args/state_kwargs are set they will override whatever was
originally passed to the subclass constructor.
"""
if state_args is not None:
self.__args = state_args
if state_kwargs is not None:
self.__kwargs = state_kwargs
self.start_time = time.time()
self.__last_log = self.start_time
self.processed = 0
# Log every 5 min unless specified otherwise.
self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
self.__cls = type(self)
self._logger = logging.getLogger(self.__cls.__name__)
def __call__(self, x):
assert self.ready, f"{self} is not ready."
if x is None:
return
y = self.do(x)
self.processed += 1
if time.time() - self.__last_log > self._log_freq:
self.log_summary()
return y
def do(self, x):
raise NotImplementedError(f"'do' not implemented in {type(self)}")
def summary(self) -> List[str]:
return [self.speed_summary()]
def speed_summary(self) -> str:
delay = time.time() - self.start_time
h = delay / 3600
s = self.processed / delay
return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
def log(self, message):
self._logger.info(message)
def log_summary(self) -> None:
if not self.ready:
self.log("Not ready.")
return
summ = self.summary() or []
for line in summ:
self.log(line)
self.__last_log = time.time()
def map(self, source: Iterable) -> Iterator:
if self.ready:
for x in source:
yield self(x)
# since we have been prepared by caller,
# caller is also responsible for calling `close`.
return
else:
with self:
for x in source:
yield self(x)
def __getstate__(self) -> Tuple[tuple, dict, bool]:
return (self.__args, self.__kwargs, self.expect_json)
def __setstate__(self, state: Tuple[tuple, dict, bool]):
if self.warn_when_pickling:
warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
(args, kwargs, expect_json) = state
# When unpickling, `__new__` isn't called so we have to do it ourselves.
Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
type(self).__init__(self, *args, **kwargs)
assert self.expect_json == expect_json
# __setstate__ is called by multiprocessing right before calling
# the object so we need to initialize everything.
self.__enter__()
def _prepare(self) -> None:
pass
def __enter__(self) -> "Transformer":
# In multiprocessing __enter__ is always called twice, so this method is idempotent:
# it runs when this transformer is deserialized and also when the parent
# transformer is deserialized.
self.start_time = time.time()
if self.ready:
return self
self._prepare()
self.ready = True
return self
def __exit__(self, *args) -> None:
self.close()
self.log_summary()
def close(self) -> None:
pass
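# Minimal custom Transformer sketch (not part of the original file), assuming documents
# are dicts with a "raw_content" field. The `dict` annotation on `do` lets run_pipes
# detect that this transformer expects parsed json and prepend a JsonReader:
# class CharCounter(Transformer):
#     def do(self, doc: dict) -> dict:
#         doc["n_chars"] = len(doc.get("raw_content", ""))
#         return doc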
def as_pipe(transformer, kwargs):
if isinstance(transformer, type):
return transformer(**kwargs)
return lambda source: transformer(source, **kwargs)
def compose(fns: List[Transformer]) -> Transformer:
if len(fns) == 1:
return fns[0]
return MultiTransformer(fns)
class MultiTransformer(Transformer):
def __init__(self, transformers: List[Transformer]):
super().__init__()
self.transformers = transformers
def __repr__(self) -> str:
pipeline = " | ".join(type(t).__name__ for t in self.transformers)
return f"<{pipeline}>"
def do(self, x):
for t in self.transformers:
x = t(x)
return x
def _prepare(self):
for t in self.transformers:
t.__enter__()
return self
def __exit__(self, *args):
for t in self.transformers:
t.__exit__(*args)
def summary(self):
return itertools.chain(*(t.summary() for t in self.transformers))
class Mapper(Transformer):
def __init__(self, fn):
super().__init__()
self.fn = fn
def do(self, x):
return self.fn(x)
def run_pipe(
command,
kwargs: dict = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
):
kwargs = kwargs or {}
if isinstance(kwargs, argparse.ArgumentParser):
kwargs = vars(kwargs.parse_args())
file = file or Path(kwargs.pop("file", "-"))
output = output or Path(kwargs.pop("output", "-"))
return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
*fns: Union[Transformer, Callable[[Iterable], Iterable]],
inputs: Iterable[dict] = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
processes: int = 1,
chunksize: int = 10_000,
):
"""
Run full document processing pipeline.
- fns: list of functions to run over the documents. Can be:
* `Iterable -> Iterable` function
* jsonql.Transformer instance
Using transformers allows the pipeline to process documents in parallel.
- inputs: iterable to read the documents from
- file: if inputs is not given, will read documents from this file.
- output: writable file like.
- processes: number of processes to use. -1 means all CPU available.
- chunksize: chunksize for multiprocessing.Pool.imap_unordered
"""
expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
if expect_json and inputs is None:
fns = (JsonReader(),) + fns
transformers = []
for t in fns:
if not isinstance(t, Transformer):
break
if not t.parallelisable:
break
transformers.append(t)
pipes = fns[len(transformers) :]
log = logging.getLogger(__name__).info
if inputs is None:
data: Iterable = open_read(file)
else:
data = inputs
if processes == -1:
processes = os.cpu_count() or 0
with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
if transformers:
log(f"preparing {transformers}")
transform = stack.enter_context(compose(transformers))
if processes <= 1:
data = transform.map(data)
else:
p = multiprocessing.current_process()
log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
pool = stack.enter_context(
multiprocessing.Pool(
processes=processes,
initializer=_set_global_transformer,
initargs=(transform,),
)
)
data = pool.imap_unordered(
_global_transformer, data, chunksize=chunksize
)
for fn in pipes:
if isinstance(fn, Transformer):
data = fn.map(data)
else:
data = fn(data)
write_jsons(data, output)
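# Usage sketch (not part of the original file); the file names are hypothetical and the
# clause syntax is the one compiled by compile_expr further below:
#   run_pipes(
#       where(["{language} == 'en'"]),
#       file=Path("docs.json.gz"),
#       output=Path("docs.en.json.gz"),
#       processes=2,
#   )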
# Allows sharing a transformer across subprocesses.
# Used by `run_pipes`
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
global _GLOBAL_TRANSFORMER
p = multiprocessing.current_process()
logging.info(
f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
)
assert transformer.ready, f"{transformer} isn't ready"
_GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
assert _GLOBAL_TRANSFORMER is not None
return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
return (line.strip("\n") for line in open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
reader = JsonReader(strict=strict)
lines = open_read(file)
for line in lines:
if line is None:
continue
yield reader(line)
reader.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
eol = os.linesep
with open_write(file) as o:
for res in source:
if res is None:
continue
if isinstance(res, dict):
json.dump(res, o, ensure_ascii=False)
o.write(eol)
continue
if isinstance(res, str):
res = res.rstrip("\n")
print(res, file=o)
class JsonReader(Transformer):
def __init__(self, strict: bool = False):
super().__init__()
self.ready = True
self.strict = strict
self.num_errors = 0
def do(self, line: str) -> Optional[dict]:
if line is None:
return None
if isinstance(line, dict):
return line
line = line.rstrip("\n")
if not line:
return None
try:
return json.loads(line)
except json.decoder.JSONDecodeError as e:
self.log_error(e)
if self.strict:
raise
return None
def log_error(self, e: json.decoder.JSONDecodeError):
self.num_errors += 1
if self.num_errors > 10:
return
MAX_LEN = 80
snippet, snippet_len = e.doc, len(e.doc)
col = e.pos
if snippet_len > MAX_LEN:
if col < MAX_LEN:
start = 0
elif snippet_len - col < MAX_LEN:
start = snippet_len - MAX_LEN
else:
start = col - MAX_LEN // 2
snippet = e.doc[start : start + MAX_LEN]
col = col - start
logging.warning(
"\n".join(
[
f"Invalid json (length={len(e.doc)}) {e}",
snippet,
" " * (col - 1) + "^",
]
)
)
def summary(self):
summ = super().summary()
if self.num_errors > 0:
summ.append(f"Skipped {self.num_errors} invalid json.")
return summ
def compile_column(column, newline):
if callable(column):
return column
if column == "*":
return json.dumps
if re.match(r"[_a-z][_a-z0-9]*", column):
def extract_col(doc):
v = doc.get(column, "")
if isinstance(v, str) and newline != "\n":
v = v.rstrip("\n").replace("\n", newline)
return v
return extract_col
return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
"""Yields the content of the requested columns."""
column_parsers = [compile_column(c, newline) for c in columns]
for doc in read_jsons(lines):
values = []
empty = True
for parse_col in column_parsers:
v = parse_col(doc)
values.append(str(v) or "")
empty = empty and v is None
if skip_empty and empty:
continue
yield separator.join(values)
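# CLI sketch (not part of the original file), following the invocation style shown in
# io_parser's help text; extracts two fields from each json line:
#   jsonql select text language --file 'data/docs.json.gz'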
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
if not isinstance(clause, str):
return clause
args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
args_list = list(re.findall(args_re, clause))
if not args_list:
# This is only a warning because you may want to have eg random sampling
# that doesn't depend on the document.
logging.warn(
f"Warning: No variable found in expression: <{clause}>\n"
"Variables should be written inside braces, eg: {language}=='en'"
)
python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
requires = requires or []
modules = {r: importlib.import_module(r) for r in requires}
return eval(f"lambda doc: {python_like}", modules)
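# Example (not part of the original file): compile_expr("{language} == 'en'") returns a
# callable roughly equivalent to:
#   lambda doc: doc.get('language', None) == 'en'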
class where(Transformer):
"""Filters the data using python code.
Ex: `jsonql where 'len({text}) > 100'`
"""
def __init__(
self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
):
super().__init__()
self.raw_clauses = clauses
self.requires = requires
self.n_selected = 0
self.clauses: List[FilterFn] = []
def _prepare(self):
self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
def do(self, doc: dict) -> Optional[dict]:
assert self.clauses
if not doc or not all((c(doc) for c in self.clauses)):
return None
self.n_selected += 1
return doc
def summary(self):
n_selected, n_docs = self.n_selected, self.processed
selectivity = n_selected / n_docs if n_docs else 0
return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
"""Reads tab separated columns and output a json using the given headers.
Headers are of form {key}[%{type}]
{type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
Default type is string.
A special header "_" means interpret this column as json, and append all other
columns to it. Must appear only once and on last position.
Ex:
`echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
`echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
`echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
"""
handle_newlines = lambda s: s.replace(newline, "\n")
type_mapping: Dict[str, Callable] = {
"f": float,
"i": int,
"b": bool,
"s": handle_newlines,
}
type_parsing = [
type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
]
columns = [f.split("%")[0] for f in columns]
doc_index = columns.index("_") if "_" in columns else -1
read_json = JsonReader()
def parse(line):
parts = line.split(separator, len(columns) - 1)
doc: Dict[str, tp.Any] = {}
for i, value in enumerate(parts):
if columns[i] == "_":
doc.update(read_json(parts[doc_index]))
else:
try:
doc[columns[i]] = type_parsing[i](value)
except ValueError:
logging.error(
f"Error when parsing column {i} of line: {line[:100]}..."
)
return doc
for line in lines:
yield json.dumps(parse(line))
class split(Transformer):
"""Split a files in several smaller files based on the value of a field."""
# Not parallelisable since we are writing to files.
parallelisable = False
def __init__(
self,
pattern: Union[Path, str] = None,
split_fn: Callable[[dict], str] = None,
mkdir: bool = False,
):
super().__init__()
assert not (
pattern and split_fn
), "split can't have both a pattern and a split_fn"
if split_fn is not None:
self.split_fn = split_fn
else:
assert pattern, "split need either a pattern or a split_fn"
self.split_fn = self.make_split_fn(str(pattern))
self.mkdir = mkdir
self.o: dict = {}
def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
def do(self, doc):
filename = self.split_fn(doc)
if not filename:
return
o = self.o.get(filename, None)
if o is None:
if self.mkdir:
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.o[filename] = open_write(filename)
print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
def summary(self):
summ = super().summary()
summ.append(f"Found {len(self.o)} splits.")
return summ
def close(self):
for file in self.o.values():
file.close()
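# Usage sketch (not part of the original file): write each document to a per-language
# file under a hypothetical "out/" directory, creating parent directories as needed:
#   splitter = split(pattern="out/{language}.json.gz", mkdir=True)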
def histogram(values, bins, weights):
hist, bins = np.histogram(values, bins=bins)
# n_bins = len(hist)
if weights is not None:
# Bins can't be auto-determined if weights is supplied.
# So we first compute the bins without the weights then recompute
# the histogram with the weights.
hist, bins = np.histogram(values, bins=bins, weights=weights)
# cumsum = np.cumsum(hist)
# total = cumsum[-1]
# for i in range(n_bins - 1):
# if cumsum[i] / total > 0.9:
# useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
# new_bins = np.append(useful_range, [bins[-1]])
# return np.histogram(values, bins=new_bins, weights=weights)
return hist, bins
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
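# Illustrative usage sketch, added for clarity (not part of the original cc_net
# module): how `_parse_bins` interprets the `bins` option of `describe`.
def _example_parse_bins() -> None:
    assert _parse_bins("20") == 20  # a single int: number of bins
    assert _parse_bins("0,10,100") == [0, 10, 100]  # comma-separated: explicit bin edges
    assert _parse_bins("auto") == "auto"  # anything else is forwarded to np.histogram unchanged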
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
n = sum(hist)
max_h = max(hist)
out = []
for i, h in enumerate(hist):
h_size = 80 * h // max_h
dh_size = 80 * (h - hist[i - 1]) // max_h
if h_size == 0 or dh_size == 0:
continue
bar = "█" * h_size
out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
out.append(f"{bins[-1]:8.3f}")
return out
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
out = []
documents = stats[ALL_DOCUMENTS]
count = stats.get(key, 0)
r = count / documents if documents else 0
out.append(f"Field {key} saw {count} times ({r:5.1%})")
length = stats.get(key + ".length", None)
avg_length = length // count if length else 0
if length is not None:
out[-1] += f", average length is {length // count}"
values = stats.get(key + ".val", None)
if values:
out[-1] += f", histogram is: (bins={bins})"
if weights:
if weights not in stats:
logging.warn(f"Warning: weights column {weights} not found.")
if weights + ".val" not in stats:
logging.warn(
f"Warning: weights column {weights} is not a numeric column."
)
weights = stats.get(weights + ".val")
hist, bins = histogram(values, _parse_bins(bins), weights)
if cumulative:
hist = np.cumsum(hist)
out += bar_chart(hist, bins)
cnt = stats.get(key + ".cnt", None)
if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
out[-1] += ", top 100 labels:"
for label, n in cnt[:100]:
if n < 5:
continue
out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
return out
def describe(source, columns=None, weights=None, **kwargs):
"""Compute some statistics about a dataset.
Stats can be restricted to a subset of columns."""
MAX_HIST_SIZE = 100_000_000
MAX_CNT_SIZE = 1000
stats = {ALL_DOCUMENTS: 0}
needed = columns + [weights] if columns else None
for doc in read_jsons(source):
stats[ALL_DOCUMENTS] += 1
for k, v in doc.items():
if needed and k not in needed:
continue
stats[k] = get_or_set(stats, k, 0) + 1
if isinstance(v, str):
stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels
continue
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
if v in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[v] += 1
elif type(v) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values.append(v)
elif type(v) is list and len(v) and type(v[0]) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values += v
elif type(v) is dict:
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
for label in v:
if label in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[label] += 1
documents = stats[ALL_DOCUMENTS]
yield f"Stats computed on {documents} documents:"
for k in stats:
if columns and k not in columns:
continue
if "." in k or k == ALL_DOCUMENTS:
continue
for line in display_stats(stats, k, weights=weights, **kwargs):
yield line
def shard(lines):
"""Shard a file in several smaller ones."""
# The creation of the shard is handle in a generic way. Do we need this ?
return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
if key not in dictionary:
dictionary[key] = default
return dictionary[key]
class SimpleIO(Protocol):
"""A subset of methods from TextIO."""
def close(self) -> None:
...
def write(self, line: str) -> int:
...
def __enter__(self) -> "SimpleIO":
...
def __exit__(self, exc_type, exc_value, traceback):
...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
"""Open the given file, list of files or files matching the given glob and read lines.
`filename` is None or "-" -> reads from stdin
`filename` is a Path / str -> interprets filename as a glob and open files matching it
`filename` is a list -> opens sequentially all files from the list using `open_read`
`filename` is something else -> returns the object wrapped in a `nullcontext`
    This allows passing already opened files or iterables.
`open_read` will decompress gzip files, given they have ".gz" suffix.
"""
if filename is None:
return sys.stdin
if isinstance(filename, list):
assert isinstance(filename[0], Path)
if len(filename) == 0:
return []
if len(filename) > 1:
return _yield_from(filename)
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
if filename.startswith("http://") or filename.startswith("https://"):
return open_remote_file(filename)
filename = Path(filename)
if not isinstance(filename, Path):
# we might have received an iterable, return it unmodified.
return filename # type: ignore
# Expand glob patterns only when reading
files = [Path(f) for f in sorted(glob.glob(str(filename)))]
if len(files) > 1:
return _yield_from(files)
if len(files) == 1:
filename = files[0]
assert isinstance(filename, Path)
if filename.name.endswith("]"):
return block_reader(filename)
logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
if filename.suffix == ".gz":
file: TextIO = gzip.open(filename, "rt") # type: ignore
else:
file = open(filename, "rt")
return _close_when_exhausted(file)
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
for file in files:
yield from open_read(file)
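# Illustrative usage sketch, added for clarity (hypothetical paths, not part of
# the original cc_net module): `open_read` accepts a glob, a list of files or a
# URL, and transparently decompresses ".gz" files:
#
#   for line in open_read("data/*.json.gz"):
#       ...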
def open_write(
filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
"""Open the given file, list of files or files matching the given glob.
The return value is a ContextManager meant to be used inside a `with` block:
```
with open_write("foo.txt") as o:
        ...
    ```
    Write mode:
        replaces "?" in the filename by numbers ranging from 0 to 9, generating files of size `max_size`.
If filename ends with ".gz", creates a blocked gzip file with random access.
"""
if filename is None:
return contextlib.nullcontext(sys.stdout)
if isinstance(filename, list):
if len(filename) > 1:
return MultiFile(filename, "w", max_size)
else:
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
filename = Path(filename)
if not isinstance(filename, Path):
assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
# We return a 'TextIO' even though we only check for `.write` method,
# this works better with eg `print`.
return contextlib.nullcontext(tp.cast(TextIO, filename))
mode = "wt"
if "?" in filename.name:
return sharded_file(filename, mode, max_size)
logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
# TODO: should we use another format ?
if filename.suffix == ".gz":
return BlockedGzipWriter(Path(filename), mode, block_size="64M")
return open(filename, "wt")
def parse_size(size):
unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
unit = size[-1].upper()
assert (
unit in unit_map
), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
return int(size[:-1]) * unit_map[unit]
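# Illustrative usage sketch, added for clarity (not part of the original cc_net
# module): sizes are given with a B/K/M/G suffix and use powers of 1024.
def _example_parse_size() -> None:
    assert parse_size("512B") == 512
    assert parse_size("64M") == 64 * 1024 ** 2
    assert parse_size("4G") == 4 * 1024 ** 3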
class MultiFile(SimpleIO):
def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
self.name = str(files)
self.mode = mode
self.files = iter(files)
self.max_size = parse_size(max_size)
self.current_handle: Optional[TextIO] = None
self.current_block_size = 0
self._open_next_handle() # Opening 1st handle allows to write directly.
def write(self, content) -> int:
# Avoid splitting newlines to a new file.
# use current_block_size since it's faster than `tell()`
if content != "\n" and self.current_block_size >= self.max_size:
self._open_next_handle()
if self.current_handle is None:
raise Exception("No more files to write to...")
written = self.current_handle.write(content)
self.current_block_size += written
return written
def _open_next_handle(self) -> bool:
self.close()
file = next(self.files, None)
if file is None:
return False
self.current_handle = open_write(file).__enter__()
self.current_block_size = 0
return True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def closed(self):
return self.current_handle is None
def close(self):
if self.current_handle is None:
return
# log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
self.current_handle.__exit__(None, None, None)
self.current_handle = None
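# Illustrative usage sketch, added for clarity (hypothetical filenames, not part
# of the original cc_net module): MultiFile rolls over to the next file once
# roughly `max_size` bytes have been written to the current one.
#
#   with MultiFile([Path("out.0.txt"), Path("out.1.txt")], "w", max_size="1K") as o:
#       for i in range(1000):
#           print(i, file=o)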
# Not sure it helps since connections are reset anyway.
_session = functools.lru_cache()(requests.Session)
def request_get_content(url: str, n_retry: int = 3) -> bytes:
"""Retrieve the binary content at url.
Retry on connection errors.
"""
t0 = time.time()
logging.info(f"Starting download of {url}")
for i in range(1, n_retry + 1):
try:
r = _session().get(url)
r.raise_for_status()
break
except requests.exceptions.RequestException as e:
# Sleep and try again on error, unless it's a 404.
message = e.args[0] if isinstance(e.args[0], str) else ""
if i == n_retry or "Client Error" in message:
raise e
warnings.warn(
f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
)
time.sleep(10 * 2 ** i)
dl_time = time.time() - t0
dl_speed = len(r.content) / dl_time / 1024
logging.info(
f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
)
return r.content
def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
"""Download the files at the given url to memory and opens it as a file.
Assumes that the file is small, and fetch it when this function is called.
"""
if cache and cache.exists():
return open_read(cache)
# TODO: open the remote file in streaming mode.
# The hard part is that we need to write the content on disk at the same time,
# to implement disk caching.
raw_bytes = request_get_content(url)
content = io.BytesIO(raw_bytes)
if url.endswith(".gz"):
f: TextIO = gzip.open(content, mode="rt") # type: ignore
else:
f = io.TextIOWrapper(content)
if cache and not cache.exists():
# The file might have been created while downloading/writing.
tmp_cache = _tmp(cache)
tmp_cache.write_bytes(raw_bytes)
if not cache.exists():
tmp_cache.replace(cache)
else:
tmp_cache.unlink()
return _close_when_exhausted(f)
def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
folder, name = file_pattern.parent, file_pattern.name
assert "?" in name, f"Can't expand give file_pattern: {file_pattern}"
n = name.count("?")
assert 0 < n < 8
assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}"
assert "r" not in mode
files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
return MultiFile(files, mode, max_size)
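# Illustrative usage sketch, added for clarity (hypothetical pattern, not part of
# the original cc_net module): the "?" placeholders are replaced by zero-padded
# numbers, so the pattern below expands to shard.000.json.gz ... shard.999.json.gz,
# each file being opened lazily once the previous one reaches `max_size`.
#
#   out = sharded_file(Path("shard.???.json.gz"), mode="w", max_size="4G")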
class SplitFile:
def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
assert mode == "r"
size = os.path.getsize(filename)
self.handle = open(filename, mode)
start = chunk * size // n_chunks
self.end: int = (chunk + 1) * size // n_chunks
if start > 0:
self.handle.seek(start - 1)
# Skip incomplete line. This avoid crashing when reading eg the middle
# of a unicode char. `self.handle.buffer` is a binary file reader.
self.handle.buffer.readline() # type: ignore
def __enter__(self):
return self
def __iter__(self):
while True:
line = self.handle.readline()
if not line:
return
yield line
if self.handle.tell() >= self.end:
return
def readlines(self):
return list(self.__iter__())
def close(self):
self.handle.close()
def __exit__(self, *args):
self.close()
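# Illustrative usage sketch, added for clarity (hypothetical filename, not part
# of the original cc_net module): read only the second quarter of a large file,
# starting at the first complete line of that byte range.
#
#   with SplitFile(Path("big_corpus.txt"), chunk=1, n_chunks=4) as f:
#       for line in f:
#           ...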
def get_block_readers(filename: Path, n_readers, mode="t"):
index_filename = filename.parent / (filename.name + ".index")
if not index_filename.exists():
return [gzip.open(filename, "r" + mode)]
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
start = 0
readers = []
for i in range(n_readers):
end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
r = _blocked_gzip_reader(filename, start, end, mode)
readers.append(r)
start = end
return readers
def block_reader(filename: Path) -> Iterable[str]:
root, pattern = str(filename)[:-1].split("[", 1)
assert root.endswith(".gz"), "Can only read block of a .gz file for now."
ii, nn = pattern.strip().split("/")
i, n_readers = int(ii), int(nn)
index_filename = root + ".index"
assert os.path.exists(
index_filename
), f"Index {index_filename} not found for {filename}"
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
    # I'm not sure how to handle the case where there are fewer readers than expected.
# Currently we return empty readers.
start = 0
if i > 0:
start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
end = index[min(i * chunk_per_reader, n_chunks - 1)]
return _blocked_gzip_reader(root, start, end, mode="t")
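# Illustrative usage sketch, added for clarity (hypothetical filename, not part
# of the original cc_net module): a path of the form "file.gz[i/n]" selects the
# i-th of n groups of gzip blocks, using the "file.gz.index" offsets written by
# BlockedGzipWriter.
#
#   for line in open_read("corpus.json.gz[0/8]"):
#       ...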
def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
handle = gzip.open(filename, "r" + mode)
handle.seek(start)
try:
while handle.tell() < end:
line = handle.readline()
if not line:
break
yield line
finally:
handle.close()
class BlockedGzipWriter(MultiFile):
"""Writes a Gzip files which can be read by block.
Decreasing the block size may hurt compression, but provides more split points.
"""
def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
assert "w" in mode
self.filename = Path(filename)
self.index: List[int] = []
self.zipfile: Optional[gzip.GzipFile] = None
super().__init__([], mode, block_size)
def _open_next_handle(self) -> bool:
"""Here we never actually close/open handles,
we just write the end of block sequence."""
if not self.current_handle:
mode = self.mode + "t"
self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
assert isinstance(self.current_handle.buffer, gzip.GzipFile)
self.zipfile = self.current_handle.buffer
return True
# Use Z_FULL_FLUSH to allow random access:
# https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
self.index.append(self.current_handle.tell())
self.current_block_size = 0
return True
def flush(self):
assert self.current_handle is not None
self.current_handle.flush()
def close(self):
if self.current_handle is None:
return
self.current_handle.flush()
self.index.append(self.current_handle.tell())
self.current_handle.close()
self.current_handle = None
index = np.array(self.index, dtype=np.uint64)
with open(str(self.filename) + ".index", "wb") as o:
np.save(o, index)
def grouper(iterable, n):
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if group:
yield group
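# Illustrative usage sketch, added for clarity (not part of the original cc_net
# module): `grouper` batches any iterable into lists of at most `n` elements.
def _example_grouper() -> None:
    assert list(grouper(range(5), 2)) == [[0, 1], [2, 3], [4]]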
PROCESS = psutil.Process()
def mem_footprint_gb(pid=None):
rss = PROCESS.memory_info().rss
return rss / 1_000_000_000
def _tmp(output: Path) -> Path:
suffix = "".join(output.suffixes)
suffix = ".tmp" + suffix
prefix = output.name[: -len(suffix)]
_, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
return Path(tmp_path)
@functools.lru_cache()
def _tmp_dir() -> Path:
job_id = os.environ.get("SLURM_JOB_ID")
if job_id:
return Path("/scratch/slurm_tmpdir") / job_id
checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
if checkpoint.exists():
tmp = checkpoint / "tmp"
tmp.mkdir(exist_ok=True)
return tmp
return Path("/tmp")
if __name__ == "__main__":
multiprocessing.set_start_method("fork")
main(sys.argv[1:])
|
cc_net-main
|
cc_net/jsonql.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
def __init__(self, fn: Callable):
self.fn = fn
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def get_executor(
name: str,
log_dir: Path,
execution: str,
timeout_hour: float = 1.0,
mem_gb: int = 1,
cpus: int = 1,
task_parallelism: int = -1,
options: dict = {},
) -> Executor:
execution_mode = execution.split(",")[0]
options.update(
{kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]}
)
if execution_mode == "mp":
warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
execution_mode = "local"
cluster = None if execution_mode == "auto" else execution_mode
# use submitit to detect which executor is available
ex = submitit.AutoExecutor(log_dir, cluster=cluster)
if ex.cluster == "local":
# LocalExecutor doesn't respect task_parallelism
return functools.partial(custom_map_array, ex, task_parallelism)
if ex.cluster == "debug":
return debug_executor
# We are on slurm
if task_parallelism == -1:
task_parallelism = 500
ex.update_parameters(
name=name,
timeout_min=int(timeout_hour * 60),
mem_gb=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
return functools.partial(map_array_and_wait, ex)
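# Illustrative usage sketch, added for clarity (hypothetical function and paths,
# not part of the original cc_net module): run `process_shard` over two shards,
# locally or on slurm depending on the `execution` string.
#
#   ex = get_executor("demo", Path("logs"), execution="local", timeout_hour=0.5, cpus=1)
#   ex(process_shard, ["shard0.json.gz", "shard1.json.gz"], ["out0.json.gz", "out1.json.gz"])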
def map_array_and_wait(
ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable
):
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
approx_length = _approx_length(*args)
print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
jobs = ex.map_array(function, *args)
if not jobs:
return
failed_jobs = []
done = 0
total = len(jobs)
job_array_id = jobs[0].job_id.split("_")[0]
print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
for job in submitit.helpers.as_completed(jobs):
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
logging.getLogger().setLevel(logging.DEBUG)
approx_length = _approx_length(*args)
for i, x in enumerate(zip(*args)):
try:
message = function(*x)
except Exception:
try:
import ipdb as pdb # type: ignore
except ImportError:
import pdb # type: ignore
import traceback
traceback.print_exc()
print("")
pdb.post_mortem()
sys.exit(1)
if message is not None:
print(message, f"({i + 1} / {approx_length})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
ex: submitit.AutoExecutor,
parallelism: int,
function: Callable[..., Optional[str]],
*args: Iterable,
) -> None:
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
jobs_args = list(zip(*args))
total = len(jobs_args)
if parallelism < 0:
parallelism = os.cpu_count() or 0
assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
enqueued = 0
done = 0
running_jobs: List[submitit.Job] = []
failed_jobs: List[submitit.Job] = []
while done < len(jobs_args):
# Try to queue more job if we have some bandwidth.
if enqueued < total and len(running_jobs) < parallelism:
running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
enqueued += 1
continue
# Else wait for some job to finish
if not running_jobs:
warnings.warn(
f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
)
break
job = get_next_job(running_jobs)
running_jobs.remove(job)
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def get_next_job(
jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
"""
Waits for any of the job to finish and returns it.
jobs: list of jobs
poll_frequency: frequency in second at which we check job status
"""
start = time.time()
waiting = False
while True:
for job in jobs:
if job.done():
return job
if not waiting:
job_ids = [j.job_id for j in jobs[:4]]
suffix = "..." if len(jobs) > 4 else ""
print(
f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
)
waiting = True
time.sleep(poll_frequency)
|
cc_net-main
|
cc_net/execution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
HASH_TYPE: Type[np.uint64] = np.uint64
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
"""A dict-like that returns `True` for keys that have been added more than once.
    The API is batched and expects np.array inputs. This batching gives better
    performance when using the C++ implementation.
"""
dtype: Type[np.uint64] = HASH_TYPE
def __repr__(self):
implementation = type(self).__name__
return f"[{implementation}, len: {len(self)}"
def __len__(self) -> int:
...
def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
...
def __getitem__(self, values) -> np.ndarray:
...
def __setitem__(self, keys, values) -> None:
...
def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
...
def keys(self) -> Iterable[np.uint64]:
...
def __iter__(self) -> Iterator[np.uint64]:
return iter(self.keys())
def add(self, h, contains=None):
"""Add the given keys. First time a key is added the value is set to 0,
then it's set to one."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
if contains is None:
contains = self.__contains__(h)
self.__setitem__(h, contains)
return contains
def merge(self, keys, values):
contains = self.__contains__(keys)
self.__setitem__(keys, contains | values)
def dump(self, filename):
return self.dump_np(filename)
def load(self, filename):
return self.load_np(filename)
def dump_np(self, filename):
kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
with open(filename, "wb") as f:
np.save(f, items)
def load_np(self, filename):
items = np.load(str(filename))
keys = items["k"].copy()
values = items["v"].copy()
self.merge(keys, values)
def dump_np2(self, filename):
keys = np.fromiter(
(k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
)
with open(filename, "wb") as f:
np.save(f, keys)
values = np.fromiter(
(v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
)
with open(str(filename) + ".val", "wb") as f:
np.save(f, values)
def load_np2(self, filename):
keys = np.load(filename)
values = np.load(str(filename) + ".val")
self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
"""Pure python implementation of AbstractDedupHashSet.
    This implementation is quite fast, since Python dicts are heavily optimized.
"""
def __init__(self, iterable=None):
super().__init__()
global GETPY_WARNING
if GETPY_WARNING:
warnings.warn(
"Module 'getpy' not found. Deduplication will take more RAM."
" Try `pip install cc_net[getpy]"
)
GETPY_WARNING = False
def __contains__(self, values):
"""Returns `True` if the object has been added at list once."""
contains_point = super().__contains__
return np.fromiter(
map(contains_point, values), count=len(values), dtype=np.uint8
)
def __getitem__(self, values):
"""Returns `True` if the object has been added at list twice."""
get_point = super().get
return np.fromiter(
map(lambda x: get_point(x, False), values),
count=len(values),
dtype=np.uint8,
)
def __setitem__(self, keys, values):
assert len(keys) == len(values)
for k, v in zip(keys, values):
dict.__setitem__(self, k, v)
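# Illustrative usage sketch, added for clarity (not part of the original cc_net
# module): the batched API returns, for each key, whether it had already been
# added before the current call.
def _example_naive_hash_set() -> None:
    h = NaiveHashSet()
    first = h.add(np.array([1, 2], dtype=HASH_TYPE))
    again = h.add(np.array([2, 3], dtype=HASH_TYPE))
    assert first.tolist() == [0, 0]  # nothing had been seen before
    assert again.tolist() == [1, 0]  # 2 is now a duplicate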
try:
import getpy as gp # type: ignore
class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
"""C++ backed implementation of AbstractDedupHashSet.
This implementation is slightly slower than the Python one but uses
3x less RAM.
See https://github.com/atom-moyer/getpy.
"""
def __init__(self):
super().__init__(HASH_TYPE, np.uint8, default_value=False)
def __contains__(self, h):
"""Returns `True` if the object has been added at list once."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
c = gp.Dict.__contains__(self, h)
c.dtype = np.uint8
return c
def dump(self, filename):
return self.dump_gp(filename)
def load(self, filename):
return self.load_gp(filename)
def dump_gp(self, filename):
return gp.Dict.dump(self, str(filename))
def load_gp(self, filename):
"""Override gp.Dict.load, to correctly merge values instead of overwriting."""
other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
other.load(str(filename))
n = len(other)
keys = np.fromiter(
(k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
)
values = np.fromiter(
(v for (k, v) in other.items()), dtype=np.uint8, count=n
)
self.merge(keys, values)
FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
GETPY_WARNING = True
FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
start = time.time()
function(*args)
end = time.time()
print(message, f"took {end - start:.0f}s")
def compare_load(*filenames):
assert filenames, "No file given"
def load_list():
hashes = []
for f in filenames:
h = FlatHashSet()
h.load(f)
print(f"Loaded {h} from {f}.")
hashes.append(h)
return hashes
def load_all(load, ext):
hashes = FlatHashSet()
for f in filenames:
load(hashes, f + ext)
def dump_all(hashes, dump, ext):
for h, f in zip(hashes, filenames):
dump(h, f + ext)
hashes = load_list()
dump_gp = getattr(FlatHashSet, "dump_gp")
if dump_gp is not None:
timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
timeit(
"Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
)
load_gp = getattr(FlatHashSet, "load_gp")
if load_gp is not None:
timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
compare_load(*sys.argv[1:])
|
cc_net-main
|
cc_net/flat_hash_set.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
HASH_SIZE = 4
HASH_TYPE = np.uint32
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to dedup methods but with use 32 bits hashes.
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
h = HASH_SIZE
return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
return base64.b64encode(b"".join(hashes)).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
all_hashes = base64.b64decode(compact)
res = []
assert len(all_hashes) % HASH_SIZE == 0
for i in range(len(all_hashes) // HASH_SIZE):
chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
res.append(chunk)
return res
def encode_line_ids(line_ids: Sequence[int]) -> str:
arr = np.array(line_ids, dtype="<u2")
return base64.b64encode(arr.tobytes()).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
ids_bytes = bytearray(base64.b64decode(compact))
return np.ndarray(len(ids_bytes) // 2, dtype="<i2", buffer=ids_bytes)
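# Illustrative round-trip sketch, added for clarity (not part of the original
# cc_net module): line ids are stored as little-endian 16-bit integers and
# sentence hashes as 4-byte chunks, both base64 encoded.
def _example_minify_roundtrip() -> None:
    assert list(decode_line_ids(encode_line_ids([0, 1, 5]))) == [0, 1, 5]
    assert decode_hashes(encode_hashes([b"abcd", b"efgh"])) == [b"abcd", b"efgh"]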
def get_doc_key(digest: str) -> int:
assert digest.startswith("sha1:")
h = base64.b32decode(digest[5:])
return _b2i(h[:HASH_SIZE])
class Minifier(jsonql.Transformer):
ready = True
def __init__(self):
self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
def do(self, doc: dict) -> Optional[dict]:
line_ids: List[int] = doc.pop("line_ids")
fields = self.fields
keys = list(doc.keys())
for k in keys:
if k not in fields:
doc.pop(k, None)
p = doc.get("perplexity", 0)
doc["line_ids"] = encode_line_ids(line_ids)
if p:
doc["perplexity"] = round(p, 1)
s = doc.get("language_score", 0)
if s:
doc["language_score"] = round(s, 2)
return doc
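# Illustrative sketch, added for clarity (hypothetical values, not part of the
# original cc_net module): a minified document keeps only PUBLIC_FIELDS and
# COMPUTED_FIELDS, rounds the scores and stores the kept line ids as base64:
#
#   {"url": "http://example.com", "digest": "sha1:...", "cc_segment": "...",
#    "language": "en", "language_score": 0.87, "perplexity": 310.2,
#    "bucket": "head", "line_ids": "AAABAAUA"}  # encode_line_ids([0, 1, 5])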
class MetadataFetcher(jsonql.Transformer):
"""Reads documents from CC snapshot and join precomputed metadata.
CC snapshots are split in segments. Each segment is 64Mb long.
The metadata must also be stored in segments of the same size and names.
"""
def __init__(self, folder: Union[Path, str]):
self.ready = True
self.metadata: Dict[int, dict] = {}
self._segments: Set[str] = set()
self.read_doc = 0
self.missed_doc = 0
self.missed_par = 0
self.processed_par = 0
if isinstance(folder, str):
# detect path passed as string
if urllib.parse.urlparse(folder).scheme == "":
folder = Path(folder)
assert folder.exists(), f"Metadata folder not found: {folder}"
self.folder = folder
self.segment: str = ""
self.segments_read_twice = 0
def meta_file(self, segment: str) -> str:
file_name = segment.split("/")[-1]
assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
if isinstance(self.folder, str):
return urllib.parse.urljoin(
self.folder, file_name.replace(".warc.wet", ".json")
)
meta_file = self.folder / file_name.replace(".warc.wet", ".json")
assert (
meta_file.exists()
), f"Couldn't find metadata file for segment {segment} at {meta_file}"
return str(meta_file)
def fetch_metadata(self, segment: str) -> None:
meta_file = self.meta_file(segment)
k = get_doc_key
self.metadata = {}
collision = 0
for m in jsonql.read_jsons(meta_file):
key = k(m["digest"])
if key in self.metadata:
collision += 1
self.metadata[key] = m
self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
if collision > 0:
self._logger.warning(f"Found {collision} collisions !")
self.segment = segment
if segment in self._segments:
self.log("Cache miss")
self.segments_read_twice += 1
self._segments.add(segment)
def do(self, doc: dict) -> Optional[dict]:
if self.segment != doc["cc_segment"]:
self.fetch_metadata(doc["cc_segment"])
digest = doc["digest"]
key = get_doc_key(digest)
if key not in self.metadata:
return None
metadata = self.metadata.pop(key)
return self.clean(metadata, doc)
def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
line_ids = decode_line_ids(metadata.pop("line_ids"))
lines = full_doc["raw_content"].split("\n")
cleaned = []
for l in line_ids:
if l >= len(lines) or l < 0:
self.missed_par += 1
continue
cleaned.append(lines[l])
self.processed_par += len(line_ids)
if not cleaned:
self.missed_doc += 1
return None
full_doc["raw_content"] = "\n".join(cleaned)
full_doc["original_nlines"] = full_doc["nlines"]
full_doc["original_length"] = full_doc["length"]
full_doc["nlines"] = len(cleaned)
full_doc["length"] = len(full_doc["raw_content"])
for key, value in metadata.items():
full_doc[key] = value
return full_doc
def summary(self) -> List[str]:
summ = super().summary()
mem = mem_footprint_gb()
len_cache = len(self.metadata)
summ.append(
f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
)
if self.missed_doc:
r = self.missed_doc / self.processed
summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
if self.missed_par:
r = self.missed_par / self.processed
summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
return summ
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
"""Minify the given file."""
jsonql.run_pipes(Minifier(), file=file, output=output)
return f"Minified {output}"
def minify(
files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
"""Minify all the files in the given folder."""
files = _expand_files(files)
output_dir.mkdir(exist_ok=True)
with open(output_dir / "files.txt", "w") as o:
for f in files:
print(f.name, file=o)
outputs = [output_dir / f.name for f in files]
ex = get_executor(
"minify",
output_dir / "logs",
execution,
timeout_hour=2,
cpus=1,
task_parallelism=parallelism,
)
ex(minify_file, files, outputs)
def fetch_metadata_file(
file: Union[Path, str],
metadata_dir: Union[Path, str],
output: Path,
cache_dir: Path = None,
):
unminifier = MetadataFetcher(metadata_dir)
tmp = output.with_name("tmp." + output.name)
jsonql.run_pipes(unminifier, file=file, output=tmp)
tmp.rename(output)
return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
files: List[str],
metadata_dir: Union[Path, str],
output_dir: Path,
execution: str = "mp",
parallelism: int = -1,
cache_dir: Path = None,
):
if len(files) == 1 and Path(files[0]).is_dir():
folder = Path(files[0])
files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert len(files) > 0, "No files given."
output_dir.mkdir(exist_ok=True)
outputs = [output_dir / str(f).split("/")[-1] for f in files]
if cache_dir is None:
cache_dir = output_dir / "wet_cache"
cache_dir.mkdir(exist_ok=True)
if str(cache_dir) == "none":
cache_dir = None
files = [f for f, o in zip(files, outputs) if not o.exists()]
outputs = [o for o in outputs if not o.exists()]
if not files:
return
ex = get_executor(
"unminify",
output_dir / "logs",
execution,
timeout_hour=8,
cpus=1,
task_parallelism=parallelism,
mem_gb=32,
)
ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
import func_argparse
func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
|
cc_net-main
|
cc_net/minify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
UNICODE_PUNCT = {
",": ",",
"。": ".",
"、": ",",
"„": '"',
"”": '"',
"“": '"',
"«": '"',
"»": '"',
"1": '"',
"」": '"',
"「": '"',
"《": '"',
"》": '"',
"´": "'",
"∶": ":",
":": ":",
"?": "?",
"!": "!",
"(": "(",
")": ")",
";": ";",
"–": "-",
"—": " - ",
".": ". ",
"~": "~",
"’": "'",
"…": "...",
"━": "-",
"〈": "<",
"〉": ">",
"【": "[",
"】": "]",
"%": "%",
"►": "-",
}
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
    if len(output) == len(nfd):  # no combining marks were removed
return line
return "".join(output)
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
(UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
res = (
text.replace("\r", "")
# remove extra spaces
.replace("(", " (")
.replace(")", ") ")
.replace(" +", " ")
)
res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
res = res.replace("( ", "(").replace(" )", ")")
res = re.sub(r"(\d) \%", r"\1\%", res)
res = res.replace(" :", ":").replace(" ;", ";")
res = res.replace("`", "'").replace("''", ' " ')
res = (
res.replace("„", '"')
.replace("“", '"')
.replace("”", '"')
.replace("–", "-")
.replace("—", " - ")
.replace(" +", " ")
.replace("´", "'")
.replace("([a-z])‘([a-z])", r"\1'\2/")
.replace("([a-z])’([a-z])", r"\1'\2/")
.replace("‘", '"')
.replace("‚", '"')
.replace("’", '"')
.replace("''", '"')
.replace("´´", '"')
.replace("…", "...")
# French quotes
.replace(" « ", ' "')
.replace("« ", '"')
.replace("«", '"')
.replace(" » ", '" ')
.replace(" »", '"')
.replace("»", '"')
# handle pseudo-spaces
.replace(" %", "%")
.replace("nº ", "nº ")
.replace(" :", ":")
.replace(" ºC", " ºC")
.replace(" cm", " cm")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ;", ";")
.replace(", ", ", ")
.replace(" +", " ")
.replace(".", ". ")
)
# English "quotation," followed by comma, style
if language == "en":
res = re.sub(r"\"([,\.]+)", r"\1\"", res)
# Czech is confused
elif language == "cs" or language == "cz":
pass
# German/Spanish/French "quotation", followed by comma, style
else:
res = res.replace(',"', '",')
res = re.sub(
r"(\.+)\"(\s*[^<])", r"\"\1\2", res
) # don't fix period at end of sentence
if (
language == "de"
or language == "es"
or language == "cz"
or language == "cs"
or language == "fr"
):
res = re.sub(r"(\d) (\d)", r"\1,\2", res)
else:
res = re.sub(r"(\d) (\d)", r"\1.\2", res)
return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = strip_accents(line)
if numbers:
line = DIGIT_RE.sub("0", line)
if punct == 1:
line = replace_unicode_punct(line)
elif punct == 2:
line = remove_unicode_punct(line)
line = remove_non_printing_char(line)
return line
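# Illustrative usage sketch, added for clarity (not part of the original cc_net
# module): with the default options the text is lowercased, accents are stripped
# and every digit is mapped to "0".
def _example_normalize() -> None:
    assert normalize("Été 2020") == "ete 0000"
    assert normalize("Hello, World 42!") == "hello, world 00!"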
def slow_normalize_for_dedup(line: str) -> str:
return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
line = line.strip()
if not line:
return line
# case
line = line.lower()
# numbers
line = DIGIT_RE.sub("0", line)
line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line)
return line
|
cc_net-main
|
cc_net/text_normalizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
return file.parent / (file.name + ".index")
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def fast_reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Same as reshard but don't re-compress the output.
This will lead to a bigger output file, especially if the shards are very small.
"""
if tmp is None:
tmp = _get_tmp(output)
with open(tmp, "wb") as o:
subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)
tmp.replace(output)
indexes_files = [get_index(i) for i in inputs]
existing_indexes = sum(i.exists() for i in indexes_files)
assert (
existing_indexes == len(indexes_files) or existing_indexes == 0
), "some indexes don't exist."
if existing_indexes > 0:
indexes = [np.load(idx) for idx in indexes_files]
for i in range(len(indexes) - 1):
indexes[i + 1] += indexes[i][-1]
with open(str(output) + ".index", "wb") as o:
np.save(o, np.concatenate(indexes))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def determine_groups(
inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
if len(inputs) == 0:
return []
sample = inputs[:10]
typical_size = sum(s.stat().st_size for s in sample) / len(sample)
group_size = min(target_size // typical_size, len(inputs))
group_size = max(group_size, 1)
return jsonql.grouper(inputs, group_size)
if __name__ == "__main__":
func_argparse.single_main(reshard)
|
cc_net-main
|
cc_net/regroup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
parser = argparse.ArgumentParser(
description="Compute the score of each sentences of a document",
parents=[jsonql.io_parser()],
)
parser.add_argument("--models", type=str)
parser.add_argument("--sentences", action="store_true", default=False)
parser.add_argument(
"--languages", type=str, help="Ignore doc with another language"
)
parser.add_argument("--field", type=str, default=None)
parser.add_argument("--newline", type=str, default="\n")
return vars(parser.parse_args())
def pp(log_score, length):
return 10.0 ** (-log_score / length)
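# Illustrative sketch, added for clarity (not part of the original cc_net
# module): kenlm reports log10 probabilities, so the per-token perplexity of a
# document is 10 ** (-log_score / length).
def _example_pp() -> None:
    assert abs(pp(-200.0, 100) - 100.0) < 1e-6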
class SentencePiece(jsonql.Transformer):
    # SentencePiece models have to be read back from disk.
warning_when_pickling = True
def __init__(
self,
model: Path,
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.model = model
self.field = field
self.output_field = output_field
self.normalize = normalize
self.sp: sentencepiece.SentencePieceProcessor = None
def _prepare(self):
if self.sp is not None:
return
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(self.model))
return self
def do(self, document: dict) -> dict:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
tokenized = self.sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class MultiSentencePiece(jsonql.Transformer):
warning_when_pickling = True
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.field = field
self.output_field = output_field
self.normalize = normalize
self._prefetch: Sequence[str] = []
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_sp(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
sp = self.sp.get(lang)
if sp is not None:
return sp
if lang not in self.models:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
sp = sentencepiece.SentencePieceProcessor()
sp.load(str(self.models[lang]))
self.sp[lang] = sp
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return sp
def do(self, document: dict) -> Optional[dict]:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
sp = self.get_sp(document.get("language"))
if sp is None:
return document
tokenized = sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class DocLM(jsonql.Transformer):
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "perplexity",
newline: str = "\n",
normalize: bool = True,
load_method: int = 2,
):
super().__init__()
self.field = field
self.output_field = output_field
self.newline = newline
self.normalize = normalize
self._prefetch: Sequence[str] = []
self.lm_config = kenlm.Config()
        # These are the default settings:
# POPULATE will mmap the models and populate the pages.
# Maybe that's not the best way when the models are on a network disk.
# TODO: try copying models file, try READ or PARALLEL_READ
self.lm_config.load_method = load_method
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.lm: Dict[str, kenlm.Model] = {}
self.n_lines = 0
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_lm(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_lines(self, document: dict) -> List[str]:
lang = document.get("language")
if not lang:
return []
if lang not in self.models:
return []
content = document.get(self.field)
if not content:
return []
lines = content.split(self.newline)
self.n_lines += len(lines)
return lines
def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
if lang is None:
return None
lm = self.lm.get(lang)
if lm is not None:
return lm
model = self.models.get(lang)
if model is None:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
lm = kenlm.Model(str(model), self.lm_config)
self.lm[lang] = lm
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return lm
def do(self, document: dict) -> dict:
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return document
doc_log_score, doc_length = 0, 0
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
return document
def summary(self):
delay = time.time() - self.start_time
h = delay / 3600
s = self.n_lines / delay
summ = super().summary()
summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
return summ
class SentencesLM(DocLM):
"""Returns the score of each individual paragraph."""
def do(self, document: dict) -> Optional[str]: # type: ignore
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return None
sentences = []
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
sentences.append(f"{pp(log_score, length)}\t{line}")
return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
def __init__(
self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
):
super().__init__()
self.cutoff_csv = cutoff_csv
self.percentile_head = percentile_head
self.percentile_tail = percentile_tail
self.cutoffs: Dict[str, Tuple[float, float]] = {}
def _prepare(self) -> None:
cutoffs = pd.read_csv(self.cutoff_csv, index_col=0)
self.cutoffs = {
l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail])
for l in cutoffs.columns
}
def get_bucket(self, doc: dict) -> str:
perplexity = doc.get("perplexity", -1)
lang = doc.get("language")
if lang not in self.cutoffs or perplexity < 0:
return "all"
pp_head, pp_tail = self.cutoffs[lang]
if perplexity < pp_head:
return "head"
if perplexity < pp_tail:
return "middle"
return "tail"
def do(self, doc: dict) -> dict:
doc["bucket"] = self.get_bucket(doc)
return doc
class DropKeys(jsonql.Transformer):
def __init__(self, *keys):
super().__init__()
self.keys = keys
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
for key in self.keys:
document.pop(key, None)
return document
class RemoveSmall(jsonql.Transformer):
def __init__(self, field, min_len):
super().__init__()
self.field = field
self.min_len = min_len
self.removed = 0
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
content = document.get(self.field)
if not content or len(content) < self.min_len:
self.removed += 1
return None
return document
def summary(self):
r, n = self.removed, self.processed
ratio = r / n if n else 0
return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
pp_field = "perplexity"
lm = DocLM(models, tok_field, output_field=pp_field)
stats: List[float] = []
max_stats = 1_000_000
batch_size = 100_000
i = 0
batch = []
with open(output, "wb") as o:
for doc in jsonql.read_jsons(file):
i += 1
pp = lm(doc)[pp_field]
if len(stats) < max_stats:
stats.append(pp)
batch.append(pp)
if len(batch) >= batch_size:
np.array(batch, dtype=np.float32).tofile(o)
batch = []
if len(batch) > 0:
np.array(batch, dtype=np.float32).tofile(o)
if __name__ == "__main__":
args = get_args()
output = Path(args["output"])
if output.suffix == ".bin":
perplexity_to_bin(args["file"], output, args["models"], args["field"])
else:
jsonql.run_pipe(DocLM, args)
|
cc_net-main
|
cc_net/perplexity.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
|
cc_net-main
|
cc_net/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
        delay = time.time() - self.start_time
        h = delay / 3600
        speed = self.n_docs / delay
        return [
            f"Tokenized {self.n_docs:_} documents in {h:.2}h ({speed:.1} doc/s)."
]
|
cc_net-main
|
cc_net/tokenizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them",
parents=[jsonql.io_parser()],
)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--output_hashes", type=str)
parser.add_argument("--no_finalize", action="store_false", dest="finalize")
# parser.add_argument("--mem_gb", type=int)
parser.add_argument("--hashes", type=str)
return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
log = logging.getLogger(__name__).info
def run_par(processes):
# This is different from multiprocessing.map since it allows for kwargs.
processes = list(processes)
if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
for f, args, kwargs in processes:
f(*args, **kwargs)
return
log(f"Starting {len(processes)} subprocess")
processes = [
multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
]
for p in processes:
p.start()
for p in processes:
p.join()
failed = 0
for p in processes:
if p.exitcode != 0:
log(f"Process failed with code {p.exitcode}: {p}")
failed += 1
assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
for i in range(n_splits):
yield jsonql.SplitFile(file, i, n_splits)
def merge(hashes_1, hashes_2, output):
if isinstance(hashes_1, str):
h1 = FlatHashSet()
h1.load(hashes_1)
else:
h1 = hashes_1
if isinstance(hashes_2, str):
h2 = FlatHashSet()
h2.load(hashes_2)
else:
h2 = hashes_2
h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
dup = h1.__contains__(h2_np)
# Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
# h1 with their value.
h1[h2_np] = dup
if output:
h1.dump(output)
return h1
def merge_shard(hash_files, output):
h = FlatHashSet()
h.load(hash_files[0])
for hash_file in hash_files[1:]:
h = merge(h, hash_file, output=None)
print(f"Merged {hash_file}. We now have {len(h)} hashes.")
h.dump(output)
print(f"Saved {len(h)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
treated = 0
started = time.time()
with open(output, "wb") as o:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content:
continue
h = compute_hashes(content)
if h is None:
continue
h.tofile(o)
treated += 1
if treated % 100_000 == 0:
delay = time.time() - started
log(
f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
)
def _remove_duplicate_hashes(duplicates, source, output):
batch_size = 100_000
n_lines, n_lines_kept = 0, 0
with open(source, "rb") as f, open(output, "wb") as o:
log(f"Opening {source} with mode rb")
log(f"Opening {output} with mode wb")
while True:
hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
if hashes.size == 0:
break
keep = duplicates[hashes] < 1
kept = keep.sum()
hashes *= keep
hashes.tofile(o)
n_lines += hashes.size
n_lines_kept += kept
removed = n_lines - n_lines_kept
selectivity = n_lines_kept / n_lines if n_lines else 0
log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
    If a hash appears exactly once in each shard of hashes, it won't be detected
    as a duplicate. This can be fixed if the hashes are fully deduplicated beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup()
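# Minimal usage sketch (not part of the original module; every path below is a
# hypothetical placeholder). It shows how remove_duplicates_sharded is typically
# called: one output per input shard, and `group_hashes` bounding how many hash
# shards are loaded into RAM per pass.
def _example_remove_duplicates_sharded() -> None:
    shards = [Path("data/part_0.json.gz"), Path("data/part_1.json.gz")]
    remove_duplicates_sharded(
        files=shards,
        outputs=[Path("dedup") / f.name for f in shards],
        hashes_dir=Path("hashes"),  # directory containing the *.bin hash shards
        field="raw_content",
        group_hashes=2,
    )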
def compute_hashes(content) -> Optional[np.ndarray]:
if not content:
return None
lines = content.split("\n")
# save hashes as bytes but reinterpret them as uint64.
hashes = np.fromiter(
(
hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
:HASH_SIZE
]
for l in lines
),
dtype=np.dtype((bytes, HASH_SIZE)),
count=len(lines),
)
return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
def finalize_doc(doc, field, hashes=None):
content = doc.get(field)
lines = content.split("\n")
n_chars = len(content)
if "original_nlines" not in doc:
doc["original_nlines"] = doc.get("nlines", len(lines))
if "original_length" not in doc:
doc["original_length"] = doc.get("length", n_chars)
if hashes is None:
hashes = doc.pop(field + "_hash")
# Remove duplicates inside doc
seen: Set[int] = set()
original_line_ids = doc.get("line_ids", range(len(hashes)))
line_ids = []
new_lines = []
for l, line, h in zip(original_line_ids, lines, hashes):
if h not in seen and h != 0:
line_ids.append(l)
new_lines.append(line)
seen.add(h)
doc[field] = "\n".join(new_lines)
doc["nlines"] = len(line_ids)
n_chars_kept = len(doc[field])
doc["length"] = n_chars_kept
doc["line_ids"] = line_ids
return n_chars, n_chars_kept
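# Illustrative worked example (not part of the original module): for a doc whose
# field holds the lines ["a", "b", "a"] with hashes [h1, h2, 0] (a zero hash means
# "duplicate, drop"), finalize_doc keeps ["a", "b"], sets nlines=2 and
# line_ids=[0, 1], and records original_nlines=3 / original_length on the first call.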
class HashesCollector(jsonql.Transformer):
"""
    Collect all hashes of the lines found in the `field` of the source documents.
"""
parallelisable = False
def __init__(
self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
):
super().__init__()
self.n_lines = 0
self.field = field
self.output = output
self.hashes = FlatHashSet() if hashes is None else hashes
self.num_hashes_end = 0
self.num_hashes_start = len(self.hashes)
def summary(self) -> List[str]:
summ = super().summary()
h = self.num_hashes_end if self.hashes is None else len(self.hashes)
h = (h - self.num_hashes_start) // 1000
max_mem = mem_footprint_gb()
n = self.n_lines // 1000
summ.append(
f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
)
return summ
def do(self, doc: dict) -> None:
doc_hashes = compute_hashes(doc.get(self.field))
if doc_hashes is None:
return
self.hashes.add(doc_hashes)
self.n_lines += doc_hashes.size
def close(self):
if self.output and self.hashes:
self.hashes.dump(self.output)
self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
# Save the number of hashes.
self.num_hashes_end = len(self.hashes)
# Free up mem even if the transformer is kept somewhere else.
self.hashes = None # type: ignore
class DuplicatesRemover(jsonql.Transformer):
"""DuplicatesRemover"""
# The hashes can't be pickled so they will have to be read back from disk.
warn_when_pickling = True
def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
"""
Remove duplicates
"""
super().__init__()
self.field = field
self.collect = collect
self.hashes_files = hashes_files
self.duplicates: Optional[AbstractDedupHashSet] = None
self.n_lines, self.n_lines_kept = 0, 0
self.n_chars, self.n_chars_kept = 0, 0
def _prepare(self):
if self.duplicates is not None:
return
self.duplicates = FlatHashSet()
start = time.time()
for h in self.hashes_files:
shard_start = time.time()
self.duplicates.load(str(h))
delay = time.time() - shard_start
self.log(
f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
)
delay = time.time() - start
self.log(
f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
)
def do(self, doc: dict) -> Optional[dict]:
content = doc.get(self.field)
if not content:
return None
doc_hashes = compute_hashes(content)
assert self.duplicates is not None
seen = (
self.duplicates.add(doc_hashes)
if self.collect
else self.duplicates[doc_hashes]
)
keep = seen < True
kept = keep.sum()
if kept == 0:
return None
doc_hashes = doc_hashes * keep
self.n_lines += keep.size
self.n_lines_kept += kept
chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
self.n_chars += chars
self.n_chars_kept += kept_chars
return doc
def summary(self) -> List[str]:
summ = super().summary()
end_time = time.time()
n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
speed = n_docs / (end_time - self.start_time)
summ.append(
f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
)
selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
selectivity = n_chars_kept / n_chars if n_chars else 0
summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
return summ
def deduplicate(
file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (but keep the first occurence)."""
dup_remover = DuplicatesRemover(field, [], collect=True)
return dup_remover.map(jsonql.read_jsons(file))
def deduplicate_two_pass(
file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (even removing the first occurence).
This is what is done in the paper, and in mine.py
"""
try:
if isinstance(file, Path):
hash_file: Path = file.with_suffix(".bin")
else:
hash_file = jsonql._tmp(Path("hashes.bin"))
jsonql.run_pipes(
jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
)
dup_remover = DuplicatesRemover(field, [hash_file])
return dup_remover.map(jsonql.read_jsons(file))
finally:
if hash_file.exists():
hash_file.unlink()
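# Minimal usage sketch (not part of the original module; the shard path is a
# hypothetical placeholder). `deduplicate` keeps the first occurrence of each
# paragraph, while `deduplicate_two_pass` also drops the first occurrence of any
# repeated paragraph, as done in mine.py.
def _example_deduplicate(shard: Path = Path("data/shard_0.json.gz")) -> None:
    for doc in deduplicate(shard, field="raw_content"):
        print(doc["nlines"], doc["length"])
    for doc in deduplicate_two_pass(shard, field="raw_content"):
        print(doc["nlines"], doc["length"])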
|
cc_net-main
|
cc_net/dedup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://commoncrawl.s3.amazonaws.com"
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"])
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
wet_paths = cc_wet_paths_url(dump_id)
cache_dir = cache_dir or jsonql._tmp_dir()
wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
home_page = BeautifulSoup(
urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in home_page.findAll("a")]
dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)]
return sorted(dumps)
def ls():
for dump in list_dumps():
print(dump, "->", cc_wet_paths_url(dump))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
"""Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: [url]
WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
Content-Type: text/plain
Content-Length: 7743
"""
if not headers or not doc:
return None
try:
warc_type = headers[1].split()[1]
if warc_type != "conversion":
return None
url = headers[2].split()[1]
date = headers[3].split()[1]
digest = headers[6].split()[1]
length = int(headers[8].split()[1])
except Exception as e:
logger.warning("Can't parse header:", e, headers, doc)
return None
# Docs are separated by two empty lines.
last = None
if not doc[-1] and not doc[-2]:
last = -2
title, doc = doc[0], doc[1:last]
return {
"url": url,
"date_download": date,
"digest": digest,
"length": length,
"nlines": len(doc),
"source_domain": urlparse(url).netloc,
"title": title,
"raw_content": "\n".join(doc),
}
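# Illustrative note (not part of the original file): the header indices used above
# follow the sample layout in the docstring -- headers[1] is WARC-Type, headers[2]
# WARC-Target-URI, headers[3] WARC-Date, headers[6] WARC-Block-Digest and
# headers[8] Content-Length.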
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
doc: List[str] = []
headers, read_headers = [], True
for warc in warc_lines:
warc = warc.strip()
if read_headers:
headers.append(warc)
read_headers = warc != ""
continue
if warc == "WARC/1.0":
# We reached the beginning of the new doc.
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
headers, doc, read_headers = [warc], [], True
continue
doc.append(warc)
# Return the last document
if doc:
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
n_doc = 0
n_ok = 0
for doc in group_by_docs(lines):
n_doc += 1
if not doc or len(doc["raw_content"]) < min_len:
continue
n_ok += 1
yield doc
if n_doc > 0:
logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).")
else:
logger.info(f"Found no documents")
def dl(
dump: str,
shard: int,
num_shards: int,
output: Path = None,
num_segments_per_shard: int = 0,
):
"""Download a shard of the common crawl, and export it to json.
Arguments:
output: filename of the output file
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
        num_segments_per_shard: manual control of the number of segments per shard.
"""
reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
jsonql.run_pipes(inputs=reader, output=output)
logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
def __init__(
self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
):
self._segments = segments
self.min_len = min_len
if cache_dir is not None:
cache_dir = Path(cache_dir)
cache_dir.mkdir(exist_ok=True)
self.cache_dir = cache_dir
self.retrieved_segments = 0
def segment_url(self, segment: str):
return "/".join((WET_URL_ROOT, segment))
@property
def segments(self) -> Sequence[str]:
return self._segments
def open_segment(self, segment: str) -> Iterable[str]:
url = self.segment_url(segment)
file: Optional[Path] = None
if self.cache_dir:
file = self.cache_dir / segment.split("/")[-1]
if not file or not file.exists():
self.retrieved_segments += 1
return jsonql.open_remote_file(url, cache=file)
def __iter__(self) -> Iterator[dict]:
n = len(self.segments)
for i, segment in enumerate(self.segments):
start = time.time()
# TODO: start downloading the next segment in the background
for doc in parse_warc_file(self.open_segment(segment), self.min_len):
doc["cc_segment"] = segment
yield doc
if i + 1 >= n:
continue
end = time.time()
delay = (end - start) / 3600 * (n - 1 - i)
logger.info(
f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
)
class CCShardReader(CCSegmentsReader):
def __init__(
self,
dump: str,
shard: int,
num_shards: int = -1,
num_segments_per_shard: int = 40,
min_len: int = 300,
cache_dir: Path = None,
):
"""Downloads a shard of Common Crawl, and yields dict.
Arguments:
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
            num_segments_per_shard: if set, will limit the number of files per shard.
Useful for testing.
"""
super().__init__([], min_len=min_len, cache_dir=cache_dir)
self.dump = dump
self.shard = shard
assert num_shards > 0 or num_segments_per_shard > 0
self.num_shards = num_shards
self.num_segments_per_shard = num_segments_per_shard
@property
def segments(self) -> Sequence[str]:
        # Delay the initialization so that the WET file listing is only fetched when needed
if self._segments:
return self._segments
segments = cc_segments(self.dump, self.cache_dir)
n = len(segments)
if self.num_shards < 0:
self.num_shards = n // self.num_segments_per_shard
i_min = (self.shard * n) // self.num_shards
i_max = ((self.shard + 1) * n) // self.num_shards
if self.num_segments_per_shard > 0:
i_max = min(i_max, i_min + self.num_segments_per_shard)
self._segments = segments[i_min:i_max]
return self._segments
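# Illustrative worked example (not part of the original file): in CCShardReader.segments,
# with n = 1600 segments, num_shards = 160 and shard = 3, the slice is
# i_min = 3 * 1600 // 160 = 30 and i_max = 4 * 1600 // 160 = 40, i.e. 10 segments;
# with num_segments_per_shard = 5 it is clipped to segments[30:35].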
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
start = time.time()
yield None
delay = time.time() - start
print(f"{name} took {delay:.1f}s")
def benchmark(tmp_path: Path):
segments = [
"crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
]
seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
with timer("from network"):
list(CCSegmentsReader(segments))
with timer("from network, with caching"):
list(CCSegmentsReader(segments, cache_dir=tmp_path))
assert seg_file.exists()
with timer("from disk"):
        list(CCSegmentsReader(segments, cache_dir=tmp_path))
seg_file.unlink()
if __name__ == "__main__":
func_argparse.main(ls, dl)
|
cc_net-main
|
cc_net/process_wet_file.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
func_argparse.parse_and_call(cc_net.mine.get_main_parser())
if __name__ == "__main__":
main()
|
cc_net-main
|
cc_net/__main__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
parser = argparse.ArgumentParser(
description="Read a list of json files and split them ",
parents=[jsonql.io_parser()],
)
parser.add_argument("--pattern", type=str)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--threshold", type=float, default=0)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--out_field", type=str, default="language")
parser.add_argument("--top", type=int, default=1)
return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
labels, scores = model.predict(text, k=k)
labels = [l.replace("__label__", "") for l in labels]
return labels, scores
def avg_predict(model, text):
    # Overall this gives the same results as predict(model, text.replace("\n", ""))
text = text.split("\n")
text_len = sum(len(line) for line in text)
if text_len == 0:
return None, 0
scores = [predict(model, line) for line in text]
scores_by_label: Dict[str, float] = collections.defaultdict(float)
for (label, score), line in zip(scores, text):
scores_by_label[label] += score * len(line)
label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
return label, score / text_len
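# Illustrative worked example (not part of the original file): avg_predict is meant to
# length-weight the per-line predictions, e.g. a 7-char line scored ("fr", 0.9) and a
# 2-char line scored ("en", 0.8) give scores_by_label == {"fr": 6.3, "en": 1.6}, so the
# document is labelled ("fr", 6.3 / 9 ~= 0.7).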
class Classifier(jsonql.Transformer):
def __init__(
self,
model: Path,
field: str,
out_field: str,
threshold: float = 0,
top: int = 1,
language: str = None,
rounding: int = 2,
):
super().__init__()
self.model = model
assert model.exists(), f"Model {model} doesn't exist."
self.field = field
self.out_field = out_field
self.threshold = threshold
self.top = top
self.language = language
self.rounding = rounding
# Fasttext model is a C object and can't be pickled
self.fasttext_model: fasttext._FastText = None
self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
self.cnt: Dict[str, int] = {}
def _prepare(self):
self.log(f"Loading {self.model}")
self.fasttext_model = fasttext.load_model(str(self.model))
def predict(self, text):
return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
def do(self, doc: dict) -> Optional[dict]:
text = doc.get(self.field, None)
if not text:
return None
if self.language and doc.get("language") != self.language:
self.n_ignored += 1
return doc
self.n_doc += 1
labels, scores = self.predict(text)
scores.round(self.rounding, out=scores)
for l in labels:
self.cnt[l] = self.cnt.get(l, 0) + 1
if self.top == 1:
existing_label = doc.get(self.out_field, None)
if existing_label and labels[0] != existing_label:
self.n_disagreement += 1
if all(s < self.threshold for s in scores):
return None
self.n_accepted += 1
if self.top == 1:
doc[self.out_field] = labels[0]
doc[self.out_field + "_score"] = scores[0]
else:
doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
return doc
def summary(self):
n_doc, n_accepted, n_disagreement, cnt, out_field = (
self.n_doc,
self.n_accepted,
self.n_disagreement,
self.cnt,
self.out_field,
)
summ = super().summary()
if self.threshold > 0:
ratio = n_accepted / n_doc if n_doc else 0
summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
disagreement = n_disagreement / n_doc if n_doc else 0
if disagreement:
summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
return summ
def __repr__(self):
return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
classifier = Classifier(**kwargs)
splitter = jsonql.split(pattern)
jsonql.run_pipes(classifier, splitter, file=file, output=output)
if __name__ == "__main__":
args = get_args()
pattern = args.get("pattern")
if pattern:
classify_and_split(**args)
else:
args.pop("pattern")
jsonql.run_pipe(Classifier, args)
|
cc_net-main
|
cc_net/split_by_lang.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
lang: str
text: str
lm_score: float
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
"""
    Download the metadata for one shard.
Sample metadata:
{
"cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
"digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
"url": "http://personals.gearplay.com/ads/DRJONES.htm",
"line_ids": [10],
"languages": ["en_XX"],
"lm_scores": [-2.658],
}
"""
snapshot = snapshot.replace("-", "_")
name = f"snap_{snapshot}_batch_{shard}.json.gz"
url = "/".join([S3_BUCKET, VERSION, name])
shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
try:
cache_file: Optional[Path] = None
if WET_CACHE is not None:
cache_file = WET_CACHE / name
metadata_file = jsonql.open_remote_file(url, cache_file)
    except Exception:
logging.warning(f"Couldn't open {url}")
return
for meta in jsonql.read_jsons(metadata_file):
shard_metadata[meta["cc_segment"]][meta["digest"]] = meta
found_pars, missed_pars = 0, 0
for seg, segment_metadata in shard_metadata.items():
for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
if doc["digest"] not in segment_metadata:
continue
meta = segment_metadata[doc["digest"]]
full_pars = [doc["title"]] + doc["raw_content"].split("\n")
assert len(meta["line_ids"]) == len(meta["languages"])
assert len(meta["line_ids"]) == len(meta["lm_scores"])
for i, lang, score in zip(
meta["line_ids"], meta["languages"], meta["lm_scores"]
):
if snapshot != "2018-51" and lang in BIG_LANGUAGES:
# Big languages only come from "2018-51" snapshot
continue
if i >= len(full_pars):
# This is because CC100 was created by saving only urls.
                    # Some urls appear in different snapshots with slightly different
# versions, but we don't know which one is correct.
# Here we read both versions, but some index may end up
# being incorrect.
                    # This impacts ~3% of documents.
missed_pars += 1
continue
yield Paragraph(lang, full_pars[i], score)
found_pars += 1
if missed_pars > 0:
logging.warning(
f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes."
)
def _split_by_par(
paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
outdir.mkdir(exist_ok=True)
outfiles = {}
num_pars = 0
try:
for par in paragraphes:
# MODIFY ME: filter paragraph if needed (languages, score, ...)
if par.lang not in outfiles:
(outdir / par.lang).mkdir(exist_ok=True)
outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
outfiles[par.lang] = gzip.open(outfile, "wt")
print(par.text, file=outfiles[par.lang])
num_pars += 1
finally:
for o in outfiles.values():
o.close()
logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
if processes == 0:
yield map
return
with multiprocessing.Pool(processes) as pool:
yield pool.imap_unordered
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
_dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)
with unordered_map(processes) as umap:
num_pars = sum(umap(_dl_shard, range(500)))
logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
"""
Download CC100 corpus.
Will create one text file per language and CC snapshot.
- snapshot: restrict to one snapshot. Useful for parallelization.
- outdir: output directory
- processes: number of processes to use
"""
if snapshot is None:
snapshots = CC_100_SNAPSHOTS
else:
snapshots = snapshot.split(",")
invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}"
for snapshot in snapshots:
dl_snapshot(snapshot, outdir, processes)
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
|
cc_net-main
|
cc_net/tools/dl_cc_100.py
|
cc_net-main
|
cc_net/tools/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label document with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
if url in url2tags:
url2tags[url] &= tags
else:
url2tags[url] = tags
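# Illustrative sketch (not part of the original file; "example.com" is a placeholder):
# tags of a url seen several times are intersected, so only the categories shared by
# every occurrence survive:
#   url2tags: TaggedUrls = {}
#   add_tags("example.com", {"Arts", "Animation"}, url2tags)
#   add_tags("example.com", {"Arts", "Music"}, url2tags)
#   assert url2tags["example.com"] == {"Arts"}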
def load_tags(filename: Path = None) -> TaggedUrls:
if filename is None:
with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
tree = etree.parse(dmoz)
else:
tree = etree.parse(str(filename))
root = tree.getroot()
url2tags: Dict[str, Set[str]] = {}
for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
url = external_page.get("about")
domain = urlparse(url).netloc
for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
# print(url, topic.text)
# Tags looks like Top/Arts/Animation/Anime/Collectibles
tags = set(topic.text.split("/")[1:])
add_tags(url, tags, url2tags)
add_tags(domain, tags, url2tags)
return url2tags
def dl(output: Path) -> None:
urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
"""
    Loads a tags file and creates a training dataset using the given webpages.
Arguments:
- file: CC shard file
- tags_file: dmoz tagging file, (like the one produced by `dl`)
- output: ""
"""
url2tags = load_tags(tags_file)
with jsonql.open_write(output) as o:
for document in jsonql.read_jsons(file):
if not document:
continue
url = document["url"]
domain = document["source_domain"]
if url in url2tags:
tags = url2tags[url]
elif domain in url2tags:
tags = url2tags[domain]
else:
continue
if len(tags) == 0:
continue
fasttext_tags = ["__label__" + tag for tag in tags]
content = document["tokenized"].replace("\n", " ").lower()
if len(content) > 200:
print(" ".join(fasttext_tags), content, file=o) # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
|
cc_net-main
|
cc_net/tools/make_dmoz_corpus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
normalized = output_dir / (corpus.stem + ".normalized")
if normalized.exists():
return normalized
print("Will normalize", corpus, "to", normalized)
jsonql.run_pipes(
jsonql.Mapper(text_normalizer.normalize),
file=corpus,
output=normalized,
processes=PROCESSES,
)
return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model")
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
def __init__(self, model: Path):
super().__init__()
self.model = model
self.sp: SentencePieceProcessor = None # type: ignore
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.model))
def do(self, line: str) -> str:
return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
def __init__(
self,
sp_model: Path,
lm_model: Path,
field: str = "raw_content",
threshold: float = float("+inf"),
):
super().__init__()
self.sp_model = sp_model
self.lm_model = lm_model
self.field = field
self.threshold = threshold
self.sp: SentencePieceProcessor = None
self.lm: KenlmModel = None
self.splitter: SentenceSplitter = None
self.hashes: Set[int] = set()
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.sp_model))
self.splitter = SentenceSplitter("en")
self.lm = KenlmModel(str(self.lm_model))
def do(self, document: dict) -> Optional[str]:
content: Optional[str] = document.get(self.field)
if not content:
return None
all_sentences = [
s for l in content.split("\n") if l for s in self.splitter.split(text=l)
]
unique_sentences = []
for s in all_sentences:
if not s:
continue
h = dedup.str_hash(s)
if h in self.hashes:
continue
self.hashes.add(h)
unique_sentences.append(s)
scores = []
for sentence in unique_sentences:
normalized = text_normalizer.normalize(sentence)
pieces = self.sp.encode_as_pieces(normalized)
log_score = self.lm.score(" ".join(pieces))
pp = -1
if len(pieces):
pp = perplexity.pp(log_score, len(pieces))
scores.append(pp)
res = filter(
lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
)
return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
tokenized = output_dir / (corpus.stem + ".tokenized")
if tokenized.exists():
return tokenized
print("Will SentencePiece", corpus, "to", tokenized)
jsonql.run_pipes(
SentencePiece(sp_model(lang)),
file=normalize(corpus, output_dir),
output=tokenized,
processes=PROCESSES,
)
return tokenized
def train_lm(
corpus: Path,
output_dir: Path,
lang: str = "en",
vocab_size: int = VOCAB_SIZE,
ngrams: int = 5,
):
lm_text_file = output_dir / (corpus.stem + ".arpa")
lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
if lm_bin_file.exists():
return lm_bin_file
assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."
normalized = normalize(corpus, output_dir)
tokenized = tokenize(normalized, output_dir, lang)
print("Will train LM", lm_text_file, "on", tokenized)
kenlm_cmd = [
str(KENLM),
f"--order={ngrams}",
"--memory=8G",
f"--temp_prefix={jsonql._tmp_dir()}",
f"--text={tokenized}",
f"--arpa={lm_text_file}",
f"--vocab_estimate={vocab_size}",
"--discount_fallback",
]
subprocess.run(kenlm_cmd, check=True)
print("Will create binary model", lm_bin_file, "from", lm_text_file)
subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
return lm_bin_file
def uniform_sampling_wrt_perplexity(
paragraphes: Iterable[str],
rounding: float = 100.0,
cut: float = 1000.0,
samples: int = 20,
) -> Iterable[str]:
max_samples = math.floor(cut / rounding * samples)
n = 0
buckets = Counter([0.0])
logging.info(f"Will sample {max_samples} sentences.")
for lines in paragraphes:
for line in lines.split("\n"):
if not line:
continue
pp = float(line[: line.find("\t")])
pp = math.floor(pp / rounding) * rounding
if pp > cut:
continue
if buckets[pp] > samples:
continue
yield line
buckets[pp] += 1
if buckets[pp] > samples:
logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
n += 1
if n > max_samples:
return
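# Illustrative worked example (not part of the original file): with the defaults
# rounding=100.0, cut=1000.0 and samples=20, perplexities are bucketed to the nearest
# lower multiple of 100, roughly 20 lines are kept per bucket, and iteration stops
# after floor(1000 / 100 * 20) = 200 sampled lines in total.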
def sample(
corpus: Path,
output_dir: Path,
dataset: Path = None,
n: int = 10_000,
lang: str = "en",
) -> Path:
sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
if sample_file.exists():
return sample_file
dataset = _dataset(dataset, lang)
extractor = ExtractSentences(
sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
)
sampling = functools.partial(
uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
)
print(f"Will sample data from {dataset} to {sample_file}")
try:
jsonql.run_pipes(
extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
)
except Exception:
sample_file.unlink()
raise
subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
subprocess.run(["head", sample_file], check=True)
return sample_file
def mine(
corpus: Path,
output_dir: Path,
threshold: float,
dataset: Path = None,
lang: str = "en",
) -> List[Path]:
"""Search sentences in CC similar to the one in the given corpus.
Args:
    - corpus: corpus to train the LM on. Assumes one sentence per line.
- output_dir: where to store the results
- threshold: maximum perplexity to have
- dataset: glob pattern matching CC shards.
- lang: search in the files of this language
"""
dataset = _dataset(dataset, lang)
files = list(dataset.parent.glob(dataset.name))
outputs = [output_dir / (f.stem + ".tsv") for f in files]
if all(o.exists() for o in outputs):
return outputs
n = len(outputs)
sp = [sp_model(lang)] * n
lm = [train_lm(corpus, output_dir)] * n
thresholds = [threshold] * n
ex = submitit.AutoExecutor(output_dir / "mining_logs")
ex.update_parameters(
name="mine",
cpus_per_task=PROCESSES,
timeout_min=60 * 24 // PROCESSES,
mem_gb=10,
)
jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
print("Submited job array:", jobs[0])
for j in submitit.helpers.as_completed(jobs):
(i, o) = j.result()
print("Mined sentences from", i, "to", o)
return outputs
def _mine(
file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
|
cc_net-main
|
cc_net/tools/expand_corpus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
return "\n".join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
file.parent.mkdir(exist_ok=True)
with open(file, "w") as f:
for sentences in docs:
doc = dict(text=text(*sentences))
print(json.dumps(doc), file=f)
def as_dict(hash_set):
if not isinstance(hash_set, dict):
hash_set = {k: v for (k, v) in hash_set.items()}
return hash_set
def load_hashes(file):
results = dedup.FlatHashSet()
results.load(file)
return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
    actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in actual]
assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
write_docs(
tmp_path / "docs.json",
[
["_Hello", "_World", "I'm so original"],
["_world", "I'm originaler", "_Hello"],
],
)
results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
expected = [
# First document is untouched
dict(
text=text("_Hello", "_World", "I'm so original"),
original_nlines=3,
nlines=3,
line_ids=[0, 1, 2],
),
# Second documents loses several lines
dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
collector = dedup.HashesCollector(field="text", output=hashes)
list(collector.map(documents))
results = load_hashes(hashes)
expected = {
str_hash(l): l.startswith("_")
for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
}
assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
with dedup.HashesCollector(field="text", output=hashes) as d:
list(d.map(documents))
results = FlatHashSet()
results.load_np(hashes)
expected = set(
str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
)
assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
documents = [
dict(text=text("_Hello", "World", "I'm so original")),
dict(text=text("Good morning", "World", "I'm originaler")),
]
seen = ["_hello", "i'm originaler", "world"]
hashes = [str_hash(h) for h in seen]
h = dedup.FlatHashSet()
h.add(hashes)
# Note: 'world' appears only once and won't be treated as a duplicate.
h.add(hashes[:-1])
h.dump(tmp_path / "hashes.bin")
results = list(
dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
)
expected = [
dict(
text=text("World", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[1, 2],
),
dict(
text=text("Good morning", "World"),
original_nlines=3,
nlines=2,
line_ids=[0, 1],
),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
parts = [data / "part_0.json", data / "part_1.json"]
res = tmp_path / "res"
res.mkdir()
h = tmp_path / "hashes.bin"
field = "text"
jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
for part in parts:
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(
text=text("Good morning", "I'm originaler"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
words = [w for part in [part_0, part_1] for doc in part for w in doc]
expected = {str_hash(s.lower()): s.startswith("_") for s in words}
assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["_Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
h = tmp_path / "hashes"
h.mkdir()
h0 = FlatHashSet()
h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
h0.add([str_hash("_world")])
h0.dump(h / "part_0.bin")
assert {
str_hash("hello"): False,
str_hash("_world"): True,
str_hash("i'm so original"): False,
} == as_dict(h0)
h1 = FlatHashSet()
h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
h1.add([str_hash("_good morning")])
h1.dump(h / "part_1.bin")
assert {
str_hash("_good morning"): True,
str_hash("_world"): False,
str_hash("i'm originaler"): False,
} == as_dict(h1)
res = tmp_path / "res"
res.mkdir()
# dedup.DISABLE_MULTI_PROCESSING = True # Simplifies debugging
dedup.remove_duplicates_sharded(
files=[data / "part_0.json", data / "part_1.json"],
outputs=[res / "part_0.json", res / "part_1.json"],
field="text",
hashes_dir=h,
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
# First pass removes "_world", second "_good morning".
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
|
cc_net-main
|
tests/test_dedup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
assert txt.replace_unicode_punct(weird) == replaced
assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
weird = "023456789 | 0123456789"
normalized = "000000000 | 0000000000"
assert txt.normalize(weird, numbers=True) == normalized
assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
weird = "023´∶:\x10 | ;012 hèllo"
normalized = "000 | ;000 hèllo"
assert normalized == txt.slow_normalize_for_dedup(weird)
assert normalized == txt.normalize_for_dedup(weird)
|
cc_net-main
|
tests/test_normalizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
sample = Path(__file__).parent / "data" / "sample.warc.txt"
with open(sample) as f:
documents = list(process_wet_file.parse_warc_file(f))
expected_urls = [
"http://sample_english.com",
"http://sample_chinese.zh",
"http://sample_russian.ru",
]
assert expected_urls == [d["url"] for d in documents]
expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
assert expected_domains == [d["source_domain"] for d in documents]
expected_date = [
"2019-03-18T00:00:00Z",
"2019-03-18T00:00:01Z",
"2019-03-18T00:00:02Z",
]
assert expected_date == [d["date_download"] for d in documents]
expected_title = [
"Famous Mark Twain Quotes",
"馬克·吐溫名言",
"Цитаты знаменитого Марка Твена",
]
assert expected_title == [d["title"] for d in documents]
expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
assert expected_quotes == documents[0]["raw_content"]
|
cc_net-main
|
tests/test_parse_wet_file.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
return {k: v for (k, v) in flat_hash_set.items()}
need_getpy = pytest.mark.skipif(
FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
def run_case():
        naive = as_dict(test_case(NaiveHashSet))
        flat = as_dict(test_case(FlatHashSet))
assert naive == flat
return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
h = hash_set_cls()
h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
return h
@same_behavior
def test_add_dup(hash_set_cls):
h = hash_set_cls()
h.add(np.arange(10, dtype=h.dtype))
h.add(np.arange(5, dtype=h.dtype))
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
return h
@need_getpy
def test_gp_dict():
import getpy as gp # type: ignore
h = gp.Dict(HASH_TYPE, np.uint8)
h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
dump_path = tmp_path / dump.__name__
dump(h, dump_path)
h2 = type(h)()
load(h2, dump_path)
assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
h = hash_set_cls()
x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
h.add(x)
check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
if hasattr(hash_set_cls, "dump_gp"):
check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
|
cc_net-main
|
tests/test_flat_hash_set.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""Remove requests.sessions.Session.request for all tests."""
monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
|
cc_net-main
|
tests/conftest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
|
cc_net-main
|
tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
n_shards = 4
n_docs = 20
shards = [
[dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
for s in range(n_shards)
]
shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
for shard, shard_file in zip(shards, shards_files):
jsonql.run_pipes(inputs=shard, output=shard_file)
regroup_file = tmp_path / "regroup.json.gz"
start = time.time()
regroup_fn(shards_files, regroup_file)
duration = time.time() - start
print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")
regrouped = list(jsonql.read_jsons(regroup_file))
assert [doc for shard in shards for doc in shard] == regrouped
readers = jsonql.get_block_readers(regroup_file, n_shards)
if not check_blocks_boundaries:
assert [doc for shard in shards for doc in shard] == [
doc for reader in readers for doc in jsonql.read_jsons(reader)
]
return
for shard, reader in zip(shards, readers):
block = [doc for doc in jsonql.read_jsons(reader)]
assert shard == block
def test_regroup(tmp_path):
# With regroup boundaries will be every 256Mb.
check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
# With fast regroup boundaries should match the shards.
check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
|
cc_net-main
|
tests/test_regroup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
return small_bar.replace(" ", " " * 10).replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
with io.StringIO() as output:
# Convert data to a generator so that it's not interpreted as a file list.
jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
return output.getvalue()
def test_split(tmp_path: Path):
data = [
dict(text="Hello world", lang="en"),
dict(text="Boujour les amis", lang="fr"),
dict(text="Rock your boat", lang="en"),
]
with jsonql.split(tmp_path / "{lang}.json") as split:
list(split.map(data))
summary = split.summary()
assert "Found 2 splits." in summary
en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
assert [data[0], data[2]] == en_docs
fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
data = [dict(text="Hello world", lang="en")]
with pytest.raises(KeyError):
with jsonql.split(tmp_path / "{language}.json") as split:
list(split.map(data))
def test_histogram():
data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
hist, bins = jsonql.histogram(data, bins=8, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])
data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
hist, bins = jsonql.histogram(data, bins=10, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
stats = {
jsonql.ALL_DOCUMENTS: 100,
"title": 80,
"title.length": 80 * 50,
"text": 100,
"text.length": 100 * 1000,
"popularity": 8,
"popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
}
(title,) = jsonql.display_stats(stats, "title")
assert "title" in title
assert "saw 80 times" in title
assert "average length is" in title
assert "\n" not in title
(text,) = jsonql.display_stats(stats, "text")
assert "text" in text
assert "saw 100 times" in text
assert "average length is" in text
assert "\n" not in text
histogram = jsonql.display_stats(
stats, "popularity", bins=[x / 10 for x in range(1, 10)]
)
assert "popularity" in histogram[0]
assert "saw 8 times" in histogram[0]
assert "histogram is" in histogram[0]
assert "0.100 " + bar("████████") in histogram[1]
assert "0.400 " + bar("████ ") in histogram[2]
assert "0.800 " + bar("████ ") in histogram[3]
cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
assert "popularity" in cum_histogram[0]
assert "saw 8 times" in cum_histogram[0]
assert "histogram is" in cum_histogram[0]
assert "0.100 " + bar("████ ") in cum_histogram[1]
assert "0.400 " + bar("██████ ") in cum_histogram[2]
assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
def sample(pop):
return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)
data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
desc = get_output(
jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
)
assert "Field title saw 8 times (100.0%), average length is 5" in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity saw 8 times (100.0%), histogram is" in desc
assert "0.100 " + bar("████████") in desc
assert "0.400 " + bar("████ ") in desc
assert "0.800 " + bar("████ ") in desc
desc = get_output(jsonql.describe, data, columns=["text"])
assert "Field title saw 8 times (100.0%), average length is 5" not in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
def transformer(source, sep=" "):
for i, line in enumerate(source):
res = f"{i}{sep}{line}"
yield res
data = ["hello", "world"]
assert get_output(transformer, data) == "0 hello\n1 world\n"
assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
def _lines(filename: Path) -> Sequence[str]:
# jsonql.lines calls open_read
return list(jsonql.lines(filename))
tmp = tmp_path
with jsonql.open_write(tmp / "a.txt") as o:
print("a", file=o)
assert _lines(tmp / "a.txt") == ["a"]
jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
assert _lines(tmp / "a.txt") == ['{"a": 1}']
with jsonql.open_write(tmp / "a.gz") as o:
print("a", file=o)
assert _lines(tmp / "a.gz") == ["a"]
with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
print("a", file=o)
assert _lines(tmp / "a0.txt") == ["a"]
assert not (tmp / "a1.txt").is_file()
with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b0.txt") == ["0" * 2000]
assert _lines(tmp / "b1.txt") == ["1" * 2000]
with jsonql.open_write(tmp / "a_????.json") as o:
print("a", file=o)
assert _lines(tmp / "a_0000.json") == ["a"]
assert not (tmp / "a_0001.json").is_file()
assert _lines(tmp / "a_*.json") == ["a"]
with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b_00.json") == ["0" * 2000]
assert _lines(tmp / "b_01.json") == ["1" * 2000]
assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\nWorld\n"
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello _|_\nWorld\n"
# split is here ^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello _|_\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\U0001F40D\nWorld\n"
# split is here ^^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello🐍\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
file = tmp_path / "test.gz"
f = str(file)
# Each object is 10/11 bytes long. We have 2 of them by block.
content = ['{"xx": %d}' % i for i in range(80)]
with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
for line in content:
print(line, file=o)
jr = jsonql.JsonReader(strict=True)
expected = list(jr.map(content))
# read as one file
assert expected == list(jsonql.read_jsons(file))
# read first block
assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
# read last block
assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))
readers = jsonql.get_block_readers(file, 9)
read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
# 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
class MyTransformer(jsonql.Transformer):
def __enter__(self):
print("trans: started")
self.ready = True
return self
def __exit__(self, *args):
print("trans: done")
def do(self, x):
return (x, x)
def acc(values):
print("acc: started")
res = 0
for (x, _) in values:
res += int(x)
print("acc: done")
yield f"acc: result={res}"
t = MyTransformer()
data = (str(x) for x in range(10))
print("pipeline: started")
# Print to stdout.
jsonql.run_pipes(t, acc, file=data)
print("pipeline: done")
out = capsys.readouterr().out
assert (
"\n".join(
[
"pipeline: started",
"trans: started",
"acc: started",
"acc: done",
f"acc: result=45",
# Transformers are closed at the very end.
"trans: done",
"pipeline: done\n",
]
)
== out
)
def test_write_to_stdout(capsys):
lines = [str(x) for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
lines = [str(x) + "\n" for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "".join(lines)
def test_multiprocess(capsys):
mult = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
jsonql.run_pipes(mult, processes=2, file=(str(x) for x in range(10)))
out = set(capsys.readouterr().out.strip("\n").split("\n"))
assert set(f"2x = {2 * x}" for x in range(10)) == out
|
cc_net-main
|
tests/test_jsonql.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
|
cc_net-main
|
tests/test_minify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
return [
v
for v in vars(module).values()
if type(v) is type
and issubclass(v, jsonql.Transformer)
and v != jsonql.Transformer
]
ALL_TRANSFORMERS = (
get_transformers(jsonql)
+ get_transformers(dedup)
+ get_transformers(perplexity)
+ get_transformers(tokenizer)
+ get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
assert issubclass(cls, jsonql.Transformer)
# accessing __init__ is generally an error, but here we do want to inspect
# the __init__ method.
code = inspect.getsource(cls.__init__) # type: ignore
code = code.replace(" ", "")
# Check that super().__init__ is called.
assert "super().__init__()" in code
def test_bad_transformers_are_caught():
class BadTransformer(jsonql.Transformer):
def __init__(self, arg):
# We aren't calling super /!\
self.arg = arg
with pytest.raises(AssertionError):
check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
model = Path("bin/lid.bin")
if not model.exists():
return
classifier = split_by_lang.Classifier(model, "text", "lang")
classifier.__enter__()
doc = dict(text="Hello world ! This is English btw.")
original_results = classifier(doc)
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
# Do it again with the unpickled object.
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
|
cc_net-main
|
tests/test_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from open3d import visualization as o3dv
import random
import argparse
import numpy as np
import time
import contactopt.util as util
import contactopt.geometric_eval as geometric_eval
import pprint
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing as mp
import matplotlib.pyplot as plt
import matplotlib as mpl
import sklearn.metrics
import trimesh
import os
SAVE_OBJ_FOLDER = 'eval/saveobj'
def vis_sample(gt_ho, in_ho, out_ho, mje_in=None, mje_out=None):
hand_gt, obj_gt = gt_ho.get_o3d_meshes(hand_contact=True, normalize_pos=True)
hand_in, obj_in = in_ho.get_o3d_meshes(hand_contact=True, normalize_pos=True)
hand_in.translate((0.0, 0.2, 0.0))
obj_in.translate((0.0, 0.2, 0.0))
if not args.split == 'honn':
out_ho.hand_contact = in_ho.hand_contact
out_ho.obj_contact = in_ho.obj_contact
hand_out, obj_out = out_ho.get_o3d_meshes(hand_contact=True, normalize_pos=True)
hand_out.translate((0.0, 0.4, 0.0))
obj_out.translate((0.0, 0.4, 0.0))
geom_list = [hand_gt, obj_gt, hand_out, obj_out, hand_in, obj_in]
geom_list.append(util.text_3d('In', pos=[-0.4, 0.2, 0], font_size=40, density=2))
geom_list.append(util.text_3d('Refined', pos=[-0.4, 0.4, 0], font_size=40, density=2))
geom_list.append(util.text_3d('GT', pos=[-0.4, 0.0, 0], font_size=40, density=2))
if mje_in is not None:
geom_list.append(util.text_3d('MJE in {:.2f}cm out {:.2f}cm'.format(mje_in * 100, mje_out * 100), pos=[-0.4, -0.2, 0], font_size=40, density=2))
o3dv.draw_geometries(geom_list)
def calc_mean_dicts(all_dicts, phase=''):
keys = all_dicts[0].keys()
mean_dict = dict()
stds = ['pen_vol']
for k in keys:
l = list()
for d in all_dicts:
l.append(d[k])
mean_dict[k] = np.array(l).mean()
if k in stds:
mean_dict[k + '_std'] = np.array(l).std()
return mean_dict
def calc_sample(ho_test, ho_gt, idx, phase='nophase'):
stats = geometric_eval.geometric_eval(ho_test, ho_gt)
return stats
def process_sample(sample, idx):
gt_ho, in_ho, out_ho = sample['gt_ho'], sample['in_ho'], sample['out_ho']
in_stats = calc_sample(in_ho, gt_ho, idx, 'before ContactOpt')
out_stats = calc_sample(out_ho, gt_ho, idx, 'after ContactOpt')
return in_stats, out_stats
def run_eval(args):
in_file = 'data/optimized_{}.pkl'.format(args.split)
runs = pickle.load(open(in_file, 'rb'))
print('Loaded {} len {}'.format(in_file, len(runs)))
# if args.vis or args.physics:
# print('Shuffling!!!')
# random.shuffle(runs)
if args.partial > 0:
runs = runs[:args.partial]
do_parallel = not args.vis
if do_parallel:
all_data = Parallel(n_jobs=mp.cpu_count() - 2)(delayed(process_sample)(s, idx) for idx, s in enumerate(tqdm(runs)))
in_all = [item[0] for item in all_data]
out_all = [item[1] for item in all_data]
else:
all_data = [] # Do non-parallel
for idx, s in enumerate(tqdm(runs)):
all_data.append(process_sample(s, idx))
if args.vis:
print('In vs GT\n', pprint.pformat(all_data[-1][0]))
print('Out vs GT\n', pprint.pformat(all_data[-1][1]))
if args.split == 'im_pred_trans':
vis_sample(s['gt_ho'], s['in_ho'], s['out_ho'], mje_in=all_data[-1][0]['objalign_hand_joints'], mje_out=all_data[-1][1]['objalign_hand_joints'])
else:
vis_sample(s['gt_ho'], s['in_ho'], s['out_ho'], mje_in=all_data[-1][0]['unalign_hand_joints'], mje_out=all_data[-1][1]['unalign_hand_joints'])
in_all = [item[0] for item in all_data]
out_all = [item[1] for item in all_data]
mean_in = calc_mean_dicts(in_all, 'In vs GT')
mean_out = calc_mean_dicts(out_all, 'Out vs GT')
print('In vs GT\n', pprint.pformat(mean_in))
print('Out vs GT\n', pprint.pformat(mean_out))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run eval on fitted pkl')
parser.add_argument('--split', default='aug', type=str)
parser.add_argument('--vis', action='store_true')
parser.add_argument('--contact_f1', action='store_true')
parser.add_argument('--pen', action='store_true')
parser.add_argument('--saveobj', action='store_true')
parser.add_argument('--partial', default=-1, type=int, help='Only run for n samples')
args = parser.parse_args()
start_time = time.time()
run_eval(args)
print('Eval time', time.time() - start_time)
|
ContactOpt-main
|
contactopt/run_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from os import path as osp
import numpy as np
import json
import matplotlib.pyplot as plt
import torch
import pytorch3d
from manopth import manolayer
import open3d
from PIL import Image, ImageFont, ImageDraw
from pyquaternion import Quaternion
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
from manopth.manolayer import ManoLayer
import trimesh
SAMPLE_VERTS_NUM = 2048
DEEPCONTACT_BIN_WEIGHTS_FILE = 'data/class_bin_weights.out'
DEEPCONTACT_NUM_BINS = 10
def val_to_class(val):
"""
Converts a contact value [0-1] to a class assignment
:param val: tensor (batch, verts)
:return: class assignment (batch, verts)
"""
expanded = torch.floor(val * DEEPCONTACT_NUM_BINS)
return torch.clamp(expanded, 0, DEEPCONTACT_NUM_BINS - 1).long() # Cut off potential 1.0 inputs?
def class_to_val(raw_scores):
"""
Finds the highest-scoring class for each vertex and converts it back to a contact value
:param raw_scores: tensor (batch, verts, classes)
:return: contact value (batch, verts)
"""
cls = torch.argmax(raw_scores, dim=2)
val = (cls + 0.5) / DEEPCONTACT_NUM_BINS
return val
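# Illustrative sketch, not part of the original module: round-tripping a contact
# value through val_to_class / class_to_val shows the binning resolution of
# 1 / DEEPCONTACT_NUM_BINS. The tensor values below are made up.
def _example_contact_binning():
    vals = torch.tensor([[0.0, 0.14, 0.5, 0.99]])   # (batch=1, verts=4)
    classes = val_to_class(vals)                    # tensor([[0, 1, 5, 9]])
    one_hot = torch.nn.functional.one_hot(classes, DEEPCONTACT_NUM_BINS).float()
    recovered = class_to_val(one_hot)               # bin centers: 0.05, 0.15, 0.55, 0.95
    return vals, classes, recovered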
def forward_mano(mano_model, pose, beta, tforms):
"""Forward mano pass, MANO params to mesh"""
device = pose.device
batch_size = pose.shape[0]
verts, joints = mano_model(pose, beta)
verts_homo = torch.cat((verts / 1000, torch.ones(batch_size, verts.shape[1], 1, device=device)), 2)
joints_homo = torch.cat((joints / 1000, torch.ones(batch_size, joints.shape[1], 1, device=device)), 2)
tform_agg = torch.eye(4, device=device).reshape(1, 4, 4).repeat(batch_size, 1, 1)
for tform in tforms:
tform_agg = torch.bmm(tform, tform_agg) # Aggregate all transforms
# Apply aggregated transform to all points, permuting for matmul
verts_homo = torch.bmm(tform_agg, verts_homo.permute(0, 2, 1)).permute(0, 2, 1)
joints_homo = torch.bmm(tform_agg, joints_homo.permute(0, 2, 1)).permute(0, 2, 1)
return verts_homo[:, :, :3], joints_homo[:, :, :3]
def fit_pca_to_axang(mano_pose, mano_beta):
"""
This project uses the MANO model parameterized with 15 PCA components. However, many other approaches use
different parameterizations (15 joints, parameterized with 45 axis-angle parameters). This function
allows converting between the formats. It first runs the MANO model forwards to get the hand vertices of
the initial format. Then an optimization is performed to adjust the 15 PCA parameters of a second MANO model
to match the initial vertices. Perhaps there are better ways to do this, but this ensures highest accuracy.
:param mano_pose: numpy (45) axis angle coordinates
:param mano_beta: numpy (10) beta parameters
:return: numpy (15) PCA parameters of fitted hand
"""
mano_pose = np.array(mano_pose)
full_axang = torch.Tensor(mano_pose).unsqueeze(0)
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=45, side='right', flat_hand_mean=False)
beta_in = torch.Tensor(mano_beta).unsqueeze(0)
mano_model_orig = ManoLayer(mano_root='mano/models', joint_rot_mode="axisang", use_pca=False, center_idx=None, flat_hand_mean=True)
_, target_joints = forward_mano(mano_model_orig, full_axang, beta_in, [])
full_axang[:, 3:] -= mano_model.th_hands_mean
pca_mat = mano_model.th_selected_comps.T
pca_shape = full_axang[:, 3:].mm(pca_mat) # Since the HO gt is in full 45 dim axang coords, convert back to PCA shape
new_pca_shape = np.zeros(18)
new_pca_shape[:3] = mano_pose[:3] # set axang
new_pca_shape[3:] = pca_shape[0, :15] # set pca pose
# Do optimization
pca_in = torch.Tensor(new_pca_shape).unsqueeze(0)
pca_in.requires_grad = True
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=15, side='right', flat_hand_mean=False)
optimizer = torch.optim.Adam([pca_in], lr=0.03, amsgrad=True) # AMSgrad helps
loss_criterion = torch.nn.L1Loss()
for it in range(200):
optimizer.zero_grad()
hand_verts, hand_joints = forward_mano(mano_model, pca_in, beta_in, []) # 2.2ms
# vis_pointcloud(hand_joints, target_joints)
loss = loss_criterion(hand_joints, target_joints)
# print('Opt loss', loss.detach())
loss.backward()
optimizer.step()
return pca_in.detach().squeeze(0).numpy()
def hand_color():
return np.asarray([224.0, 172.0, 105.0]) / 255
def obj_color():
return np.asarray([100.0, 100.0, 100.0]) / 255
def save_trimesh(obj_mesh, output_path):
obj_raw = trimesh.exchange.obj.export_obj(obj_mesh, include_texture=False)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "w") as obj_file:
obj_file.write(obj_raw)
def verts_to_name(num_verts):
"""Hacky function allowing finding the name of an object by the number of vertices.
Each object happens to have a different number."""
num_verts_dict = {100597: 'mouse', 29537: 'binoculars', 100150: 'bowl', 120611: 'camera', 64874: 'cell_phone',
177582: 'cup', 22316: 'eyeglasses', 46334: 'flashlight', 35949: 'hammer', 93324: 'headphones',
19962: 'knife', 169964: 'mug', 57938: 'pan', 95822: 'ps_controller', 57824: 'scissors',
144605: 'stapler', 19708: 'toothbrush', 42394: 'toothpaste', 126627: 'utah_teapot', 90926: 'water_bottle',
104201: 'wine_glass', 108248: 'door_knob', 71188: 'light_bulb', 42232: 'banana', 93361: 'apple',
8300: 'HO_sugar', 8251: 'HO_soap', 16763: 'HO_mug', 10983: 'HO_mustard', 9174: 'HO_drill',
8291: 'HO_cheezits', 8342: 'HO_spam', 10710: 'HO_banana', 8628: 'HO_scissors',
148245: 'train_exclude'}
if num_verts in num_verts_dict:
return num_verts_dict[num_verts]
return 'DIDNT FIND {}'.format(num_verts)
def mesh_is_thin(num_verts):
"""For thin meshes, the interpenetration loss doesn't do anything, since they're thinner than 2*2mm.
For thin objects, we set this margin to zero mm."""
thins = [19708, 19962, 22316, 16763, 8628] # Toothbrush, Knife, Eyeglasses, HO_mug, HO_scissors
is_thin = torch.zeros_like(num_verts)
for t in thins:
is_thin[num_verts == t] = 1
return is_thin
def upscale_contact(obj_mesh, obj_sampled_idx, contact_obj):
"""
When we run objects through our network, they always have a fixed number of vertices.
We need to up/downscale the contact from this to the original number of vertices
:param obj_mesh: Pytorch3d Meshes object
:param obj_sampled_idx: (batch, 2048)
:param contact_obj: (batch, 2048)
:return: upscaled contact (batch, V, 1)
"""
obj_verts = obj_mesh.verts_padded()
_, closest_idx, _ = pytorch3d.ops.knn_points(obj_verts, batched_index_select(obj_verts, 1, obj_sampled_idx), K=1)
upscaled = batched_index_select(contact_obj, 1, closest_idx.squeeze(2))
return upscaled.detach()
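# Illustrative sketch, not part of the original module: shape-level check of
# upscale_contact on a random mesh. The vertex/face counts and the local Meshes
# import are assumptions for this example only.
def _example_upscale_contact():
    from pytorch3d.structures import Meshes
    mesh = Meshes(verts=[torch.rand(100, 3)], faces=[torch.randint(0, 100, (50, 3))])
    sampled_idx = torch.randint(0, 100, (1, 16))        # (batch, n_sampled)
    contact = torch.rand(1, 16, 1)                      # contact on the sampled verts
    return upscale_contact(mesh, sampled_idx, contact)  # (1, 100, 1)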
def hack_filedesciptor():
"""
Sometimes needed when reading datasets very quickly. Fixes:
RuntimeError: received 0 items of ancdata
https://github.com/pytorch/pytorch/issues/973
"""
torch.multiprocessing.set_sharing_strategy('file_system')
def apply_tform(tform, verts):
"""
Applies a 4x4 rigid transform to a list of points
:param tform: tensor (batch, 4, 4)
:param verts: tensor (batch, N, 3)
:return: transformed points (batch, N, 3)
"""
verts_homo = torch.cat((verts, torch.ones(verts.shape[0], verts.shape[1], 1, device=verts.device)), 2)
new_verts = torch.bmm(tform, verts_homo.permute(0, 2, 1)).permute(0, 2, 1)
return new_verts[:, :, :3]
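# Illustrative sketch, not part of the original module: applying a pure-translation
# rigid transform with apply_tform. The point values below are arbitrary.
def _example_apply_tform():
    pts = torch.zeros(2, 5, 3)                        # (batch, N, 3)
    tform = torch.eye(4).unsqueeze(0).repeat(2, 1, 1)
    tform[:, :3, 3] = torch.tensor([0.1, 0.0, 0.0])   # shift +0.1 along x
    return apply_tform(tform, pts)                    # every point becomes (0.1, 0, 0)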
def apply_rot(rot, verts, around_centroid=False):
"""
Applies a 3x3 rotation matrix to a list of points
:param rot: tensor (batch, 3, 3)
:param verts: tensor (batch, N, 3)
:return: rotated points (batch, N, 3)
"""
if around_centroid:
centroid = verts.mean(dim=1)
verts = verts - centroid
new_verts = torch.bmm(rot, verts.permute(0, 2, 1)).permute(0, 2, 1)
if around_centroid:
new_verts = new_verts + centroid
return new_verts
def translation_to_tform(translation):
"""
(batch, 3) to (batch, 4, 4)
"""
tform_out = pytorch3d.ops.eyes(4, translation.shape[0], device=translation.device)
tform_out[:, :3, 3] = translation
return tform_out
def sharpen_contact(c, slope=10, thresh=0.6):
"""
Applies a linear sharpening filter to the input contact, turning it into a "soft binary" clamped to [0, 1]
"""
out = slope * (c - thresh) + thresh
return torch.clamp(out, 0.0, 1.0)
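# Illustrative sketch, not part of the original module: with the default
# slope/thresh, values near the threshold are pushed towards 0 or 1 while
# staying inside [0, 1]. The input values are made up.
def _example_sharpen_contact():
    c = torch.tensor([0.2, 0.55, 0.6, 0.65, 0.9])
    return sharpen_contact(c)  # approx tensor([0.0, 0.1, 0.6, 1.0, 1.0])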
def fit_sigmoid(colors, a=0.05):
"""Fits a sigmoid to raw contact temperature readings from the ContactPose dataset. This function is copied from that repo"""
idx = colors > 0
ci = colors[idx]
x1 = min(ci) # Find two points
y1 = a
x2 = max(ci)
y2 = 1-a
lna = np.log((1 - y1) / y1)
lnb = np.log((1 - y2) / y2)
k = (lnb - lna) / (x1 - x2)
mu = (x2*lna - x1*lnb) / (lna - lnb)
ci = np.exp(k * (ci-mu)) / (1 + np.exp(k * (ci-mu))) # Apply the sigmoid
colors[idx] = ci
return colors
def subdivide_verts(edges, verts):
"""
Takes a list of edges and vertices, and subdivides each edge and puts a vert in the middle. May not work with variable-size meshes
:param edges: (batch, E, 2)
:param verts: (batch, V, 3)
:return: new_verts (batch, E+V, 3)
"""
selected_verts = edges.view(edges.shape[0], -1) # Flatten into (batch, E*2)
new_verts = batched_index_select(verts, 1, selected_verts)
new_verts = new_verts.view(edges.shape[0], edges.shape[1], 2, 3)
new_verts = new_verts.mean(dim=2)
new_verts = torch.cat([verts, new_verts], dim=1) # (sum(V_n)+sum(E_n), 3)
return new_verts
def calc_l2_err(a, b, axis=2):
if torch.is_tensor(a):
mse = torch.sum(torch.square(a - b), dim=axis)
l2_err = torch.sqrt(mse)
return torch.mean(l2_err, 1)
else:
mse = np.linalg.norm(a - b, 2, axis=axis)
return mse.mean()
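# Illustrative sketch, not part of the original module: calc_l2_err on torch
# inputs returns the mean per-point L2 error for each batch element. The values
# below are chosen so the answer is easy to verify by hand.
def _example_calc_l2_err():
    a = torch.zeros(1, 2, 3)
    b = torch.tensor([[[3.0, 4.0, 0.0], [0.0, 0.0, 0.0]]])
    return calc_l2_err(a, b)  # tensor([2.5]): per-point errors are 5 and 0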
def batched_index_select(t, dim, inds):
"""
Helper function to extract batch-varying indices along an array
:param t: array to select from
:param dim: dimension to select along
:param inds: batch-varying indices
:return: gathered tensor (batch, num_indices, features)
"""
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out
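# Illustrative sketch, not part of the original module: batched_index_select
# picks a different set of vertex indices for every batch element. Shapes here
# are made up.
def _example_batched_index_select():
    feats = torch.arange(2 * 4 * 3, dtype=torch.float).view(2, 4, 3)  # (batch, verts, feat)
    inds = torch.tensor([[0, 2], [3, 1]])                             # (batch, K)
    return batched_index_select(feats, 1, inds)                       # (batch, K, feat)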
def mesh_set_color(color, mesh, colormap=plt.cm.inferno):
"""
Applies colormap to object
:param color: Tensor or numpy array, (N, 1)
:param mesh: Open3D TriangleMesh
:return: None; vertex colors are set on the mesh in place
"""
# vertex_colors = np.tile(color.squeeze(), (3, 1)).T
# mesh.vertex_colors = o3du.Vector3dVector(vertex_colors)
# geometry.apply_colormap(mesh, apply_sigmoid=False)
colors = np.asarray(color).squeeze()
if len(colors.shape) > 1:
colors = colors[:, 0]
colors[colors < 0.1] = 0.1 # TODO hack to make brighter
colors = colormap(colors)[:, :3]
colors = o3du.Vector3dVector(colors)
mesh.vertex_colors = colors
def aggregate_tforms(tforms):
"""Aggregates a list of 4x4 rigid transformation matricies"""
device = tforms[0].device
batch_size = tforms[0].shape[0]
tform_agg = pytorch3d.ops.eyes(4, batch_size, device=device)
for tform in tforms:
tform_agg = torch.bmm(tform, tform_agg) # Aggregate all transforms
return tform_agg
def axisEqual3D(ax):
"""Sets a matplotlib 3D plot to have equal-scale axes"""
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def vis_pointcloud(object_points, hand_points, idx=None, show=True):
if show:
plt.switch_backend('TkAgg')
else:
plt.switch_backend('agg')
if idx is None:
idx = int(np.random.randint(0, hand_points.shape[0])) # Select random sample from batch
object_points = object_points[idx, :, :].detach().cpu().numpy()
hand_points = hand_points[idx, :, :].detach().cpu().numpy()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(object_points[:, 0], object_points[:, 1], object_points[:, 2])
ax.scatter(hand_points[:, 0], hand_points[:, 1], hand_points[:, 2]) #, c=np.arange(hand_points.shape[0]))
if show:
axisEqual3D(ax)
# plt.axis('off')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
return fig
def get_mano_closed_faces():
"""The default MANO mesh is "open" at the wrist. By adding additional faces, the hand mesh is closed,
which looks much better.
https://github.com/hassony2/handobjectconsist/blob/master/meshreg/models/manoutils.py"""
mano_layer = manolayer.ManoLayer(
joint_rot_mode="axisang", use_pca=False, mano_root='mano/models', center_idx=None, flat_hand_mean=True
)
close_faces = torch.Tensor(
[
[92, 38, 122],
[234, 92, 122],
[239, 234, 122],
[279, 239, 122],
[215, 279, 122],
[215, 122, 118],
[215, 118, 117],
[215, 117, 119],
[215, 119, 120],
[215, 120, 108],
[215, 108, 79],
[215, 79, 78],
[215, 78, 121],
[214, 215, 121],
]
)
closed_faces = torch.cat([mano_layer.th_faces, close_faces.long()])
# Indices of faces added during closing --> should be ignored as they match the wrist
# part of the hand, which is not an external surface of the human
# Valid because added closed faces are at the end
hand_ignore_faces = [1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551]
return closed_faces.detach().cpu().numpy() #, hand_ignore_faces
def text_3d(text, pos, direction=None, degree=-90.0, density=10, font='/usr/share/fonts/truetype/freefont/FreeMono.ttf', font_size=10):
"""
Generate an Open3D text point cloud used for visualization.
https://github.com/intel-isl/Open3D/issues/2
:param text: content of the text
:param pos: 3D xyz position of the text upper left corner
:param direction: 3D normalized direction of where the text faces
:param degree: in plane rotation of text
:param font: Name of the font - change it according to your system
:param font_size: size of the font
:return: o3d.geometry.PointCloud object
"""
if direction is None:
direction = (0., 0., 1.)
# font_obj = ImageFont.truetype(font, font_size)
font_obj = ImageFont.truetype(font, font_size * density)
font_dim = font_obj.getsize(text)
img = Image.new('RGB', font_dim, color=(255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, font=font_obj, fill=(0, 0, 0))
img = np.asarray(img)
img_mask = img[:, :, 0] < 128
indices = np.indices([*img.shape[0:2], 1])[:, img_mask, 0].reshape(3, -1).T
pcd = open3d.geometry.PointCloud()
pcd.colors = open3d.utility.Vector3dVector(img[img_mask, :].astype(float) / 255.0)
# pcd.points = o3d.utility.Vector3dVector(indices / 100.0)
pcd.points = open3d.utility.Vector3dVector(indices / 1000 / density)
raxis = np.cross([0.0, 0.0, 1.0], direction)
if np.linalg.norm(raxis) < 1e-6:
raxis = (0.0, 0.0, 1.0)
trans = (Quaternion(axis=raxis, radians=np.arccos(direction[2])) *
Quaternion(axis=direction, degrees=degree)).transformation_matrix
trans[0:3, 3] = np.asarray(pos)
pcd.transform(trans)
return pcd
def to_cpu_numpy(obj):
"""Convert torch cuda tensors to cpu, numpy tensors"""
if torch.is_tensor(obj):
return obj.detach().cpu().numpy()
elif isinstance(obj, dict):
res = {}
for k, v in obj.items():
res[k] = to_cpu_numpy(v)
return res
elif isinstance(obj, list):
res = []
for v in obj:
res.append(to_cpu_numpy(v))
return res
else:
raise TypeError("Invalid type for to_cpu_numpy")
def dict_to_device(data, device):
"""Move dict of tensors to device"""
out = dict()
for k in data.keys():
out[k] = data[k].to(device)
return out
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
|
ContactOpt-main
|
contactopt/util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
def parse_dataset(args):
""" Converts the --split argument into a dataset file """
if args.split == 'aug':
args.train_dataset = 'data/perturbed_contactpose_train.pkl'
args.test_dataset = 'data/perturbed_contactpose_test.pkl'
elif args.split == 'fine':
args.test_dataset = 'data/contactpose_test.pkl'
elif args.split == 'im':
args.test_dataset = 'data/ho3d_image.pkl'
elif args.split == 'demo':
args.test_dataset = 'data/ho3d_image_demo.pkl'
else:
raise ValueError('Unknown dataset')
def run_contactopt_parse_args():
parser = argparse.ArgumentParser(description='Alignment networks training')
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--split', default='aug', type=str)
parser.add_argument('--lr', type=float)
parser.add_argument('--n_iter', type=int)
parser.add_argument('--partial', default=-1, type=int, help='Only run for n batches')
parser.add_argument('--w_cont_hand', type=float, help='Weight of the hand contact in optimization')
parser.add_argument('--sharpen_thresh', type=float)
parser.add_argument('--ncomps', type=int)
parser.add_argument('--w_cont_asym', type=float)
parser.add_argument('--w_opt_trans', type=float)
parser.add_argument('--w_opt_rot', type=float)
parser.add_argument('--w_opt_pose', type=float)
parser.add_argument('--caps_rad', type=float)
parser.add_argument('--caps_hand', action='store_true')
parser.add_argument('--cont_method', type=int)
parser.add_argument('--caps_top', type=float)
parser.add_argument('--caps_bot', type=float)
parser.add_argument('--w_pen_cost', type=float)
parser.add_argument('--pen_it', type=float)
parser.add_argument('--w_obj_rot', type=float)
parser.add_argument('--rand_re', type=int)
parser.add_argument('--rand_re_trans', type=float)
parser.add_argument('--rand_re_rot', type=float)
parser.add_argument('--vis_method', type=int)
parser.add_argument('--vis', action='store_true')
parser.add_argument('--video', action='store_true')
parser.add_argument('--min_cont', default=1, type=int, help='Cut grasps with less than this points of initial contact')
args = parser.parse_args()
parse_dataset(args)
if args.vis:
args.batch_size = 1
return args
def train_network_parse_args():
parser = argparse.ArgumentParser(description='Alignment networks training')
parser.add_argument('--lr', default=0.01, type=float)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--optimizer', default='adam', type=str)
parser.add_argument('--split', default='aug', type=str)
# parser.add_argument('--loss_pose', default=0, type=float)
parser.add_argument('--loss_c_obj', default=1, type=float)
parser.add_argument('--loss_c_hand', default=1, type=float)
# parser.add_argument('--loss_3d', default=0, type=float)
parser.add_argument('--epochs', default=101, type=int)
parser.add_argument('--checkpoint', default='', type=str)
parser.add_argument('--desc', default='', type=str)
parser.add_argument('--vis', action='store_true')
args = parser.parse_args()
if args.desc == '':
args.desc = str(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
all_str = ''
for key, val in vars(args).items():
all_str += '--{}={} '.format(key, val)
print(all_str) # Convert to dict and print
args.all_str = all_str
parse_dataset(args)
return args
|
ContactOpt-main
|
contactopt/arguments.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
ContactOpt-main
|
contactopt/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contactopt.loader import *
import contactopt.util as util
from contactopt.hand_object import HandObject
import time
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
def show_optimization(data, opt_state, hand_contact_target=None, obj_contact_target=None, is_video=False, label=None, vis_method=1, delay=0.001):
"""Displays video/still frame of optimization process
Contact visualization options:
0 GT contact on opt
1 Predicted contact on opt
2 Live contact on opt hand
3 Live contact on both
4 No contact on any
5 No hand contact, predicted obj contact
"""
gt_ho = HandObject()
opt_ho = HandObject()
gt_ho.load_from_batch(data['hand_beta_gt'], data['hand_pose_gt'], data['hand_mTc_gt'], data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_gt'])
opt_ho.load_from_batch(data['hand_beta_gt'], data['hand_pose_gt'], data['hand_mTc_gt'], data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_aug'], obj_rot=opt_state[-1]['obj_rot'])
hand_mesh_gt, obj_mesh_gt = gt_ho.get_o3d_meshes()
hand_mesh_opt, obj_mesh_opt = opt_ho.get_o3d_meshes()
geom_list = [hand_mesh_gt, obj_mesh_gt, obj_mesh_opt, hand_mesh_opt]
if vis_method == 1 or vis_method == 5:
util.mesh_set_color(hand_contact_target, hand_mesh_opt)
if obj_contact_target.shape[1] == util.SAMPLE_VERTS_NUM:
obj_contact_target = upscale_contact(data['mesh_aug'], data['obj_sampled_idx'], obj_contact_target)
util.mesh_set_color(obj_contact_target, obj_mesh_opt)
if vis_method == 2 or vis_method == 3:
util.mesh_set_color(opt_state[-1]['contact_hand'].squeeze(), hand_mesh_opt)
if opt_state[-1]['contact_obj'].shape[1] == util.SAMPLE_VERTS_NUM:
c = upscale_contact(data['mesh_aug'], data['obj_sampled_idx'], opt_state[-1]['contact_obj'])
util.mesh_set_color(c, obj_mesh_opt)
else:
util.mesh_set_color(opt_state[-1]['contact_obj'].squeeze(), obj_mesh_opt)
if vis_method == 4 or vis_method == 5:
hand_mesh_gt.paint_uniform_color(np.asarray([150.0, 250.0, 150.0]) / 255) # Green
hand_mesh_opt.paint_uniform_color(np.asarray([250.0, 150.0, 150.0]) / 255) # Red
if vis_method == 4:
obj_mesh_gt.paint_uniform_color(np.asarray([100.0, 100.0, 100.0]) / 255) # Gray
obj_mesh_opt.paint_uniform_color(np.asarray([100.0, 100.0, 100.0]) / 255) # Gray
if label is not None:
lbl_verts = util.text_3d(label, pos=[0, 0.1, 0], font_size=20, density=2)
geom_list.append(lbl_verts)
hand_mesh_opt.vertices = o3du.Vector3dVector(opt_state[-1]['hand_verts'].squeeze())
hand_mesh_opt.compute_vertex_normals()
hand_mesh_gt.translate((0, 0.2, 0))
obj_mesh_gt.translate((0, 0.2, 0))
if not is_video:
o3dv.draw_geometries(geom_list)
else:
vis = o3dv.VisualizerWithKeyCallback()
vis.create_window()
for g in geom_list:
vis.add_geometry(g)
for i in range(len(opt_state) * 2):
out_dict = opt_state[i % len(opt_state)]
if out_dict['obj_rot'][0, 0, 0] < 1:
obj_verts = util.apply_rot(out_dict['obj_rot'], data['mesh_aug'].verts_padded(), around_centroid=True).squeeze()
obj_mesh_opt.vertices = o3du.Vector3dVector(obj_verts)
hand_mesh_opt.vertices = o3du.Vector3dVector(out_dict['hand_verts'].squeeze())
if vis_method == 2 or vis_method == 3:
util.mesh_set_color(out_dict['contact_hand'].squeeze(), hand_mesh_opt)
if vis_method == 3:
if out_dict['contact_obj'].shape[1] == util.SAMPLE_VERTS_NUM:
c = util.upscale_contact(data['mesh_aug'], data['obj_sampled_idx'], out_dict['contact_obj'])
util.mesh_set_color(c, obj_mesh_opt)
else:
util.mesh_set_color(out_dict['contact_obj'].squeeze(), obj_mesh_opt)
vis.update_geometry(hand_mesh_opt)
vis.update_geometry(obj_mesh_opt)
vis.poll_events()
vis.update_renderer()
if i % len(opt_state) == 0:
time.sleep(2)
# time.sleep(delay)
vis.destroy_window()
|
ContactOpt-main
|
contactopt/visualize.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import pytorch3d.ops
from contactopt.util import *
from pytorch3d.structures import Meshes
def capsule_sdf(mesh_verts, mesh_normals, query_points, query_normals, caps_rad, caps_top, caps_bot, foreach_on_mesh):
"""
Find the SDF of query points to mesh verts
Capsule SDF formulation from https://iquilezles.org/www/articles/distfunctions/distfunctions.htm
:param mesh_verts: (batch, V, 3)
:param mesh_normals: (batch, V, 3)
:param query_points: (batch, Q, 3)
:param query_normals: (batch, Q, 3)
:param caps_rad: scalar, radius of capsules
:param caps_top: scalar, distance from mesh to top of capsule
:param caps_bot: scalar, distance from mesh to bottom of capsule
:param foreach_on_mesh: boolean, foreach point on mesh find closest query (V), or foreach query find closest mesh (Q)
:return: distance to the capsule axis normalized by caps_rad (0 on the axis, 1 on the capsule surface) and the normal dot product, each (batch, V or Q)
"""
# TODO implement normal check?
if foreach_on_mesh: # Foreach mesh vert, find closest query point
knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(mesh_verts, query_points, K=1, return_nn=True) # TODO should attract capsule middle?
capsule_tops = mesh_verts + mesh_normals * caps_top
capsule_bots = mesh_verts + mesh_normals * caps_bot
delta_top = nearest_pos[:, :, 0, :] - capsule_tops
normal_dot = torch.sum(mesh_normals * batched_index_select(query_normals, 1, nearest_idx.squeeze(2)), dim=2)
else: # Foreach query vert, find closest mesh point
knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(query_points, mesh_verts, K=1, return_nn=True) # TODO should attract capsule middle?
closest_mesh_verts = batched_index_select(mesh_verts, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
closest_mesh_normals = batched_index_select(mesh_normals, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
capsule_tops = closest_mesh_verts + closest_mesh_normals * caps_top # Coordinates of the top focii of the capsules (batch, V, 3)
capsule_bots = closest_mesh_verts + closest_mesh_normals * caps_bot
delta_top = query_points - capsule_tops
normal_dot = torch.sum(query_normals * closest_mesh_normals, dim=2)
bot_to_top = capsule_bots - capsule_tops # Vector from capsule bottom to top
along_axis = torch.sum(delta_top * bot_to_top, dim=2) # Dot product
top_to_bot_square = torch.sum(bot_to_top * bot_to_top, dim=2)
h = torch.clamp(along_axis / top_to_bot_square, 0, 1) # Could avoid NaNs with offset in division here
dist_to_axis = torch.norm(delta_top - bot_to_top * h.unsqueeze(2), dim=2) # Distance to capsule centerline
return dist_to_axis / caps_rad, normal_dot # (Normalized SDF)+1 0 on endpoint, 1 on edge of capsule
def sdf_to_contact(sdf, dot_normal, method=0):
"""
Transform normalized SDF into some contact value
:param sdf: normalized SDF, 1 is the surface of the object
:param dot_normal: dot product between query and mesh normals, used by method 4
:param method: select method
:return: contact (batch, S)
"""
if method == 0:
c = 1 / (sdf + 0.0001) # Exponential dropoff
elif method == 1:
c = -sdf + 2 # Linear dropoff
elif method == 2:
c = 1 / (sdf + 0.0001) # Exponential dropoff
c = torch.pow(c, 2)
elif method == 3:
c = torch.sigmoid(-sdf + 2.5)
elif method == 4:
c = (-dot_normal/2+0.5) / (sdf + 0.0001) # Exponential dropoff with sharp normal
elif method == 5:
c = 1 / (sdf + 0.0001) # Proxy for other stuff
return torch.clamp(c, 0.0, 1.0)
def calculate_contact_capsule(hand_verts, hand_normals, object_verts, object_normals,
caps_top=0.0005, caps_bot=-0.0015, caps_rad=0.001, caps_on_hand=False, contact_norm_method=0):
"""
Calculates contact maps on object and hand.
:param hand_verts: (batch, V, 3)
:param hand_normals: (batch, V, 3)
:param object_verts: (batch, O, 3)
:param object_normals: (batch, O, 3)
:param caps_top: ctop, distance to top capsule center
:param caps_bot: cbot, distance to bottom capsule center
:param caps_rad: crad, radius of the contact capsule
:param caps_on_hand: are contact capsules placed on hand or object vertices
:param contact_norm_method: select a distance-to-contact function
:return: object contact (batch, O, 1), hand contact (batch, V, 1)
"""
if caps_on_hand:
sdf_obj, dot_obj = capsule_sdf(hand_verts, hand_normals, object_verts, object_normals, caps_rad, caps_top, caps_bot, False)
sdf_hand, dot_hand = capsule_sdf(hand_verts, hand_normals, object_verts, object_normals, caps_rad, caps_top, caps_bot, True)
else:
sdf_obj, dot_obj = capsule_sdf(object_verts, object_normals, hand_verts, hand_normals, caps_rad, caps_top, caps_bot, True)
sdf_hand, dot_hand = capsule_sdf(object_verts, object_normals, hand_verts, hand_normals, caps_rad, caps_top, caps_bot, False)
obj_contact = sdf_to_contact(sdf_obj, dot_obj, method=contact_norm_method)# * (dot_obj/2+0.5) # TODO dotting contact normal
hand_contact = sdf_to_contact(sdf_hand, dot_hand, method=contact_norm_method)# * (dot_hand/2+0.5)
# print('oshape, nshape', obj_contact.shape, (dot_obj/2+0.5).shape)##
return obj_contact.unsqueeze(2), hand_contact.unsqueeze(2)
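# Illustrative sketch, not part of the original module: shape-level check of the
# capsule contact computation on random point clouds. The vertex counts are
# arbitrary and the random normals are not unit length, so only the output
# shapes are meaningful here.
def _example_calculate_contact_capsule():
    hand_v, hand_n = torch.rand(1, 778, 3), torch.rand(1, 778, 3)
    obj_v, obj_n = torch.rand(1, 2048, 3), torch.rand(1, 2048, 3)
    obj_contact, hand_contact = calculate_contact_capsule(hand_v, hand_n, obj_v, obj_n)
    return obj_contact.shape, hand_contact.shape  # (1, 2048, 1), (1, 778, 1)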
def calculate_penetration_cost(hand_verts, hand_normals, object_verts, object_normals, is_thin, contact_norm_method, allowable_pen=0.002):
"""
Calculates an increasing cost for hands heavily intersecting with objects.
Foreach hand vertex, find the nearest object point, dot with object normal.
Include "allowable-pen" buffer margin to account for hand deformation.
"""
allowable_pen = (torch.zeros_like(is_thin) + allowable_pen) * (1 - is_thin)
allowable_pen = allowable_pen.unsqueeze(1)
if contact_norm_method == 5:
hand_verts_offset = hand_verts + hand_normals * -0.004
else:
hand_verts_offset = hand_verts
knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(hand_verts_offset, object_verts, K=1, return_nn=True) # Foreach hand vert, find closest obj vert
closest_obj_verts = batched_index_select(object_verts, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
closest_obj_normals = batched_index_select(object_normals, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3)
# print('nearest shape', nearest_pos.shape, closest_obj_verts.shape)
delta_pos = hand_verts - closest_obj_verts
dist_along_normal = torch.sum(delta_pos * closest_obj_normals, dim=2) # Dot product. Negative means backward along normal
# print('d along normal', dist_along_normal.shape)
pen_score = torch.nn.functional.relu(-dist_along_normal - allowable_pen)
# print('pen score', pen_score)
return pen_score
if __name__ == '__main__':
# Plot all sdf_to_contact mappings
import matplotlib.pyplot as plt
for m in range(4):
d = torch.linspace(0, 3, 1000)
c = sdf_to_contact(d, torch.zeros_like(d), method=m)  # dummy normal dot product, unused by methods 0-3
plt.plot(d.numpy(), c.numpy(), label=str(m))
plt.ylabel('Contact value')
plt.xlabel('Normalized SDF from center')
plt.legend()
plt.show()
|
ContactOpt-main
|
contactopt/diffcontact.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
import json
import contactopt.util as util
import contactopt.arguments as arguments
from contactopt.hand_object import HandObject
from contactopt.run_contactopt import run_contactopt
def create_demo_dataset():
obj_mesh = trimesh.load('data/demo_obj.obj') # Load object mesh
with open('data/demo_mano.json') as json_file: # Load mano parameters
mano_params = json.load(json_file)
# Initialize the HandObject class with the given mano parameters and object mesh.
# Note that pose must be represented using the 15-dimensional PCA space
ho_pred = HandObject()
ho_pred.load_from_mano_params(hand_beta=mano_params['beta'], hand_pose=mano_params['pose'], hand_trans=mano_params['trans'],
obj_faces=obj_mesh.faces, obj_verts=obj_mesh.vertices)
# To make the dataloader happy, we need a "ground truth" H/O set.
# However, since this isn't used for this demo, just copy the ho_pred object.
ho_gt = HandObject()
ho_gt.load_from_ho(ho_pred)
new_sample = dict()
new_sample['ho_aug'] = ho_pred
new_sample['ho_gt'] = ho_gt
# Select the random object vertices which will be sampled
new_sample['obj_sampled_idx'] = np.random.randint(0, len(ho_gt.obj_verts), util.SAMPLE_VERTS_NUM)
# Calculate hand and object features. The network uses these for improved performance.
new_sample['hand_feats_aug'], new_sample['obj_feats_aug'] = ho_pred.generate_pointnet_features(new_sample['obj_sampled_idx'])
return [new_sample] # Return a dataset of length 1
if __name__ == '__main__':
dataset = create_demo_dataset()
args = arguments.run_contactopt_parse_args()
defaults = {'lr': 0.01,
'n_iter': 250,
'w_cont_hand': 2.5,
'sharpen_thresh': -1,
'ncomps': 15,
'w_cont_asym': 2,
'w_opt_trans': 0.3,
'w_opt_rot': 1,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 0,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 320,
'pen_it': 0,
'rand_re': 8,
'rand_re_trans': 0.02,
'rand_re_rot': 5,
'w_obj_rot': 0,
'vis_method': 1}
for k in defaults.keys():
if vars(args)[k] is None:
vars(args)[k] = defaults[k]
args.test_dataset = dataset
args.split = 'user'
run_contactopt(args)
|
ContactOpt-main
|
contactopt/run_user_demo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import pytorch3d
import time
from contactopt.loader import *
from manopth.manolayer import ManoLayer
from manopth import rodrigues_layer
import contactopt.diffcontact as calculate_contact
import contactopt.util as util
from contactopt.hand_object import HandObject
from contactopt.visualize import show_optimization
def optimize_pose(data, hand_contact_target, obj_contact_target, n_iter=250, lr=0.01, w_cont_hand=2, w_cont_obj=1,
save_history=False, ncomps=15, w_cont_asym=2, w_opt_trans=0.3, w_opt_pose=1, w_opt_rot=1,
caps_top=0.0005, caps_bot=-0.001, caps_rad=0.001, caps_on_hand=False,
contact_norm_method=0, w_pen_cost=600, w_obj_rot=0, pen_it=0):
"""Runs differentiable optimization to align the hand with the target contact map.
Minimizes the loss between ground truth contact and contact calculated with DiffContact"""
batch_size = data['hand_pose_aug'].shape[0]
device = data['hand_pose_aug'].device
opt_vector = torch.zeros((batch_size, ncomps + 6 + 3), device=device) # 3 hand rot, 3 hand trans, 3 obj rot
opt_vector.requires_grad = True
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=ncomps, side='right', flat_hand_mean=False).to(device)
if data['obj_sampled_idx'].numel() > 1:
obj_normals_sampled = util.batched_index_select(data['obj_normals_aug'], 1, data['obj_sampled_idx'])
else: # If we're optimizing over all verts
obj_normals_sampled = data['obj_normals_aug']
optimizer = torch.optim.Adam([opt_vector], lr=lr, amsgrad=True) # AMSgrad helps
loss_criterion = torch.nn.L1Loss(reduction='none') # Benchmarked, L1 performs best vs MSE/SmoothL1
opt_state = []
is_thin = mesh_is_thin(data['mesh_aug'].num_verts_per_mesh())
# print('is thin', is_thin, data['mesh_aug'].num_verts_per_mesh())
for it in range(n_iter):
optimizer.zero_grad()
mano_pose_out = torch.cat([opt_vector[:, 0:3] * w_opt_rot, opt_vector[:, 3:ncomps+3] * w_opt_pose], dim=1)
mano_pose_out[:, :18] += data['hand_pose_aug']
tform_out = util.translation_to_tform(opt_vector[:, ncomps+3:ncomps+6] * w_opt_trans)
hand_verts, hand_joints = util.forward_mano(mano_model, mano_pose_out, data['hand_beta_aug'], [data['hand_mTc_aug'], tform_out]) # 2.2ms
if contact_norm_method != 0 and not caps_on_hand:
with torch.no_grad(): # We need to calculate hand normals if using more complicated methods
mano_mesh = Meshes(verts=hand_verts, faces=mano_model.th_faces.repeat(batch_size, 1, 1))
hand_normals = mano_mesh.verts_normals_padded()
else:
hand_normals = torch.zeros(hand_verts.shape, device=device)
obj_verts = data['obj_sampled_verts_aug']
obj_normals = obj_normals_sampled
obj_rot_mat = rodrigues_layer.batch_rodrigues(opt_vector[:, ncomps+6:])
obj_rot_mat = obj_rot_mat.view(batch_size, 3, 3)
if w_obj_rot > 0:
obj_verts = util.apply_rot(obj_rot_mat, obj_verts, around_centroid=True)
obj_normals = util.apply_rot(obj_rot_mat, obj_normals)
contact_obj, contact_hand = calculate_contact.calculate_contact_capsule(hand_verts, hand_normals, obj_verts, obj_normals,
caps_top=caps_top, caps_bot=caps_bot, caps_rad=caps_rad, caps_on_hand=caps_on_hand, contact_norm_method=contact_norm_method)
contact_obj_sub = obj_contact_target - contact_obj
contact_obj_weighted = contact_obj_sub + torch.nn.functional.relu(contact_obj_sub) * w_cont_asym # Loss for 'missing' contact higher
loss_contact_obj = loss_criterion(contact_obj_weighted, torch.zeros_like(contact_obj_weighted)).mean(dim=(1, 2))
contact_hand_sub = hand_contact_target - contact_hand
contact_hand_weighted = contact_hand_sub + torch.nn.functional.relu(contact_hand_sub) * w_cont_asym # Loss for 'missing' contact higher
loss_contact_hand = loss_criterion(contact_hand_weighted, torch.zeros_like(contact_hand_weighted)).mean(dim=(1, 2))
loss = loss_contact_obj * w_cont_obj + loss_contact_hand * w_cont_hand
if w_pen_cost > 0 and it >= pen_it:
pen_cost = calculate_contact.calculate_penetration_cost(hand_verts, hand_normals, data['obj_sampled_verts_aug'], obj_normals_sampled, is_thin, contact_norm_method)
loss += pen_cost.mean(dim=1) * w_pen_cost
out_dict = {'loss': loss.detach().cpu()}
if save_history:
out_dict['hand_verts'] = hand_verts.detach().cpu()#.numpy()
out_dict['hand_joints'] = hand_joints.detach().cpu()#.numpy()
out_dict['contact_obj'] = contact_obj.detach().cpu()#.numpy()
out_dict['contact_hand'] = contact_hand.detach().cpu()#.numpy()
out_dict['obj_rot'] = obj_rot_mat.detach().cpu()#.numpy()
opt_state.append(out_dict)
loss.mean().backward()
optimizer.step()
tform_full_out = util.aggregate_tforms([data['hand_mTc_aug'], tform_out])
return mano_pose_out, tform_full_out, obj_rot_mat, opt_state
def show_optimization_video(data, device):
"""Displays video of optimization process of hand converging"""
data_gpu = util.dict_to_device(data, device)
contact_obj_pred = util.batched_index_select(data_gpu['obj_contact_gt'], 1, data_gpu['obj_sampled_idx'])
out_pose, out_tform, obj_rot_mat, opt_state = optimize_pose(data_gpu, data_gpu['hand_contact_gt'], contact_obj_pred, save_history=True)
show_optimization(data, opt_state, hand_contact_target=data['hand_contact_gt'], obj_contact_target=contact_obj_pred.detach().cpu(), is_video=True, vis_method=1)
if __name__ == '__main__':
"""Show a video optimization from perturbed pose"""
test_dataset = ContactDBDataset('data/perturbed_contactpose_test.pkl')
dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True, collate_fn=ContactDBDataset.collate_fn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for idx, data in enumerate(dataloader):
show_optimization_video(data, device) # do optimization and show video
if idx >= 10:
break
|
ContactOpt-main
|
contactopt/optimize_pose.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import Dataset
from contactopt.util import *
import torch
import numpy as np
from pytorch3d.structures import Meshes
import pytorch3d
from torch.utils.data import DataLoader
import time
from tqdm import tqdm
import pickle
class ContactDBDataset(Dataset):
"""PyTorch Dataset object which allows batched fetching of hand/object pairs from a dataset.
PyTorch3D Meshes are used to handle batches of variable-size meshes"""
def __init__(self, data, train=False, min_num_cont=1):
start_time = time.time()
self.train = train
self.aug_vert_jitter = 0.0005
if isinstance(data, str):
self.dataset = pickle.load(open(data, 'rb')) # Load pickle, can take many seconds
else:
self.dataset = data
if 'num_verts_in_contact' in self.dataset[0]:
print('Cutting samples with less than {} points in contact. Was size {}'.format(min_num_cont, len(self.dataset)))
self.dataset = [s for s in self.dataset if s['num_verts_in_contact'] >= min_num_cont]
print('Dataset loaded in {:.2f} sec, {} samples'.format(time.time() - start_time, len(self.dataset)))
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
sample = self.dataset[idx]
out = dict()
out['obj_faces'] = torch.Tensor(sample['ho_gt'].obj_faces)
out['obj_sampled_idx'] = torch.Tensor(sample['obj_sampled_idx']).long()
out['obj_verts_gt'] = torch.Tensor(sample['ho_gt'].obj_verts)
out['obj_sampled_verts_gt'] = out['obj_verts_gt'][out['obj_sampled_idx'], :]
out['obj_contact_gt'] = torch.Tensor(sample['ho_gt'].obj_contact)
out['hand_contact_gt'] = torch.Tensor(sample['ho_gt'].hand_contact)
out['hand_pose_gt'] = torch.Tensor(sample['ho_gt'].hand_pose)
out['hand_beta_gt'] = torch.Tensor(sample['ho_gt'].hand_beta)
out['hand_mTc_gt'] = torch.Tensor(sample['ho_gt'].hand_mTc)
out['hand_verts_gt'] = torch.Tensor(sample['ho_gt'].hand_verts)
out['obj_verts_aug'] = torch.Tensor(sample['ho_aug'].obj_verts)
out['obj_sampled_verts_aug'] = out['obj_verts_aug'][out['obj_sampled_idx'], :]
out['hand_pose_aug'] = torch.Tensor(sample['ho_aug'].hand_pose)
out['hand_beta_aug'] = torch.Tensor(sample['ho_aug'].hand_beta)
out['hand_mTc_aug'] = torch.Tensor(sample['ho_aug'].hand_mTc)
out['hand_verts_aug'] = torch.Tensor(sample['ho_aug'].hand_verts)
out['hand_feats_aug'] = torch.Tensor(sample['hand_feats_aug'])
out['obj_feats_aug'] = torch.Tensor(sample['obj_feats_aug'])
out['obj_normals_aug'] = torch.Tensor(sample['ho_aug'].obj_normals)
if self.train:
out['obj_sampled_verts_aug'] += torch.randn(out['obj_sampled_verts_aug'].shape) * self.aug_vert_jitter
return out
@staticmethod
def collate_fn(batch):
out = dict()
batch_keys = batch[0].keys()
skip_keys = ['obj_faces', 'obj_verts_gt', 'obj_contact_gt', 'obj_normals_aug', 'obj_verts_aug'] # These will be manually collated
# For each not in skip_keys, use default torch collator
for key in [k for k in batch_keys if k not in skip_keys]:
out[key] = torch.utils.data._utils.collate.default_collate([d[key] for d in batch])
verts_gt_all = [sample['obj_verts_gt'] for sample in batch]
verts_aug_all = [sample['obj_verts_aug'] for sample in batch]
faces_all = [sample['obj_faces'] for sample in batch]
contact_all = [sample['obj_contact_gt'] for sample in batch]
obj_normals_aug_all = [sample['obj_normals_aug'] for sample in batch]
out['obj_contact_gt'] = pytorch3d.structures.utils.list_to_padded(contact_all, pad_value=-1)
out['obj_normals_aug'] = pytorch3d.structures.utils.list_to_padded(obj_normals_aug_all, pad_value=-1)
# out['obj_verts_gt'] = pytorch3d.structures.utils.list_to_padded(verts_gt_all, pad_value=-1)
# out['obj_verts_aug'] = pytorch3d.structures.utils.list_to_padded(verts_aug_all, pad_value=-1)
# out['obj_faces'] = pytorch3d.structures.utils.list_to_padded(faces_all, pad_value=-1)
out['mesh_gt'] = Meshes(verts=verts_gt_all, faces=faces_all) # This is slower than the above, but probably fast enough
out['mesh_aug'] = Meshes(verts=verts_aug_all, faces=faces_all)
return out
if __name__ == '__main__':
# Test the speed of the dataloader by going through the entire perturbed-contactpose train set
dataset = ContactDBDataset('data/perturbed_contactpose_train.pkl')
dataloader = DataLoader(dataset, batch_size=16, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
start_time = time.time()
print('start', len(dataloader))
for idx, sample in enumerate(tqdm(dataloader)):
pass
print('Epoch dataload time: ', time.time() - start_time)
|
ContactOpt-main
|
contactopt/loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pickle
from contactopt.hand_object import HandObject
import open3d
from tqdm import tqdm
from scipy.spatial.transform import Rotation as R
import random
from contactopt.util import SAMPLE_VERTS_NUM
def process_image_pkl(input_file, output_file):
"""
Reads pre-generated pkl file containing pose estimates and ground truth poses,
Generates a dataset pkl file and does preprocessing for the PyTorch dataloader
:param input_file: path of input pkl
:param output_file: path of output pkl
"""
input_pkl = pickle.load(open(input_file, 'rb'))
random.shuffle(input_pkl)
all_data = []
for idx, sample_dict in enumerate(tqdm(input_pkl)):
ho_gt = HandObject()
# Apply the extrinsic matrix to the pose axis-angle values
cam_extr = sample_dict['hand_extr_gt']
rot_pose = R.from_rotvec(sample_dict['hand_pose_gt'][:3])
rot_extr = R.from_matrix(cam_extr[:3, :3])
rot_new = rot_extr * rot_pose
sample_dict['hand_pose_gt'][:3] = rot_new.as_rotvec() # Overwrite the original axang rotation with new one
ho_gt.load_from_image(sample_dict['hand_beta_gt'], sample_dict['hand_pose_gt'], sample_dict['obj_faces'], sample_dict['obj_verts_gt'], hand_verts=sample_dict['hand_verts_gt'])
ho_gt.calc_dist_contact(hand=True, obj=True)
num_verts_in_contact = np.sum(ho_gt.hand_contact >= 0.9)
ho_gt.hand_contact *= 0
ho_gt.obj_contact *= 0
obj_verts = sample_dict['obj_verts_gt']
ho_pred = HandObject()
ho_pred.load_from_image(sample_dict['hand_beta_pred'], sample_dict['hand_pose_pred'], sample_dict['obj_faces'], obj_verts, hand_verts=sample_dict['hand_verts_pred'])
new_sample = dict()
new_sample['ho_aug'] = ho_pred
new_sample['ho_gt'] = ho_gt
new_sample['obj_sampled_idx'] = np.random.randint(0, len(ho_gt.obj_verts), SAMPLE_VERTS_NUM)
new_sample['hand_feats_aug'], new_sample['obj_feats_aug'] = ho_pred.generate_pointnet_features(new_sample['obj_sampled_idx'])
new_sample['num_verts_in_contact'] = num_verts_in_contact
all_data.append(new_sample)
if len(all_data) > 10:
print('Cutting short!')
break
pickle.dump(all_data, open(output_file, 'wb'))
if __name__ == '__main__':
IN_PKL = 'data/pose_estimates.pkl'
OUT_PKL = 'data/ho3d_image.pkl'
process_image_pkl(IN_PKL, OUT_PKL)
|
ContactOpt-main
|
contactopt/create_dataset_im.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import contactopt.pointnet as pointnet
import torch.nn.functional as F
from pytorch3d import ops, transforms
import contactopt.util as util
class DeepContactNet(nn.Module):
def __init__(self, normalize_pts=True):
super(DeepContactNet, self).__init__()
self.pointnet = pointnet.Net()
self.normalize_pts = normalize_pts
pointnet_total_params = sum(p.numel() for p in self.pointnet.parameters() if p.requires_grad)
print('Backbone params: {}'.format(pointnet_total_params))
def forward(self, hand_verts, hand_feats, obj_verts, obj_feats):
device = hand_verts.device
batch_size = hand_verts.shape[0]
out = dict()
if self.normalize_pts:
tform = self.get_normalizing_tform(hand_verts, obj_verts)
hand_verts = util.apply_tform(tform, hand_verts)
obj_verts = util.apply_tform(tform, obj_verts)
# util.vis_pointcloud(obj_verts, hand_verts) # View pointnet input
x, pos, batch = self.verts_to_pointcloud(hand_verts, hand_feats, obj_verts, obj_feats)
contact_batched = self.pointnet(x, pos, batch)
contact = contact_batched.view(batch_size, hand_verts.shape[1] + obj_verts.shape[1], 10)
out['contact_hand'] = contact[:, :hand_verts.shape[1], :]
out['contact_obj'] = contact[:, hand_verts.shape[1]:, :]
return out
@staticmethod
def get_normalizing_tform(hand_verts, obj_verts, random_rot=True):
"""
Find a 4x4 rigid transform to normalize the pointcloud. We choose the object center of mass to be the origin,
the hand center of mass to be along the +X direction, and the rotation around this axis to be random.
:param hand_verts: (batch, 778, 3)
:param obj_verts: (batch, 2048, 3)
:return: tform: (batch, 4, 4)
"""
with torch.no_grad():
obj_centroid = torch.mean(obj_verts, dim=1) # (batch, 3)
hand_centroid = torch.mean(hand_verts, dim=1)
x_vec = F.normalize(hand_centroid - obj_centroid, dim=1) # From object to hand
if random_rot:
rand_vec = transforms.random_rotations(hand_verts.shape[0], device=hand_verts.device) # Generate random rot matrix
y_vec = F.normalize(torch.cross(x_vec, rand_vec[:, :3, 0]), dim=1) # Make orthogonal
else:
ref_pt = hand_verts[:, 80, :]
y_vec = F.normalize(torch.cross(x_vec, ref_pt - obj_centroid), dim=1) # From object to hand ref point
z_vec = F.normalize(torch.cross(x_vec, y_vec), dim=1) # Z axis
tform = ops.eyes(4, hand_verts.shape[0], device=hand_verts.device)
tform[:, :3, 0] = x_vec
tform[:, :3, 1] = y_vec
tform[:, :3, 2] = z_vec
tform[:, :3, 3] = obj_centroid
return torch.inverse(tform)
@staticmethod
def verts_to_pointcloud(hand_verts, hand_feats, obj_verts, obj_feats):
"""
Convert hand and object vertices and features from Pytorch3D padded format (batch, vertices, N)
to Pytorch-Geometric packed format (all_vertices, N)
"""
batch_size = hand_verts.shape[0]
device = hand_verts.device
ptcloud_pos = torch.cat((hand_verts, obj_verts), dim=1)
ptcloud_x = torch.cat((hand_feats, obj_feats), dim=1)
_, N, _ = ptcloud_pos.shape # (batch_size, num_points, 3)
pos = ptcloud_pos.view(batch_size * N, -1)
batch = torch.zeros((batch_size, N), device=device, dtype=torch.long)
for i in range(batch_size):
batch[i, :] = i
batch = batch.view(-1)
x = ptcloud_x.view(-1, hand_feats.shape[2])
# print('x', x.shape, pos.shape, batch.shape)
return x, pos, batch
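if __name__ == '__main__':
    # Added smoke-test sketch (not part of the original file): run a forward pass on
    # random tensors to check shapes. The 25 per-point features match
    # HandObject.generate_pointnet_features, and the 10 output channels are the
    # discretized contact bins predicted by the PointNet++ backbone.
    model = DeepContactNet()
    hand_verts = torch.rand(2, 778, 3)
    hand_feats = torch.rand(2, 778, 25)
    obj_verts = torch.rand(2, 2048, 3)
    obj_feats = torch.rand(2, 2048, 25)
    with torch.no_grad():
        out = model(hand_verts, hand_feats, obj_verts, obj_feats)
    print(out['contact_hand'].shape, out['contact_obj'].shape)  # (2, 778, 10) and (2, 2048, 10)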
|
ContactOpt-main
|
contactopt/deepcontact_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contactopt.loader import ContactDBDataset
from contactopt.deepcontact_net import DeepContactNet
import glob
import argparse
from contactopt.optimize_pose import optimize_pose
from contactopt.visualize import show_optimization
import pickle
from contactopt.hand_object import HandObject
import contactopt.util as util
from tqdm import tqdm
import contactopt.arguments as arguments
import time
import torch
import os
from torch.utils.data import DataLoader
import pytorch3d
import numpy as np
def get_newest_checkpoint():
"""
Finds the newest model checkpoint file, sorted by the date of the file
:return: Model with loaded weights
"""
list_of_files = glob.glob('checkpoints/*.pt')
latest_file = max(list_of_files, key=os.path.getctime)
print('Loading checkpoint file:', latest_file)
model = DeepContactNet()
model.load_state_dict(torch.load(latest_file))
return model
def run_contactopt(args):
"""
Actually run ContactOpt approach. Estimates target contact with DeepContact,
then optimizes it. Performs random restarts if selected.
Saves results to a pkl file.
:param args: input settings
"""
print('Running split', args.split)
dataset = ContactDBDataset(args.test_dataset, min_num_cont=args.min_cont)
shuffle = args.vis or args.partial > 0
print('Shuffle:', shuffle)
test_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = get_newest_checkpoint()
model.to(device)
model.eval()
all_data = list()
for idx, data in enumerate(tqdm(test_loader)):
data_gpu = util.dict_to_device(data, device)
batch_size = data['obj_sampled_idx'].shape[0]
if args.split != 'fine':
with torch.no_grad():
network_out = model(data_gpu['hand_verts_aug'], data_gpu['hand_feats_aug'], data_gpu['obj_sampled_verts_aug'], data_gpu['obj_feats_aug'])
hand_contact_target = util.class_to_val(network_out['contact_hand']).unsqueeze(2)
obj_contact_target = util.class_to_val(network_out['contact_obj']).unsqueeze(2)
else:
hand_contact_target = data_gpu['hand_contact_gt']
obj_contact_target = util.batched_index_select(data_gpu['obj_contact_gt'], 1, data_gpu['obj_sampled_idx'])
if args.sharpen_thresh > 0: # If flag, sharpen contact
print('Sharpening')
obj_contact_target = util.sharpen_contact(obj_contact_target, slope=2, thresh=args.sharpen_thresh)
hand_contact_target = util.sharpen_contact(hand_contact_target, slope=2, thresh=args.sharpen_thresh)
if args.rand_re > 1: # If we desire random restarts
mtc_orig = data_gpu['hand_mTc_aug'].detach().clone()
print('Doing random optimization restarts')
best_loss = torch.ones(batch_size) * 100000
for re_it in range(args.rand_re):
# Add noise to hand translation and rotation
data_gpu['hand_mTc_aug'] = mtc_orig.detach().clone()
random_rot_mat = pytorch3d.transforms.euler_angles_to_matrix(torch.randn((batch_size, 3), device=device) * args.rand_re_rot / 180 * np.pi, 'ZYX')
data_gpu['hand_mTc_aug'][:, :3, :3] = torch.bmm(random_rot_mat, data_gpu['hand_mTc_aug'][:, :3, :3])
data_gpu['hand_mTc_aug'][:, :3, 3] += torch.randn((batch_size, 3), device=device) * args.rand_re_trans
cur_result = optimize_pose(data_gpu, hand_contact_target, obj_contact_target, n_iter=args.n_iter, lr=args.lr,
w_cont_hand=args.w_cont_hand, w_cont_obj=1, save_history=args.vis, ncomps=args.ncomps,
w_cont_asym=args.w_cont_asym, w_opt_trans=args.w_opt_trans, w_opt_pose=args.w_opt_pose,
w_opt_rot=args.w_opt_rot,
caps_top=args.caps_top, caps_bot=args.caps_bot, caps_rad=args.caps_rad,
caps_on_hand=args.caps_hand,
contact_norm_method=args.cont_method, w_pen_cost=args.w_pen_cost,
w_obj_rot=args.w_obj_rot, pen_it=args.pen_it)
if re_it == 0:
out_pose = torch.zeros_like(cur_result[0])
out_mTc = torch.zeros_like(cur_result[1])
obj_rot = torch.zeros_like(cur_result[2])
opt_state = cur_result[3]
loss_val = cur_result[3][-1]['loss']
for b in range(batch_size):
if loss_val[b] < best_loss[b]:
best_loss[b] = loss_val[b]
out_pose[b, :] = cur_result[0][b, :]
out_mTc[b, :, :] = cur_result[1][b, :, :]
obj_rot[b, :, :] = cur_result[2][b, :, :]
# print('Loss, re', re_it, loss_val)
# print('Best loss', best_loss)
else:
result = optimize_pose(data_gpu, hand_contact_target, obj_contact_target, n_iter=args.n_iter, lr=args.lr,
w_cont_hand=args.w_cont_hand, w_cont_obj=1, save_history=args.vis, ncomps=args.ncomps,
w_cont_asym=args.w_cont_asym, w_opt_trans=args.w_opt_trans, w_opt_pose=args.w_opt_pose,
w_opt_rot=args.w_opt_rot,
caps_top=args.caps_top, caps_bot=args.caps_bot, caps_rad=args.caps_rad,
caps_on_hand=args.caps_hand,
contact_norm_method=args.cont_method, w_pen_cost=args.w_pen_cost,
w_obj_rot=args.w_obj_rot, pen_it=args.pen_it)
out_pose, out_mTc, obj_rot, opt_state = result
obj_contact_upscale = util.upscale_contact(data_gpu['mesh_aug'], data_gpu['obj_sampled_idx'], obj_contact_target)
for b in range(obj_contact_upscale.shape[0]): # Loop over batch
gt_ho = HandObject()
in_ho = HandObject()
out_ho = HandObject()
gt_ho.load_from_batch(data['hand_beta_gt'], data['hand_pose_gt'], data['hand_mTc_gt'], data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_gt'], b)
in_ho.load_from_batch(data['hand_beta_aug'], data['hand_pose_aug'], data['hand_mTc_aug'], hand_contact_target, obj_contact_upscale, data['mesh_aug'], b)
out_ho.load_from_batch(data['hand_beta_aug'], out_pose, out_mTc, data['hand_contact_gt'], data['obj_contact_gt'], data['mesh_aug'], b, obj_rot=obj_rot)
# out_ho.calc_dist_contact(hand=True, obj=True)
all_data.append({'gt_ho': gt_ho, 'in_ho': in_ho, 'out_ho': out_ho})
if args.vis:
show_optimization(data, opt_state, hand_contact_target.detach().cpu().numpy(), obj_contact_upscale.detach().cpu().numpy(),
is_video=args.video, vis_method=args.vis_method)
if idx >= args.partial > 0: # Speed up for eval
break
out_file = 'data/optimized_{}.pkl'.format(args.split)
print('Saving to {}. Len {}'.format(out_file, len(all_data)))
pickle.dump(all_data, open(out_file, 'wb'))
if __name__ == '__main__':
util.hack_filedesciptor()
args = arguments.run_contactopt_parse_args()
if args.split == 'aug': # Settings defaults for Perturbed ContactPose
defaults = {'lr': 0.01,
'n_iter': 250,
'w_cont_hand': 2.0,
'sharpen_thresh': -1,
'ncomps': 15,
'w_cont_asym': 2,
'w_opt_trans': 0.3,
'w_opt_rot': 1.0,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 0,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 600,
'pen_it': 0,
'rand_re': 8,
'rand_re_trans': 0.04,
'rand_re_rot': 5,
'w_obj_rot': 0,
'vis_method': 1}
elif args.split == 'im' or args.split == 'demo': # Settings defaults for image-based pose estimates
defaults = {'lr': 0.01,
'n_iter': 250,
'w_cont_hand': 2.5,
'sharpen_thresh': -1,
'ncomps': 15,
'w_cont_asym': 2,
'w_opt_trans': 0.3,
'w_opt_rot': 1,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 0,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 320,
'pen_it': 0,
'rand_re': 8,
'rand_re_trans': 0.02,
'rand_re_rot': 5,
'w_obj_rot': 0,
'vis_method': 1}
elif args.split == 'fine': # Settings defaults for small-scale refinement
defaults = {'lr': 0.003,
'n_iter': 250,
'w_cont_hand': 0,
'sharpen_thresh': 0.3,
'ncomps': 15,
'w_cont_asym': 4,
'w_opt_trans': 0.03,
'w_opt_rot': 1.0,
'w_opt_pose': 1.0,
'caps_rad': 0.001,
'cont_method': 5,
'caps_top': 0.0005,
'caps_bot': -0.001,
'w_pen_cost': 600,
'pen_it': 0,
'rand_re': 1,
'rand_re_trans': 0.00,
'rand_re_rot': 0,
'w_obj_rot': 0,
'vis_method': 5}
for k in defaults.keys(): # Override arguments that have not been manually set with defaults
if vars(args)[k] is None:
vars(args)[k] = defaults[k]
print(args)
start_time = time.time()
run_contactopt(args)
print('Elapsed time:', time.time() - start_time)
|
ContactOpt-main
|
contactopt/run_contactopt.py
|
"""Pytorch-Geometric implementation of Pointnet++
Original source available at https://github.com/rusty1s/pytorch_geometric"""
import torch
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN
from torch_geometric.datasets import ModelNet
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.nn import PointConv, fps, radius, global_max_pool, knn_interpolate
class SAModule(torch.nn.Module):
def __init__(self, ratio, r, nn):
super(SAModule, self).__init__()
self.ratio = ratio
self.r = r
self.conv = PointConv(nn)
def forward(self, x, pos, batch):
idx = fps(pos, batch, ratio=self.ratio)
row, col = radius(pos, pos[idx], self.r, batch, batch[idx],
max_num_neighbors=64)
edge_index = torch.stack([col, row], dim=0)
x = self.conv(x, (pos, pos[idx]), edge_index)
pos, batch = pos[idx], batch[idx]
return x, pos, batch
class GlobalSAModule(torch.nn.Module):
def __init__(self, nn):
super(GlobalSAModule, self).__init__()
self.nn = nn
def forward(self, x, pos, batch):
x = self.nn(torch.cat([x, pos], dim=1))
x = global_max_pool(x, batch)
pos = pos.new_zeros((x.size(0), 3))
batch = torch.arange(x.size(0), device=batch.device)
return x, pos, batch
def MLP(channels):
return Seq(*[
Seq(Lin(channels[i - 1], channels[i]), ReLU(), BN(channels[i]))
for i in range(1, len(channels))
])
class FPModule(torch.nn.Module):
def __init__(self, k, nn):
super(FPModule, self).__init__()
self.k = k
self.nn = nn
def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):
x = knn_interpolate(x, pos, pos_skip, batch, batch_skip, k=self.k)
if x_skip is not None:
x = torch.cat([x, x_skip], dim=1)
x = self.nn(x)
return x, pos_skip, batch_skip
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
NUM_FEATS = 25
NUM_CLASSES = 10
self.sa1_module = SAModule(0.2, 0.1, MLP([3 + NUM_FEATS, 64, 64, 128])) # TODO, reduce PN params
self.sa2_module = SAModule(0.25, 0.2, MLP([128 + 3, 128, 128, 256]))
self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))
self.fp3_module = FPModule(1, MLP([1024 + 256, 256, 256]))
self.fp2_module = FPModule(3, MLP([256 + 128, 256, 128]))
self.fp1_module = FPModule(3, MLP([128 + NUM_FEATS, 128, 128, 128]))
self.lin1 = torch.nn.Linear(128, 128)
self.lin2 = torch.nn.Linear(128, 128)
self.lin3 = torch.nn.Linear(128, NUM_CLASSES)
def forward(self, x, pos, batch):
sa0_out = (x, pos, batch)
sa1_out = self.sa1_module(*sa0_out)
sa2_out = self.sa2_module(*sa1_out)
sa3_out = self.sa3_module(*sa2_out)
fp3_out = self.fp3_module(*sa3_out, *sa2_out)
fp2_out = self.fp2_module(*fp3_out, *sa1_out)
x, _, _ = self.fp1_module(*fp2_out, *sa0_out)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin2(x)
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin3(x)
# return x
# return F.sigmoid(x) # big hyperparam, Bound to 0-1
# print('pre softmax shape', x.shape)
return F.log_softmax(x, dim=-1)
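if __name__ == '__main__':
    # Added smoke-test sketch (not part of the original file): the network expects
    # PyTorch-Geometric "packed" inputs, i.e. points of all samples stacked along one
    # dimension plus a `batch` index vector (see DeepContactNet.verts_to_pointcloud).
    num_points = 1024
    x = torch.rand(num_points, 25)                      # 25 per-point features (NUM_FEATS)
    pos = torch.rand(num_points, 3)                     # xyz positions
    batch = torch.zeros(num_points, dtype=torch.long)   # all points belong to sample 0
    net = Net()
    with torch.no_grad():
        out = net(x, pos, batch)
    print(out.shape)  # (num_points, 10) per-point log-probabilities over contact bins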
|
ContactOpt-main
|
contactopt/pointnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from os import path
import sys
import numpy as np
import pickle
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing
from contactopt.hand_object import HandObject
from contactopt.util import *
sys.path.append('../ContactPose') # Change this path to point to the ContactPose repo
from utilities.dataset import get_object_names, ContactPose
object_cut_list = []
# object_cut_list = ['eyeglasses']
def get_all_contactpose_samples():
"""
Gets all participants and objects from ContactPose
    Cuts out grasps with two hands or grasps using the left hand
:return: list of (participant_num, intent, object_name, ContactPose_object)
"""
samples = []
print('Reading ContactPose dataset')
for participant_id in tqdm(range(1, 51)):
for intent in ['handoff', 'use']:
for object_name in get_object_names(participant_id, intent):
cp = ContactPose(participant_id, intent, object_name, load_mano=False)
if cp._valid_hands != [1]: # If anything else than just the right hand, remove
continue
samples.append((participant_id, intent, object_name, cp))
print('Valid ContactPose samples:', len(samples))
return samples
def generate_contactpose_dataset(dataset, output_file, low_p, high_p, num_pert=1, aug_trans=0.02, aug_rot=0.05, aug_pca=0.3):
"""
Generates a dataset pkl file and does preprocessing for the PyTorch dataloader
:param dataset: List of ContactPose objects
:param output_file: path to output pkl file
:param low_p: Lower split location of the dataset, [0-1)
:param high_p: Upper split location of the dataset, [0-1)
:param num_pert: Number of random perturbations which are computed for every true dataset sample
:param aug_trans: Std deviation of hand translation noise added to the datasets, meters
:param aug_rot: Std deviation of hand rotation noise, axis-angle radians
:param aug_pca: Std deviation of hand pose noise, PCA units
"""
low_split = int(len(dataset) * low_p)
high_split = int(len(dataset) * high_p)
dataset = dataset[low_split:high_split]
if len(object_cut_list) > 0:
dataset = [s for s in dataset if s[2] not in object_cut_list]
print('Some objects are being removed', object_cut_list)
def process_sample(s, idx):
ho_gt = HandObject()
ho_gt.load_from_contactpose(s[3])
sample_list = []
# print('Processing', idx)
for i in range(num_pert):
# Since we're only saving pointers to the data, it's memory efficient
sample_data = dict()
ho_aug = HandObject()
aug_t = np.random.randn(3) * aug_trans
aug_p = np.concatenate((np.random.randn(3) * aug_rot, np.random.randn(15) * aug_pca)).astype(np.float32)
ho_aug.load_from_ho(ho_gt, aug_p, aug_t)
sample_data['ho_gt'] = ho_gt
sample_data['ho_aug'] = ho_aug
sample_data['obj_sampled_idx'] = np.random.randint(0, len(ho_gt.obj_verts), SAMPLE_VERTS_NUM)
sample_data['hand_feats_aug'], sample_data['obj_feats_aug'] = ho_aug.generate_pointnet_features(sample_data['obj_sampled_idx'])
sample_list.append(sample_data)
return sample_list
parallel = True
if parallel:
num_cores = multiprocessing.cpu_count()
print('Running on {} cores'.format(num_cores))
all_data_2d = Parallel(n_jobs=num_cores)(delayed(process_sample)(s, idx) for idx, s in enumerate(tqdm(dataset)))
all_data = [item for sublist in all_data_2d for item in sublist] # flatten 2d list
else:
all_data = [] # Do non-parallel
for idx, s in enumerate(tqdm(dataset)):
all_data.extend(process_sample(s, idx))
    print('Writing pickle file (this can be slow and memory-intensive)')
pickle.dump(all_data, open(output_file, 'wb'))
if __name__ == '__main__':
train_file = 'data/perturbed_contactpose_train.pkl'
test_file = 'data/perturbed_contactpose_test.pkl'
fine_file = 'data/contactpose_test.pkl'
aug_trans = 0.05
aug_rot = 0.1
aug_pca = 0.5
contactpose_dataset = get_all_contactpose_samples()
# Generate Perturbed ContactPose
generate_contactpose_dataset(contactpose_dataset, train_file, 0.0, 0.8, num_pert=16, aug_trans=aug_trans, aug_rot=aug_rot, aug_pca=aug_pca)
generate_contactpose_dataset(contactpose_dataset, test_file, 0.8, 1.0, num_pert=4, aug_trans=aug_trans, aug_rot=aug_rot, aug_pca=aug_pca)
# Generate "Small Refinements" dataset for optimizing ground-truth thermal contact
generate_contactpose_dataset(contactpose_dataset, fine_file, 0.0, 1.0, num_pert=1, aug_trans=0, aug_rot=0, aug_pca=0)
|
ContactOpt-main
|
contactopt/create_dataset_contactpose.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import hand_object
import os
import util
from scipy.linalg import orthogonal_procrustes
from scipy.spatial.transform import Rotation as R
import trimesh
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
import matplotlib.pyplot as plt
import torch
def np_apply_tform(points, tform):
"""
The non-batched numpy version
:param points: (N, 3)
:param tform: (4, 4)
:return:
"""
points_homo = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)
points_out = np.matmul(tform, points_homo.T).T
return points_out[:, :3]
def get_hand_align_tform(hand_joints):
"""
Find a 4x4 rigid transform to align the joints of a hand to a 'cardinal rotation'
:param hand_joints: (21, 3)
:return: tform: (4, 4)
"""
center_joint = 0
x_joint = 2
y_joint = 17
trans = hand_joints[center_joint, :]
x_vec = hand_joints[x_joint, :] - hand_joints[center_joint, :]
x_vec = x_vec / np.linalg.norm(x_vec)
y_vec = hand_joints[y_joint, :] - hand_joints[center_joint, :]
y_vec = np.cross(x_vec, y_vec)
y_vec = y_vec / np.linalg.norm(y_vec)
z_vec = np.cross(x_vec, y_vec)
z_vec = z_vec / np.linalg.norm(z_vec)
tform = np.eye(4)
tform[:3, 0] = x_vec
tform[:3, 1] = y_vec
tform[:3, 2] = z_vec
tform[:3, 3] = trans
return np.linalg.inv(tform)
def calc_procrustes(points1, points2, return_tform=False):
""" Align the predicted entity in some optimality sense with the ground truth.
Does NOT align scale
https://github.com/shreyashampali/ho3d/blob/master/eval.py """
t1 = points1.mean(0) # Find centroid
t2 = points2.mean(0)
points1_t = points1 - t1 # Zero mean
points2_t = points2 - t2
R, s = orthogonal_procrustes(points1_t, points2_t) # Run procrustes alignment, returns rotation matrix and scale
points2_t = np.dot(points2_t, R.T) # Apply tform to second pointcloud
points2_t = points2_t + t1
if return_tform:
return R, t1 - t2
else:
return points2_t
def align_by_tform(mtx, tform):
t2 = mtx.mean(0)
mtx_t = mtx - t2
R, t1 = tform
return np.dot(mtx_t, R.T) + t1 + t2
def get_trans_rot_err(points1, points2):
"""
Given two pointclouds, find the error in centroid and rotation
:param points1: numpy (V, 3)
:param points2: numpy (V, 3)
:return: translation error (meters), rotation error (degrees)
"""
tform = calc_procrustes(points1, points2, return_tform=True)
translation_error = np.linalg.norm(tform[1], 2)
r = R.from_matrix(tform[0])
rotation_error = r.magnitude() * 180 / np.pi
return translation_error, rotation_error
def geometric_eval(ho_test, ho_gt):
"""
Computes many statistics about ground truth and HO
Note that official HO-3D metrics are available here, but they only consider the hand, and I think they do too much alignment
https://github.com/shreyashampali/ho3d/blob/master/eval.py
:param ho_test: hand-object under test
:param ho_gt: ground-truth hand-object
:return: dictionary of stats
"""
stats = dict()
stats['unalign_hand_verts'] = util.calc_l2_err(ho_gt.hand_verts, ho_test.hand_verts, axis=1)
stats['unalign_hand_joints'] = util.calc_l2_err(ho_gt.hand_joints, ho_test.hand_joints, axis=1)
stats['unalign_obj_verts'] = util.calc_l2_err(ho_gt.obj_verts, ho_test.obj_verts, axis=1)
root_test = ho_test.hand_joints[0, :]
root_gt = ho_gt.hand_joints[0, :]
stats['rootalign_hand_joints'] = util.calc_l2_err(ho_gt.hand_joints - root_gt, ho_test.hand_joints - root_test, axis=1)
stats['rootalign_obj_verts'] = util.calc_l2_err(ho_gt.obj_verts - root_gt, ho_test.obj_verts - root_test, axis=1)
obj_cent_gt = ho_gt.obj_verts.mean(0)
obj_cent_test = ho_test.obj_verts.mean(0)
stats['objalign_hand_joints'] = util.calc_l2_err(ho_gt.hand_joints - obj_cent_gt, ho_test.hand_joints - obj_cent_test, axis=1)
hand_joints_align_gt = np_apply_tform(ho_gt.hand_joints, get_hand_align_tform(ho_gt.hand_joints))
hand_joints_align_test = np_apply_tform(ho_test.hand_joints, get_hand_align_tform(ho_test.hand_joints))
hand_verts_align_gt = np_apply_tform(ho_gt.hand_verts, get_hand_align_tform(ho_gt.hand_joints))
hand_verts_align_test = np_apply_tform(ho_test.hand_verts, get_hand_align_tform(ho_test.hand_joints))
stats['handalign_hand_joints'] = util.calc_l2_err(hand_joints_align_gt, hand_joints_align_test, axis=1)
stats['handalign_hand_verts'] = util.calc_l2_err(hand_verts_align_gt, hand_verts_align_test, axis=1)
stats['verts'] = ho_gt.obj_verts.shape[0]
return stats
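if __name__ == '__main__':
    # Added sketch (not part of the original file): sanity-check the Procrustes-based
    # error measures on synthetic data with a known rigid offset; the rotation error
    # should come out close to 10 degrees.
    rng = np.random.RandomState(0)
    pts = rng.rand(500, 3)
    rot = R.from_euler('z', 10, degrees=True).as_matrix()
    pts_moved = np.dot(pts, rot.T) + np.array([0.05, 0.0, 0.0])
    trans_err, rot_err = get_trans_rot_err(pts, pts_moved)
    print('translation error: {:.3f} m, rotation error: {:.1f} deg'.format(trans_err, rot_err))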
|
ContactOpt-main
|
contactopt/geometric_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from os import path as osp
import numpy as np
from open3d import io as o3dio
from open3d import geometry as o3dg
from open3d import utility as o3du
from open3d import visualization as o3dv
import json
import transforms3d.quaternions as txq
import torch
import pytorch3d
from pytorch3d.structures import Meshes
import contactopt.util as util
from manopth.manolayer import ManoLayer
from contactopt.diffcontact import calculate_contact_capsule
import matplotlib.pyplot as plt
def mano_get_faces():
return util.get_mano_closed_faces()
class HandObject:
"""
Universal data structure to handle hand, object, and contact data.
This class has many data elements, not all of them are always populated.
Has many loader functions to take data from multiple sources.
"""
closed_faces = util.get_mano_closed_faces()
def __init__(self):
self.is_left = None
self.hand_beta = None
self.hand_pose = None
self.hand_mTc = None
self.hand_contact = None
self.hand_verts = None
self.hand_joints = None
self.obj_verts = None
self.obj_faces = None
self.obj_contact = None
self.path = None
self.obj_normals = None
def load_from_verts(self, hand_verts, obj_faces, obj_verts):
"""Load from hand/object vertices alone"""
self.obj_verts = obj_verts
self.obj_faces = obj_faces
self.hand_verts = hand_verts
self.calc_dist_contact(hand=True, obj=True)
def load_from_image(self, hand_beta, hand_pose, obj_faces, obj_verts, hand_verts=None):
"""Load from image-based results pkl file. Mano root translation is not known, but hand vertices are"""
self.hand_beta = hand_beta
self.hand_pose = hand_pose
self.hand_mTc = np.eye(4)
self.obj_verts = obj_verts
self.obj_faces = obj_faces
self.run_mano() # Run mano model forwards
if hand_verts is not None:
displ = hand_verts[0, :] - self.hand_verts[0, :] # Find translation by comparing vertices of aligned hands
self.hand_mTc[:3, 3] = displ
self.run_mano() # Rerun mano model to account for translation
mean_err = np.linalg.norm(self.hand_verts - hand_verts, 2, 1)
if mean_err.mean() > 1e-6: # Check if there's much error in reconstruction
print('Mean verts error', mean_err.mean())
print('Mano reconstruction failure')
# self.calc_dist_contact(hand=True, obj=True)
self.hand_contact = np.zeros((self.hand_verts.shape[0], 1)) # Set to zero since we don't know the ground truth
self.obj_contact = np.zeros((self.obj_verts.shape[0], 1))
def load_from_batch(self, hand_beta, hand_pose, hand_mTc, hand_contact, obj_contact, obj_mesh, idx=0, obj_rot=None):
"""Generate HO object from a torch dataloader batch"""
obj_verts = obj_mesh.verts_list()[idx]
if obj_rot is not None:
obj_verts = util.apply_rot(obj_rot[idx, :, :].unsqueeze(0).detach().cpu(), obj_verts.unsqueeze(0), around_centroid=True).squeeze(0)
self.hand_beta = hand_beta[idx, :].detach().cpu().numpy()
self.hand_pose = hand_pose[idx, :].detach().cpu().numpy()
self.hand_mTc = hand_mTc[idx, :, :].detach().cpu().numpy()
self.hand_contact = hand_contact[idx, :, :].detach().cpu().numpy()
self.obj_verts = obj_verts.detach().cpu().numpy()
self.obj_faces = obj_mesh.faces_list()[idx].detach().cpu().numpy()
self.obj_contact = obj_contact[idx, :self.obj_verts.shape[0], :].detach().cpu().numpy() # Since we're using a padded array, need to cut off some
self.run_mano()
def load_from_contactpose(self, cp_obj):
"""Load HO object from ContactPose dataset"""
if not osp.isfile(cp_obj.contactmap_filename):
raise FileNotFoundError('Could not find {}'.format(cp_obj.contactmap_filename))
obj_mesh = o3dio.read_triangle_mesh(cp_obj.contactmap_filename) # Includes object mesh and contact map embedded as vertex colors
vertex_colors = np.array(obj_mesh.vertex_colors, dtype=np.float32)
self.obj_contact = np.expand_dims(util.fit_sigmoid(vertex_colors[:, 0]), axis=1) # Normalize with sigmoid, shape (V, 1)
self.obj_verts = np.array(obj_mesh.vertices, dtype=np.float32) # Keep as floats since torch uses floats
self.obj_faces = np.array(obj_mesh.triangles)
for idx, mp in enumerate(cp_obj.mano_params):
if mp is None:
continue
self.is_left = idx == 0 # Left then right
self.hand_beta = np.array(mp['betas']) # 10 shape PCA parameters
self.hand_pose = np.array(mp['pose']) # 18 dim length, first 3 ax-angle, 15 PCA pose
mTc = mp['hTm']
# mTc = np.linalg.inv(mTc) # World to object
self.hand_mTc = mTc
if self.is_left:
            raise ValueError('Pipeline currently cannot handle left hands')
self.run_mano()
self.calc_dist_contact(hand=True, obj=False)
def load_from_ho(self, ho, aug_pose=None, aug_trans=None):
"""Load from another HandObject obj, potentially with augmentation"""
self.hand_beta = np.array(ho.hand_beta)
self.hand_pose = np.array(ho.hand_pose)
self.hand_mTc = np.array(ho.hand_mTc)
self.obj_verts = ho.obj_verts
self.obj_faces = ho.obj_faces
self.obj_contact = ho.obj_contact
if aug_pose is not None:
self.hand_pose += aug_pose
if aug_trans is not None:
self.hand_mTc[:3, 3] += aug_trans
self.run_mano()
# self.calc_dist_contact(hand=True, obj=False) # DONT calculate hand contact, since it's not ground truth
def load_from_mano_params(self, hand_beta, hand_pose, hand_trans, obj_faces, obj_verts):
"""Load from mano parameters and object mesh"""
self.hand_beta = np.array(hand_beta)
self.hand_pose = np.array(hand_pose)
self.hand_mTc = np.eye(4)
self.hand_mTc[:3, 3] = hand_trans
self.obj_verts = np.array(obj_verts)
self.obj_faces = np.array(obj_faces)
self.run_mano()
self.hand_contact = np.zeros((self.hand_verts.shape[0], 1)) # Set to zero since we don't know the ground truth
self.obj_contact = np.zeros((self.obj_verts.shape[0], 1))
def calc_dist_contact(self, hand=True, obj=False, special_contact=False):
"""Set hand and object contact maps based on DiffContact method.
This is sometimes used when ground truth contact is not known"""
object_mesh = Meshes(verts=[torch.Tensor(self.obj_verts)], faces=[torch.Tensor(self.obj_faces)])
hand_mesh = Meshes(verts=torch.Tensor(self.hand_verts).unsqueeze(0), faces=torch.Tensor(self.closed_faces).unsqueeze(0))
hand_verts = torch.Tensor(self.hand_verts).unsqueeze(0)
if not special_contact:
obj_contact, hand_contact = calculate_contact_capsule(hand_verts, hand_mesh.verts_normals_padded(), object_mesh.verts_padded(), object_mesh.verts_normals_padded())
else:
# hand_verts_subdivided = util.subdivide_verts(hand_mesh.edges_packed().unsqueeze(0), hand_verts)
# hand_normals_subdivided = util.subdivide_verts(hand_mesh.edges_packed().unsqueeze(0), hand_mesh.verts_normals_padded())
hand_verts_subdivided = hand_verts
hand_normals_subdivided = hand_mesh.verts_normals_padded()
obj_contact, hand_contact = calculate_contact_capsule(hand_verts_subdivided, hand_normals_subdivided, object_mesh.verts_padded(),
object_mesh.verts_normals_padded(), caps_rad=0.003) # needed for paper vis?
if hand:
self.hand_contact = hand_contact.squeeze(0).detach().cpu().numpy()
if obj:
self.obj_contact = obj_contact.squeeze(0).detach().cpu().numpy()
def run_mano(self):
"""Runs forward_mano, computing the hand vertices and joints based on pose/beta parameters.
Handles numpy-pytorch-numpy conversion"""
if self.hand_pose.shape[0] == 48: # Special case when we're loading GT honnotate
mano_model = ManoLayer(mano_root='mano/models', joint_rot_mode="axisang", use_pca=False, center_idx=None, flat_hand_mean=True)
else: # Everything else
mano_model = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=15, side='right', flat_hand_mean=False)
pose_tensor = torch.Tensor(self.hand_pose).unsqueeze(0)
beta_tensor = torch.Tensor(self.hand_beta).unsqueeze(0)
tform_tensor = torch.Tensor(self.hand_mTc).unsqueeze(0)
mano_verts, mano_joints = util.forward_mano(mano_model, pose_tensor, beta_tensor, [tform_tensor])
self.hand_verts = mano_verts.squeeze().detach().numpy()
self.hand_joints = mano_joints.squeeze().detach().numpy()
def generate_pointnet_features(self, obj_sampled_idx):
"""Calculates per-point features for pointnet. DeepContact uses these features"""
obj_mesh = Meshes(verts=[torch.Tensor(self.obj_verts)], faces=[torch.Tensor(self.obj_faces)])
hand_mesh = Meshes(verts=[torch.Tensor(self.hand_verts)], faces=[torch.Tensor(util.get_mano_closed_faces())])
obj_sampled_verts_tensor = obj_mesh.verts_padded()[:, obj_sampled_idx, :]
_, _, obj_nearest = pytorch3d.ops.knn_points(obj_sampled_verts_tensor, hand_mesh.verts_padded(), K=1, return_nn=True) # Calculate on object
_, _, hand_nearest = pytorch3d.ops.knn_points(hand_mesh.verts_padded(), obj_sampled_verts_tensor, K=1, return_nn=True) # Calculate on hand
obj_normals = obj_mesh.verts_normals_padded()
        obj_normals = torch.nn.functional.normalize(obj_normals, dim=2, eps=1e-12)  # Re-normalize to work around a poorly tuned eps in PyTorch3D's vertex-normal computation
norms = torch.sum(obj_normals * obj_normals, dim=2) # Dot product
obj_normals[norms < 0.8] = 0.6 # TODO hacky get-around when normal finding fails completely
self.obj_normals = obj_normals.detach().squeeze().numpy()
obj_sampled_verts = self.obj_verts[obj_sampled_idx, :]
obj_sampled_normals = obj_normals[0, obj_sampled_idx, :].detach().numpy()
hand_normals = hand_mesh.verts_normals_padded()[0, :, :].detach().numpy()
hand_centroid = np.mean(self.hand_verts, axis=0)
obj_centroid = np.mean(self.obj_verts, axis=0)
# Hand features
hand_one_hot = np.ones((self.hand_verts.shape[0], 1))
hand_vec_to_closest = hand_nearest.squeeze().numpy() - self.hand_verts
hand_dist_to_closest = np.expand_dims(np.linalg.norm(hand_vec_to_closest, 2, 1), axis=1)
hand_dist_along_normal = np.expand_dims(np.sum(hand_vec_to_closest * hand_normals, axis=1), axis=1)
hand_dist_to_joint = np.expand_dims(self.hand_verts, axis=1) - np.expand_dims(self.hand_joints, axis=0) # Expand for broadcasting
hand_dist_to_joint = np.linalg.norm(hand_dist_to_joint, 2, 2)
hand_dot_to_centroid = np.expand_dims(np.sum((self.hand_verts - obj_centroid) * hand_normals, axis=1), axis=1)
# Object features
obj_one_hot = np.zeros((obj_sampled_verts.shape[0], 1))
obj_vec_to_closest = obj_nearest.squeeze().numpy() - obj_sampled_verts
obj_dist_to_closest = np.expand_dims(np.linalg.norm(obj_vec_to_closest, 2, 1), axis=1)
obj_dist_along_normal = np.expand_dims(np.sum(obj_vec_to_closest * obj_sampled_normals, axis=1), axis=1)
obj_dist_to_joint = np.expand_dims(obj_sampled_verts, axis=1) - np.expand_dims(self.hand_joints, axis=0) # Expand for broadcasting
obj_dist_to_joint = np.linalg.norm(obj_dist_to_joint, 2, 2)
obj_dot_to_centroid = np.expand_dims(np.sum((obj_sampled_verts - hand_centroid) * obj_sampled_normals, axis=1), axis=1)
# hand_feats = np.concatenate((hand_one_hot, hand_normals, hand_vec_to_closest, hand_dist_to_closest, hand_dist_along_normal, hand_dist_to_joint), axis=1)
# obj_feats = np.concatenate((obj_one_hot, obj_sampled_normals, obj_vec_to_closest, obj_dist_to_closest, obj_dist_along_normal, obj_dist_to_joint), axis=1)
hand_feats = np.concatenate((hand_one_hot, hand_dot_to_centroid, hand_dist_to_closest, hand_dist_along_normal, hand_dist_to_joint), axis=1)
obj_feats = np.concatenate((obj_one_hot, obj_dot_to_centroid, obj_dist_to_closest, obj_dist_along_normal, obj_dist_to_joint), axis=1)
return hand_feats, obj_feats
def get_o3d_meshes(self, hand_contact=False, normalize_pos=False):
"""Returns Open3D meshes for visualization
Draw with: o3dv.draw_geometries([hand_mesh, obj_mesh])"""
hand_color = np.asarray([224.0, 172.0, 105.0]) / 255
obj_color = np.asarray([100.0, 100.0, 100.0]) / 255
obj_centroid = self.obj_verts.mean(0)
if not normalize_pos:
obj_centroid *= 0
hand_mesh = o3dg.TriangleMesh()
hand_mesh.vertices = o3du.Vector3dVector(self.hand_verts - obj_centroid)
hand_mesh.triangles = o3du.Vector3iVector(HandObject.closed_faces)
hand_mesh.compute_vertex_normals()
if hand_contact and self.hand_contact.mean() != 0:
util.mesh_set_color(self.hand_contact, hand_mesh)
else:
hand_mesh.paint_uniform_color(hand_color)
obj_mesh = o3dg.TriangleMesh()
obj_mesh.vertices = o3du.Vector3dVector(self.obj_verts - obj_centroid)
obj_mesh.triangles = o3du.Vector3iVector(self.obj_faces)
obj_mesh.compute_vertex_normals()
if self.obj_contact.mean() != 0:
util.mesh_set_color(self.obj_contact, obj_mesh)
else:
obj_mesh.paint_uniform_color(obj_color)
return hand_mesh, obj_mesh
def vis_hand_object(self):
"""Runs Open3D visualizer for the current data"""
hand_mesh, obj_mesh = self.get_o3d_meshes(hand_contact=True)
o3dv.draw_geometries([hand_mesh, obj_mesh])
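if __name__ == '__main__':
    # Added sketch (not part of the original file): build a HandObject from raw MANO
    # parameters plus a dummy random "object" mesh, then check the DeepContact feature
    # shapes. Like the rest of the repo this needs the MANO model files in mano/models;
    # the random mesh is only a placeholder to exercise the code path.
    rng = np.random.RandomState(0)
    beta = np.zeros(10, dtype=np.float32)
    pose = np.zeros(18, dtype=np.float32)             # 3 axis-angle + 15 PCA components
    trans = np.array([0.0, 0.0, 0.1], dtype=np.float32)
    obj_verts = rng.rand(500, 3).astype(np.float32) * 0.1
    obj_faces = rng.randint(0, 500, (1000, 3))
    ho = HandObject()
    ho.load_from_mano_params(beta, pose, trans, obj_faces, obj_verts)
    sampled_idx = rng.randint(0, len(ho.obj_verts), util.SAMPLE_VERTS_NUM)
    hand_feats, obj_feats = ho.generate_pointnet_features(sampled_idx)
    print(hand_feats.shape, obj_feats.shape)          # expected: (778, 25) and (SAMPLE_VERTS_NUM, 25)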
|
ContactOpt-main
|
contactopt/hand_object.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import contactopt.arguments as arguments
from contactopt.deepcontact_net import DeepContactNet
from tqdm import tqdm
import contactopt.util as util
from contactopt.loader import ContactDBDataset
def calc_losses(network_out, contact_obj_gt, contact_hand_gt, sampled_verts_idx):
losses = dict()
batch_size = contact_obj_gt.shape[0]
batch = torch.zeros(sampled_verts_idx.shape, device=device, dtype=torch.long)
for i in range(batch_size):
batch[i, :] = i
batch = batch.view(-1)
contact_obj_gt = contact_obj_gt[batch, sampled_verts_idx.view(-1), :] # Select sampled verts
contact_obj_gt = contact_obj_gt.reshape(batch_size, sampled_verts_idx.shape[1], 1) # Reshape into network's shape
class_hand_gt = util.val_to_class(contact_hand_gt).squeeze(2)
class_obj_gt = util.val_to_class(contact_obj_gt).squeeze(2)
# print('class obj gt', class_obj_gt.shape, network_out['contact_obj'], class_obj_gt)
losses['contact_obj'] = criterion(network_out['contact_obj'].permute(0, 2, 1), class_obj_gt)
losses['contact_hand'] = criterion(network_out['contact_hand'].permute(0, 2, 1), class_hand_gt)
return losses
def train_epoch(epoch):
model.train()
scheduler.step()
loss_meter = util.AverageMeter('Loss', ':.2f')
for idx, data in enumerate(tqdm(train_loader)):
data = util.dict_to_device(data, device)
batch_size = data['hand_pose_gt'].shape[0]
optimizer.zero_grad()
out = model(data['hand_verts_aug'], data['hand_feats_aug'], data['obj_sampled_verts_aug'], data['obj_feats_aug'])
losses = calc_losses(out, data['obj_contact_gt'], data['hand_contact_gt'], data['obj_sampled_idx'])
loss = losses['contact_obj'] * args.loss_c_obj + losses['contact_hand'] * args.loss_c_hand
loss_meter.update(loss.item(), batch_size) # TODO better loss monitoring
loss.backward()
optimizer.step()
if idx % 10 == 0:
print('{} / {}'.format(idx, len(train_loader)), loss_meter)
global_iter = epoch * len(train_loader) + idx
writer.add_scalar('training/loss_contact_obj', losses['contact_obj'], global_iter)
writer.add_scalar('training/loss_contact_hand', losses['contact_hand'], global_iter)
writer.add_scalar('training/lr', scheduler.get_lr(), global_iter)
print('Train epoch: {}. Avg loss {:.4f} --------------------'.format(epoch, loss_meter.avg))
def test():
model.eval()
for idx, data in enumerate(test_loader):
data = util.dict_to_device(data, device)
with torch.no_grad():
out = model(data['hand_verts_aug'], data['hand_feats_aug'], data['obj_sampled_verts_aug'], data['obj_feats_aug'])
losses = calc_losses(out, data['obj_contact_gt'], data['hand_contact_gt'], data['obj_sampled_idx'])
global_iter = epoch * len(train_loader)
writer.add_scalar('testing/loss_contact_obj', losses['contact_obj'], global_iter)
writer.add_scalar('testing/loss_contact_hand', losses['contact_hand'], global_iter)
# print('Test epoch: Mean joint err {:.2f} cm --------------------'.format(joint_err_meter.avg))
if __name__ == '__main__':
util.hack_filedesciptor()
args = arguments.train_network_parse_args()
train_dataset = ContactDBDataset(args.train_dataset, train=True)
test_dataset = ContactDBDataset(args.test_dataset)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=6, collate_fn=ContactDBDataset.collate_fn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DeepContactNet().to(device)
if args.checkpoint != '':
print('Attempting to load checkpoint file:', args.checkpoint)
pretrained_dict = torch.load(args.checkpoint)
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and 'mano' not in k}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
bin_weights = torch.Tensor(np.loadtxt(util.DEEPCONTACT_BIN_WEIGHTS_FILE)).to(device)
# criterion = torch.nn.CrossEntropyLoss(weight=bin_weights)
criterion = torch.nn.NLLLoss(weight=bin_weights)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10], gamma=0.1) # TODO automatic?
writer = SummaryWriter(logdir='runs/' + args.desc)
writer.add_text('Hyperparams', args.all_str, 0)
for epoch in range(1, args.epochs):
train_epoch(epoch)
test()
torch.save(model.state_dict(), 'checkpoints/{}.pt'.format(args.desc))
print('\n')
|
ContactOpt-main
|
contactopt/train_deepcontact.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
parser = argparse.ArgumentParser(description='Generate Data')
parser.add_argument('--env-name', default='InvertedPendulum-v1',
help='environment to train on (default: InvertedPendulum-v1)')
parser.add_argument('--N', type=int, default=1000000)
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--out', type=str, default='/data/ddr')
parser.add_argument('--num-processes', type=int, default=40,
help='how many training processes to use (default: 40)')
parser.add_argument('--rollout', type=int, default=20, help="rollout for goal")
parser.add_argument('--method', type=str, default='random',
help='["random", "pixel_control"]')
parser.add_argument('--render', action='store_true')
parser.add_argument('--reset', action='store_true')
parser.add_argument('--from-policy', type=str, default=None,
help="use reward module as policy")
parser.add_argument('--framework', default='gym',
help='framework of env (default: gym)')
parser.add_argument('--maze-id', type=int, default=0)
parser.add_argument('--maze-length', type=int, default=1)
parser.add_argument('--single-env', action='store_true')
parser.add_argument('--random-start', action='store_true')
parser.add_argument('-v', action='store_true', help='verbose logging')
parser.add_argument('--max-episode-length', type=int, default=500,
help='maximum length of an episode (default: 500)')
parser.add_argument('--file-path', type=str, default=None,
help='path to XML file for mujoco')
def generate_data(rank, args, start, end):
from envs import create_env, set_seed, get_obs
from model import R_Module
    import torch
    from torch.autograd import Variable  # used below when --from-policy is set
    # Note: get_action (also used with --from-policy) is assumed to be importable from
    # the repo's shared helpers, as in the other training scripts
print(rank, "started")
env = create_env(args.env_name, framework=args.framework, args=args)
env = set_seed(args.seed + rank, env, args.framework)
state = get_obs(env, args.framework)
if args.from_policy is not None:
model_state, r_args = torch.load(args.from_policy)
policy = R_Module(env.action_space.shape[0],
r_args.dim,
discrete=r_args.discrete, baseline=r_args.baseline,
state_space=env.observation_space.shape[0])
policy.load_state_dict(model_state)
policy.eval()
states = []
actions = []
i = start
done = False
while i < end:
if i % 100 == 0:
print(rank, i)
ep_states = []
ep_actions = []
if args.from_policy is not None:
cx_p = Variable(torch.zeros(1, r_args.dim))
hx_p = Variable(torch.zeros(1, r_args.dim))
for j in range(args.rollout):
if args.from_policy is not None:
value, logit, (hx_p, cx_p) = policy(
state.unsqueeze(0), (hx_p, cx_p))
a, _, _ = get_action(logit, r_args.discrete)
else:
a = env.action_space.sample()
ep_actions.append(a)
state = get_obs(env, args.framework)
env.step(a)
if args.render:
env.render()
ep_states.append(state)
final_state = get_obs(env, args.framework)
ep_states.append(final_state)
states.append(ep_states)
actions.append(ep_actions)
i += 1
# reset the environment here
if done or args.reset:
env.reset()
done = False
torch.save((states, actions), os.path.join(
args.out_dir, 'states_actions_%s_%s.pt' % (start, end)))
if __name__ == '__main__':
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn')
from torch.autograd import Variable
from envs import create_env, set_seed, get_obs
from model import R_Module
os.environ['OMP_NUM_THREADS'] = '1'
args = parser.parse_args()
env_name = args.env_name
env_name += '_rollout%s' % args.rollout
if args.env_name.endswith('MazeEnv'):
env_name += 'mazeid%slength%s' % (args.maze_id, args.maze_length)
if args.single_env and args.maze_id == -1:
env = create_env(args.env_name, framework=args.framework, args=args)
env_name += '_single_env'
args.maze_structure = env._env.MAZE_STRUCTURE
if args.random_start:
env_name += '_randomstart'
if args.file_path is not None:
env_name += '_transfer'
if args.framework == 'mazebase':
env_name += '_rollout_%s_length_%s' % (args.rollout, args.maze_length)
args.out_dir = os.path.join(args.out, env_name)
print(args)
print(args.out_dir)
os.makedirs(args.out_dir, exist_ok=True)
processes = []
block = int(args.N / args.num_processes)
for rank in range(0, args.num_processes):
start = rank * block
end = (rank + 1) * block
p = mp.Process(target=generate_data, args=(rank, args, start, end))
p.start()
processes.append(p)
torch.save(args, os.path.join(args.out_dir, 'args.pt'))
# exit cleanly
for p in processes:
p.join()
|
ddr-master
|
generate_dynamics_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
from itertools import chain
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from envs import *
from model import Encoder, Decoder, D_Module, R_Module
from train_dynamics_module import D_Module, get_dynamics_losses
from common import *
from tensorboardX import SummaryWriter
def ensure_shared_grads(model, shared_model):
    # Lazily point the shared model's .grad tensors at this worker's gradient tensors;
    # once they are set, later backward passes accumulate into the same tensors in-place,
    # so there is nothing left to do.
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def train_online(rank, args, shared_model, optimizer=None, writer_dir=None):
"""
Arguments:
    - writer_dir: the TensorBoard summary directory (note: passing a SummaryWriter object directly does not work, so the writer is created inside this function)
"""
# create writer here itself
writer = None
if writer_dir is not None:
writer = SummaryWriter(log_dir=writer_dir)
shared_enc, shared_dec, shared_d_module, shared_r_module = shared_model
running_t, running_reward, running_value_loss, running_policy_loss, \
running_reward_loss = 0, 0, 0, 0, 0
torch.manual_seed(args.seed + rank)
env = create_env(args.env_name, framework=args.framework, args=args)
set_seed(args.seed + rank, env, args.framework)
enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
d_module = D_Module(env.action_space.shape[0], args.dim, args.discrete)
r_module = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=False,
state_space=env.observation_space.shape[0])
all_params = chain(enc.parameters(), dec.parameters(),
d_module.parameters(),
r_module.parameters())
# no shared adam ?
if optimizer is None:
optimizer = optim.Adam(all_params, lr=args.lr)
enc.train()
dec.train()
d_module.train()
r_module.train()
results_dict = {
'enc': None,
'dec': None,
'd_module': None,
'args': args,
'reward': [],
'policy_loss': [],
'value_loss': [],
'mean_entropy': [],
'mean_predicted_value': [],
'dec_losses': [],
'forward_losses': [],
'inverse_losses': [],
'total_losses': [],
}
episode_length = 0
i_episode, total_episode = 0, 0
done = True
start = time.time()
while total_episode < args.num_episodes:
# Sync with the shared model
r_module.load_state_dict(shared_r_module.state_dict())
d_module.load_state_dict(shared_d_module.state_dict())
enc.load_state_dict(shared_enc.state_dict())
dec.load_state_dict(shared_dec.state_dict())
if done:
cx_p = Variable(torch.zeros(1, args.dim))
hx_p = Variable(torch.zeros(1, args.dim))
cx_d = Variable(torch.zeros(1, args.dim))
hx_d = Variable(torch.zeros(1, args.dim))
i_episode += 1
episode_length = 0
total_episode = args.num_processes * (i_episode - 1) + rank
start = time.time()
last_episode_length = episode_length
if not args.single_env and args.env_name.endswith('MazeEnv'): # generate new maze
env = create_env(
args.env_name, framework=args.framework, args=args)
s = env.reset()
s = Variable(torch.from_numpy(s).float())
else:
cx_p = Variable(cx_p.data)
hx_p = Variable(hx_p.data)
cx_d = Variable(cx_d.data)
hx_d = Variable(hx_d.data)
s = Variable(s.data)
z = enc(s).unsqueeze(0)
s_hat = dec(z)
values = []
rhats = []
log_probs = []
rewards = []
entropies = []
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
for step in range(args.num_steps):
episode_length += 1
value, rhat, logit, (hx_p, cx_p) = r_module((
z.detach(), (hx_p, cx_p)))
action, entropy, log_prob = get_action(logit, discrete=args.discrete)
vlog("Action: %s\t Bounds: %s" % (str(action), str((env.action_space.low, env.action_space.high))), args.v)
entropies.append(entropy)
s_prime, reward, done, _ = env.step(action.data.numpy())
s_prime = Variable(torch.from_numpy(s_prime).float())
done = done or episode_length >= args.max_episode_length
z_prime = enc(s_prime)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, action, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
action)
values.append(value)
rhats.append(rhat)
log_probs.append(log_prob)
rewards.append(reward)
dec_loss += d_loss
inv_loss += i_loss
model_loss += m_loss
recon_loss += r_loss
forward_loss += f_loss
z = z_prime_hat
s = s_prime
s_hat = s_prime_hat
if done:
break
R = torch.zeros(1, 1)
if not done:
value, _, _, _ = r_module((z, (hx_p, cx_p)))
R = value.data
values.append(Variable(R))
policy_loss = 0
value_loss = 0
rew_loss = 0
pred_reward_loss = 0
R = Variable(R)
gae = torch.zeros(1, 1)
vlog("values: %s" % str([v.data[0,0] for v in values]), args.v)
vlog("rhats: %s" % str(rhats), args.v)
for i in reversed(range(len(rewards))):
R = args.gamma * R + rewards[i]
advantage = R - values[i]
value_loss += 0.5 * advantage.pow(2)
# reward loss
rew_loss += F.mse_loss(rhats[i], Variable(torch.from_numpy(
np.array([rewards[i]])).float()))
# Generalized Advantage Estimation
delta_t = rewards[i] + args.gamma * values[i + 1].data \
- values[i].data
gae = gae * args.gamma * args.tau + delta_t
if args.discrete:
policy_loss = policy_loss - log_probs[i] * Variable(gae) \
- args.entropy_coef * entropies[i]
else:
policy_loss = policy_loss - (log_probs[i] * Variable(gae).expand_as(
log_probs[i])).sum() - (args.entropy_coef * entropies[i]).sum()
optimizer.zero_grad()
U = 1. / min(i_episode, 100)
running_reward = running_reward * (1 - U) + sum(rewards) * U
running_t = running_t * (1 - U) + episode_length * U
running_policy_loss = running_policy_loss * (1 - U) + policy_loss.data[0] * U
running_value_loss = running_value_loss * (1 - U) + \
args.value_loss_coef * value_loss.data[0, 0] * U
running_reward_loss = running_reward_loss * (1 - U) + \
args.rew_loss_coef * rew_loss.data[0] * U
mean_entropy = np.mean([e.sum().data[0] for e in entropies])
mean_predicted_value = np.mean([v.sum().data[0] for v in values])
loss = policy_loss + args.value_loss_coef * value_loss + \
args.rew_loss_coef * rew_loss + args.inv_loss_coef * inv_loss + \
args.dec_loss_coef * dec_loss + forward_loss
if total_episode % args.log_interval == 0 and done:
if not args.discrete:
sample_logits = (list(logit[0].data[0].numpy()),
list(logit[1].data[0].numpy()))
else:
sample_logits = list(logit.data[0].numpy())
log(
'Episode {}\t'.format(total_episode) + \
'Avg reward: {:.2f}\tAverage length: {:.2f}\t'.format(
running_reward, running_t) + \
'Entropy: {:.2f}\tTime: {:.2f}\tRank: {}\t'.format(
mean_entropy, time.time() - start, rank) + \
'Policy Loss: {:.2f}\t'.format(running_policy_loss) + \
'Reward Loss: {:.2f}\t'.format(running_reward_loss) + \
'Weighted Value Loss: {:.2f}\t'.format(running_value_loss) + \
'Sample Action: %s\t' % str(list(action.data.numpy())) + \
'Logits: %s\t' % str(sample_logits) + \
'Decoder Loss: {:.2f}\t'.format(dec_loss.data[0]) + \
'Forward Loss: {:.2f}\t'.format(forward_loss.data[0]) + \
'Inverse Loss: {:.2f}\t'.format(inv_loss.data[0]) + \
'Loss: {:.2f}\t'.format(loss.data[0, 0]))
# write summaries here
if writer_dir is not None and done:
log('writing to tensorboard')
# running losses
writer.add_scalar('reward/running_reward', running_reward, i_episode)
writer.add_scalar('reward/running_policy_loss', running_policy_loss, i_episode)
writer.add_scalar('reward/running_value_loss', running_value_loss, i_episode)
# current episode stats
writer.add_scalar('reward/episode_reward', sum(rewards), i_episode)
writer.add_scalar('reward/episode_policy_loss', policy_loss.data[0], i_episode)
writer.add_scalar('reward/episode_value_loss', value_loss.data[0,0], i_episode)
writer.add_scalar('reward/mean_entropy', mean_entropy, i_episode)
writer.add_scalar('reward/mean_predicted_value', mean_predicted_value, i_episode)
writer.add_scalar('dynamics/total_loss', loss.data[0], i_episode)
writer.add_scalar('dynamics/decoder', dec_loss.data[0], i_episode)
writer.add_scalar('dynamics/reconstruction_loss', recon_loss.data[0], i_episode)
writer.add_scalar('dynamics/next_state_prediction_loss', model_loss.data[0], i_episode)
writer.add_scalar('dynamics/inv_loss', inv_loss.data[0], i_episode)
writer.add_scalar('dynamics/forward_loss', forward_loss.data[0], i_episode)
results_dict['reward'].append(sum(rewards))
results_dict['policy_loss'].append(policy_loss.data[0])
results_dict['value_loss'].append(value_loss.data[0,0])
results_dict['mean_entropy'].append(mean_entropy)
results_dict['mean_predicted_value'].append(mean_predicted_value)
results_dict['dec_losses'].append(dec_loss.data[0])
results_dict['forward_losses'].append(forward_loss.data[0])
results_dict['inverse_losses'].append(inv_loss.data[0])
results_dict['total_losses'].append(loss.data[0])
loss.backward()
torch.nn.utils.clip_grad_norm(all_params, args.max_grad_norm)
ensure_shared_grads(r_module, shared_r_module)
ensure_shared_grads(d_module, shared_d_module)
ensure_shared_grads(enc, shared_enc)
ensure_shared_grads(dec, shared_dec)
optimizer.step()
if total_episode % args.checkpoint_interval == 0:
args.curr_iter = total_episode
args.dynamics_module = os.path.join(
args.out, 'dynamics_module%s.pt' % total_episode)
torch.save((shared_r_module.state_dict(), args), os.path.join(
args.out, 'reward_module%s.pt' % total_episode))
results_dict['enc'] = shared_enc.state_dict()
results_dict['dec'] = shared_dec.state_dict()
results_dict['d_module'] = shared_d_module.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module%s.pt' % total_episode))
log("Saved model %d" % total_episode)
if writer_dir is not None and i_episode % \
(args.checkpoint_interval // args.num_processes) == 0:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
print(os.path.join(args.out, 'results_dict.pt'))
if writer_dir is not None:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
print(os.path.join(args.out, 'results_dict.pt'))
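if __name__ == '__main__':
    # Added sketch (not part of the original file): the discounted-return / GAE recursion
    # used in the training loop above, shown on a toy 3-step episode with constant value
    # estimates. gamma and tau stand in for args.gamma and args.tau.
    gamma, tau = 0.99, 1.0
    rewards = [0.0, 0.0, 1.0]
    values = [torch.zeros(1, 1) + 0.5, torch.zeros(1, 1) + 0.5, torch.zeros(1, 1) + 0.5, torch.zeros(1, 1)]
    R, gae = torch.zeros(1, 1), torch.zeros(1, 1)
    for i in reversed(range(len(rewards))):
        R = gamma * R + rewards[i]
        delta_t = rewards[i] + gamma * values[i + 1] - values[i]
        gae = gae * gamma * tau + delta_t
        print('step {}: discounted return {:.3f}, GAE advantage {:.3f}'.format(i, R.item(), gae.item()))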
|
ddr-master
|
train_online.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import gym
from gym.spaces.box import Box
from rllab.envs.mujoco.swimmer_env import SwimmerEnv
from rllab.envs.mujoco.ant_env import AntEnv
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.envs.mujoco.hopper_env import HopperEnv
from rllab.envs.mujoco.humanoid_env import HumanoidEnv
from rllab.envs.mujoco.simple_humanoid_env import SimpleHumanoidEnv
from rllab.envs.mujoco.maze.point_maze_env import PointMazeEnv
from rllab.envs.mujoco.maze.swimmer_maze_env import SwimmerMazeEnv
from rllab.envs.mujoco.maze.ant_maze_env import AntMazeEnv
from rllab.envs.mujoco.inverted_double_pendulum_env import InvertedDoublePendulumEnv
from rllab.misc import ext
from rllab.envs.normalized_env import normalize
from common import *
def create_env(env_str, framework='gym', args=None, eval_flag=False, norm=True,
rank=0):
if framework == 'gym':
env = gym.make(env_str)
if norm:
env = NormalizedEnv(env)
elif framework == 'rllab':
if not hasattr(args, 'file_path'):
args.file_path = None
if env_str.endswith('MazeEnv'):
if not hasattr(args, 'coef_inner_rew'):
args.coef_inner_rew = 0.
if not hasattr(args, 'maze_structure'):
args.maze_structure = None
if not hasattr(args, 'random_start'):
args.random_start = False
if not hasattr(args, 'difficulty'):
args.difficulty = -1
difficulty = args.difficulty
if args.difficulty > 1 and not eval_flag:
if args.difficulty <= 5:
difficulty = np.random.choice(range(
args.difficulty - 1, args.difficulty + 1))
elif args.difficulty == -1:
difficulty = np.random.choice([1, 2, 3, 4, 5, -1])
env = eval(env_str)(maze_id=args.maze_id, length=args.maze_length,
coef_inner_rew=args.coef_inner_rew,
structure=args.maze_structure,
file_path=args.file_path,
random_start=args.random_start,
difficulty=difficulty)
env.horizon = args.max_episode_length
vlog(args.maze_structure, args.v)
else:
env = eval(env_str)(file_path=args.file_path)
if norm:
env = normalize(env)
else:
raise("framework not supported")
env.reset()
set_seed(args.seed + rank, env, framework)
return env
def wrapper(env):
def _wrap():
return env
return _wrap
def get_obs(env, framework):
if framework == 'gym':
state = env.unwrapped._get_obs()
elif framework == 'rllab':
state = env.get_current_obs()
else:
raise("framework not supported")
return state
def set_seed(seed, env, framework):
if framework == 'gym':
env.unwrapped.seed(seed)
elif framework == 'rllab':
ext.set_seed(seed)
else:
raise("framework not supported")
return env
def reset_env(env, args):
"""Reset env. Can differ based on env. e.g. in maze maybe we want to randomly
deposit the agent in different locations?"""
env.reset()
return get_obs(env, args.framework)
class NormalizedEnv(gym.ObservationWrapper):
def __init__(self, env=None):
super(NormalizedEnv, self).__init__(env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def _observation(self, observation):
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + \
observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + \
observation.std() * (1 - self.alpha)
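        # bias-correct the exponential moving averages so early estimates are not
        # biased toward zero (the same correction Adam applies to its moments)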
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
return (observation - unbiased_mean) / (unbiased_std + 1e-8)
|
ddr-master
|
envs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from envs import *
from model import R_Module
from common import *
from tensorboardX import SummaryWriter
def ensure_shared_grads(model, shared_model):
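    # Point the shared model's gradients at this worker's local gradients so the
    # shared optimizer can apply them; stop once the shared gradients are set.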
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def train_rewards(rank, args, shared_model, enc, optimizer=None, writer_dir=None,
d_module=None):
"""
Arguments:
- writer: the tensorboard summary writer directory (note: can't get it working directly with the SummaryWriter object)
"""
# create writer here itself
writer = None
if writer_dir is not None:
writer = SummaryWriter(log_dir=writer_dir)
results_dict = {
'reward': [],
'policy_loss': [],
'value_loss': [],
'mean_entropy': [],
'mean_predicted_value': []
}
running_t, running_reward, running_value_loss, running_policy_loss, \
running_reward_loss = 0, 0, 0, 0, 0
torch.manual_seed(args.seed + rank)
env = create_env(args.env_name, framework=args.framework, args=args)
set_seed(args.seed + rank, env, args.framework)
model = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=args.baseline,
state_space=env.observation_space.shape[0])
max_rollout = 0
if args.planning:
max_rollout = args.rollout
if args.from_checkpoint is not None:
model_state, _ = torch.load(args.from_checkpoint, map_location=lambda storage, loc: storage)
model.load_state_dict(model_state)
    # fall back to a per-process Adam when no shared optimizer is provided
if optimizer is None:
optimizer = optim.Adam(shared_model.parameters(), lr=args.lr, eps=args.eps)
model.train()
done = True
episode_length = 0
i_episode, total_episode = 0, 0
start = time.time()
while total_episode < args.num_episodes:
# Sync with the shared model
model.load_state_dict(shared_model.state_dict())
if done:
cx_p = Variable(torch.zeros(1, args.dim))
hx_p = Variable(torch.zeros(1, args.dim))
cx_d = Variable(torch.zeros(1, args.dim))
hx_d = Variable(torch.zeros(1, args.dim))
i_episode += 1
episode_length = 0
total_episode = args.num_steps * (i_episode - 1) + rank
start = time.time()
last_episode_length = episode_length
if not args.single_env and args.env_name.endswith('MazeEnv'): # generate new maze
env = create_env(
args.env_name, framework=args.framework, args=args)
state = env.reset()
state = Variable(torch.from_numpy(state).float())
if not args.baseline:
state = enc(state)
else:
cx_p = Variable(cx_p.data)
hx_p = Variable(hx_p.data)
cx_d = Variable(cx_d.data)
hx_d = Variable(hx_d.data)
values = []
value_preds = []
log_probs = []
rewards = []
total_actions = []
entropies = []
obses = []
hx_ps = []
cx_ps = []
step = 0
while step < args.num_steps:
episode_length += 1
if args.planning:
_, actions, (hx_p, cx_p), (hx_d, cx_d), values, es, \
lps = mcts(
env, state, model, d_module, enc, (hx_p, cx_p), (hx_d, cx_d),
args, discrete=args.discrete)
log_probs += lps
entropies += es
actions = actions[:1]
else:
obses.append(state.unsqueeze(0))
hx_ps.append(hx_p)
cx_ps.append(cx_p)
value, logit, (hx_p, cx_p) = model((
state.unsqueeze(0), (hx_p, cx_p)))
action, entropy, log_prob = get_action(
logit, discrete=args.discrete)
vlog("Action: %s\t Bounds: %s" % (str(action), str(
(env.action_space.low, env.action_space.high))), args.v)
entropies.append(entropy.mean().data)
actions = [action]
values.append(value)
log_probs.append(log_prob)
for action in actions:
state, reward, done, _ = env.step(action.data.numpy())
if args.neg_reward:
reward = -reward
state = Variable(torch.from_numpy(state).float())
if args.clip_reward:
reward = max(min(reward, 1), -1)
if not args.baseline:
state = enc(state)
rewards.append(reward)
total_actions.append(action)
step += 1
if done:
break
if done:
break
R = torch.zeros(1, 1)
if not done:
value, _, _ = model((state.unsqueeze(0), (hx_p, cx_p)))
R = value.data
done = True
values.append(Variable(R))
policy_loss = 0
value_loss = 0
advantages = np.zeros_like(rewards, dtype=float)
R = Variable(R)
gae = torch.zeros(1, 1)
Rs = np.zeros_like(rewards, dtype=float)
vlog("values: %s" % str([v.data[0,0] for v in values]), args.v)
for i in reversed(range(len(rewards))):
R = args.gamma * R + rewards[i]
Rs[i] = R
advantage = R - values[i]
advantages[i] = advantage
if args.algo == 'a3c':
value_loss += 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
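            # delta_t = r_t + gamma * V(s_{t+1}) - V(s_t);
            # gae accumulates backwards as gae = gamma * tau * gae + delta_t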
if args.gae:
delta_t = rewards[i] + args.gamma * values[i + 1].data \
- values[i].data
gae = gae * args.gamma * args.tau + delta_t
policy_loss -= (log_probs[i] * Variable(gae).expand_as(
log_probs[i])).mean()
else:
policy_loss -= advantage * (log_probs[i].mean())
if args.algo == 'a3c':
optimizer.zero_grad()
(policy_loss + args.value_loss_coef * value_loss - \
args.entropy_coef * np.mean(entropies)).backward()
torch.nn.utils.clip_grad_norm(model.parameters(), args.max_grad_norm)
ensure_shared_grads(model, shared_model)
optimizer.step()
        ######## Bookkeeping and logging ########
U = 1. / min(i_episode, 100)
running_reward = running_reward * (1 - U) + sum(rewards) * U
running_t = running_t * (1 - U) + episode_length * U
running_policy_loss = running_policy_loss * (1 - U) + policy_loss.squeeze().data[0] * U
running_value_loss = running_value_loss * (1 - U) + \
args.value_loss_coef * value_loss.squeeze().data[0] * U
mean_entropy = np.mean([e.mean().data[0] for e in entropies])
mean_predicted_value = np.mean([v.sum().data[0] for v in values])
if total_episode % args.log_interval == 0 and done:
if not args.discrete:
sample_logits = (list(logit[0].data[0].numpy()),
list(logit[1].data[0].numpy()))
else:
sample_logits = list(logit.data[0].numpy())
log(
'Frames {}\t'.format(total_episode) + \
'Avg reward: {:.2f}\tAverage length: {:.2f}\t'.format(
running_reward, running_t) + \
'Entropy: {:.2f}\tTime: {:.2f}\tRank: {}\t'.format(
mean_entropy, time.time() - start, rank) + \
'Policy Loss: {:.2f}\t'.format(running_policy_loss) + \
# 'Reward Loss: {:.2f}\t'.format(running_reward_loss) + \
'Weighted Value Loss: {:.2f}\t'.format(running_value_loss))
vlog('Sample Action: %s\t' % str(list(action.data.numpy())) + \
'Logits: %s\t' % str(sample_logits), args.v)
# write summaries here
if writer_dir is not None and done:
log('writing to tensorboard')
# running losses
writer.add_scalar('reward/running_reward', running_reward, i_episode)
writer.add_scalar('reward/running_policy_loss', running_policy_loss, i_episode)
writer.add_scalar('reward/running_value_loss', running_value_loss, i_episode)
# current episode stats
writer.add_scalar('reward/episode_reward', sum(rewards), i_episode)
writer.add_scalar('reward/episode_policy_loss', policy_loss.squeeze().data[0], i_episode)
writer.add_scalar('reward/episode_value_loss', value_loss.squeeze().data[0], i_episode)
writer.add_scalar('reward/mean_entropy', mean_entropy, i_episode)
writer.add_scalar('reward/mean_predicted_value', mean_predicted_value, i_episode)
results_dict['reward'].append(sum(rewards))
results_dict['policy_loss'].append(policy_loss.squeeze().data[0])
results_dict['value_loss'].append(value_loss.squeeze().data[0])
results_dict['mean_entropy'].append(mean_entropy)
results_dict['mean_predicted_value'].append(mean_predicted_value)
if total_episode % args.checkpoint_interval == 0:
args.curr_iter = total_episode
args.optimizer = optimizer
torch.save((shared_model.state_dict(), args), os.path.join(
args.out, args.model_name + '%s.pt' % total_episode))
log("Saved model %d rank %s" % (total_episode, rank))
log(os.path.join(
args.out, args.model_name + '%s.pt' % total_episode))
if writer_dir is not None and i_episode % \
(args.checkpoint_interval // args.num_processes) == 0:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
log(os.path.join(args.out, 'results_dict.pt'))
if writer_dir is not None:
torch.save(results_dict,
os.path.join(args.out, 'results_dict.pt'))
log(os.path.join(args.out, 'results_dict.pt'))
|
ddr-master
|
train_reward_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
from itertools import chain
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from torch.autograd import Variable
from model import Encoder, Decoder, D_Module
from common import *
def get_dynamics_losses(s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat,
a_hat, curr_actions, discrete=False):
# reconstruction loss
recon_loss = F.mse_loss(s_hat, s)
# next state prediction loss
model_loss = F.mse_loss(s_prime_hat, s_prime)
# net decoder loss
dec_loss = (F.mse_loss(s_hat, s) + F.mse_loss(s_prime_hat, s_prime))
# action reconstruction loss
if discrete:
a_hat = F.log_softmax(a_hat)
inv_loss = F.mse_loss(a_hat, curr_actions)
# representation space constraint
forward_loss = F.mse_loss(z_prime_hat, z_prime.detach())
return recon_loss, model_loss, dec_loss, inv_loss, forward_loss
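# Illustrative helper (not part of the original code): train_dynamics below
# combines these loss terms as forward + inv_loss_coef * inverse +
# dec_loss_coef * decoder, with the coefficients taken from the argument parser.
def combined_dynamics_loss(forward_loss, inv_loss, dec_loss, args):
    return forward_loss + args.inv_loss_coef * inv_loss + \
        args.dec_loss_coef * dec_loss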
def get_maze_dynamics_losses(s, s_hat_logits,
s_prime, s_prime_hat_logits,
z_prime, z_prime_hat,
a_hat_logits, curr_actions, discrete=True,
dec_mask=None):
"""
dec_mask: if to reweigh the weights on the agent and goal locations,
"""
# reconstruction loss
if dec_mask is not None:
recon_loss = F.cross_entropy(s_hat_logits.view(-1, 2), s.view(-1).long(), reduce=False)
recon_loss = (recon_loss * dec_mask).mean()
else:
recon_loss = F.cross_entropy(s_hat_logits.view(-1, 2), s.view(-1).long())
# next state prediction loss
if dec_mask is not None:
model_loss = F.cross_entropy(s_prime_hat_logits.view(-1, 2), s_prime.view(-1).long(), reduce=False)
model_loss = (model_loss * dec_mask).mean()
else:
model_loss = F.cross_entropy(s_prime_hat_logits.view(-1, 2), s_prime.view(-1).long())
# net decoder loss
dec_loss = recon_loss + model_loss
# action reconstruction loss
inv_loss = F.cross_entropy(a_hat_logits, curr_actions.view(-1).long())
# representation space constraint
forward_loss = F.mse_loss(z_prime_hat, z_prime.detach())
return recon_loss, model_loss, dec_loss, inv_loss, forward_loss
class DynamicsDataset(data.Dataset):
def __init__(self, root, size, batch, rollout):
self.size = size
self.root = root
self.actions = []
self.states = []
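        # trajectories are stored in consecutive blocks named
        # 'states_actions_<start>_<end>.pt', each holding `batch` trajectories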
start = 0
while len(self.actions) < size:
end = start + batch
states, actions = torch.load(
os.path.join(self.root, 'states_actions_%s_%s.pt' % (start, end)))
self.states += states
self.actions += actions
start = end
rollout = len(actions[0])
self.actions = torch.Tensor(self.actions[:size]).view(
self.size, rollout, -1)
self.states = torch.Tensor(self.states[:size]).view(
self.size, rollout + 1, -1)
def __getitem__(self, index):
assert index < self.size
return self.states[index], self.actions[index]
def __len__(self):
return len(self.actions)
class MazeDynamicsDataset(data.Dataset):
def __init__(self, root, size, batch, rollout):
"""
batch: is the size of the blocks of the data
size: total size of the dataset, num of trajectories
rollout: length of the trajectory
"""
self.size = size
self.root = root
self.actions = []
self.states = []
start = 0
while len(self.actions) < size:
end = start + batch
states, actions = torch.load(
os.path.join(self.root, 'states_actions_%s_%s.pt' % (start, end)))
self.states += states
self.actions += actions
start = end
# convert the state and actions to the float
self.states = np.asarray(self.states, dtype=np.float32)
self.actions = np.asarray(self.actions, dtype=np.float32)
# convert to tensors
self.actions = torch.Tensor(self.actions).view(
self.size, rollout, -1)
self.states = torch.Tensor(self.states).view(
self.size, rollout + 1, -1)
def __getitem__(self, index):
assert index < self.size
return self.states[index], self.actions[index]
def __len__(self):
return len(self.actions)
def forward(i, states, target_actions, enc, dec, d_module, args,
d_init=None, dec_mask=None):
if args.framework == "mazebase":
# cx_d = Variable(torch.zeros(states.size(0), args.lstm_dim))
# hx_d = Variable(torch.zeros(states.size(0), args.lstm_dim))
hx_d, cx_d = d_init(Variable(states[:, 0, :]).contiguous().cuda())
else:
cx_d = Variable(torch.zeros(states.size(0), args.dim))
hx_d = Variable(torch.zeros(states.size(0), args.dim))
if args.gpu:
cx_d = cx_d.cuda()
hx_d = hx_d.cuda()
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
s = None
for r in range(args.rollout):
curr_state = states[:, r, :]
next_state = states[:, r + 1, :]
if args.framework == "mazebase":
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, 1))
else:
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, args.action_space.shape[0]))
if s is None:
s = Variable(curr_state.contiguous())
if args.gpu:
s = s.cuda()
z = enc(s)
s_prime = Variable(next_state.contiguous())
if args.gpu:
s_prime = s_prime.cuda()
z_prime = enc(s_prime)
if args.gpu:
curr_actions = curr_actions.cuda()
if args.framework == "mazebase":
s_hat, s_hat_binary = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
z, curr_actions.long(), z_prime.detach(), (hx_d, cx_d))
s_prime_hat, s_prime_hat_binary = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_maze_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
curr_actions, discrete=args.discrete, dec_mask= dec_mask)
            # calculate the inverse-model accuracy here
_, predicted_a = torch.max(F.sigmoid(a_hat),1)
current_epoch_predicted_a_hat += (predicted_a == curr_actions.view(-1).long()).sum().data[0]
current_epoch_actions += curr_actions.size(0)
else:
s_hat = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, curr_actions, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat,
a_hat, curr_actions, discrete=args.discrete)
inv_loss += i_loss
dec_loss += d_loss
forward_loss += f_loss
recon_loss += r_loss
model_loss += m_loss
s = s_prime
z = z_prime
return forward_loss, inv_loss, dec_loss, recon_loss, model_loss, \
current_epoch_predicted_a_hat, current_epoch_actions
def forward_planning(i, states, target_actions, enc, dec, d_module, args,
d_init=None, dec_mask=None):
cx_d = Variable(torch.zeros(states.size(0), args.dim))
hx_d = Variable(torch.zeros(states.size(0), args.dim))
if args.gpu:
cx_d = cx_d.cuda()
hx_d = hx_d.cuda()
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
s = None
for r in range(args.rollout):
curr_state = states[:, r, :]
next_state = states[:, r + 1, :]
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, args.action_space.shape[0]))
if s is None:
s = Variable(curr_state.contiguous())
if args.gpu:
s = s.cuda()
z = enc(s)
s_prime = Variable(next_state.contiguous())
if args.gpu:
s_prime = s_prime.cuda()
z_prime = enc(s_prime)
if args.gpu:
curr_actions = curr_actions.cuda()
s_hat = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, curr_actions, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat,
a_hat, curr_actions, discrete=args.discrete)
inv_loss += i_loss
dec_loss += d_loss
forward_loss += f_loss
recon_loss += r_loss
model_loss += m_loss
s = s_prime
z = z_prime_hat
return forward_loss, inv_loss, dec_loss, recon_loss, model_loss, \
current_epoch_predicted_a_hat, current_epoch_actions
def multiple_forward(i, states, target_actions, enc, dec, d_module, args,
d_init=None, dec_mask = None):
cx_d = Variable(torch.zeros(states.size(0), args.dim))
hx_d = Variable(torch.zeros(states.size(0), args.dim))
if args.gpu:
cx_d = cx_d.cuda()
hx_d = hx_d.cuda()
dec_loss = 0
inv_loss = 0
model_loss = 0
recon_loss = 0
forward_loss = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
s = None
for r in range(args.rollout):
curr_state = states[:, r, :]
next_state = states[:, r + 1, :]
if args.framework == "mazebase":
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, 1))
else:
curr_actions = Variable(target_actions[:, r].contiguous().view(
-1, args.action_space.shape[0]))
if s is None:
s = Variable(curr_state.contiguous())
if args.gpu:
s = s.cuda()
z = enc(s)
s_prime = Variable(next_state.contiguous())
if args.gpu:
s_prime = s_prime.cuda()
z_prime = enc(s_prime)
if args.gpu:
curr_actions = curr_actions.cuda()
if args.framework == "mazebase":
s_hat, s_hat_binary = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
z, curr_actions.long(), z_prime.detach(), (hx_d, cx_d))
s_prime_hat, s_prime_hat_binary = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_maze_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
curr_actions, discrete=args.discrete, dec_mask= dec_mask)
            # calculate the inverse-model accuracy here
_, predicted_a = torch.max(F.sigmoid(a_hat),1)
current_epoch_predicted_a_hat += (predicted_a == curr_actions.view(-1).long()).sum().data[0]
current_epoch_actions += curr_actions.size(0)
else:
s_hat = dec(z)
z_prime_hat, a_hat, (hx_d, cx_d) = d_module(
(z, z_prime, curr_actions, (hx_d, cx_d)))
s_prime_hat = dec(z_prime_hat)
r_loss, m_loss, d_loss, i_loss, f_loss = get_dynamics_losses(
s, s_hat, s_prime, s_prime_hat, z_prime, z_prime_hat, a_hat,
curr_actions, discrete=args.discrete)
inv_loss += i_loss
dec_loss += d_loss
forward_loss += f_loss
recon_loss += r_loss
model_loss += m_loss
s = s_prime
z = z_prime_hat
return forward_loss, inv_loss, dec_loss, recon_loss, model_loss, \
current_epoch_predicted_a_hat, current_epoch_actions
def train_dynamics(env, args, writer=None):
"""
Trains the Dynamics module. Supervised.
Arguments:
env: the initialized environment (rllab/gym)
args: input arguments
writer: initialized summary writer for tensorboard
"""
args.action_space = env.action_space
# Initialize models
enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
d_module = D_Module(env.action_space.shape[0], args.dim, args.discrete)
if args.from_checkpoint is not None:
results_dict = torch.load(args.from_checkpoint)
enc.load_state_dict(results_dict['enc'])
dec.load_state_dict(results_dict['dec'])
d_module.load_state_dict(results_dict['d_module'])
all_params = chain(enc.parameters(), dec.parameters(), d_module.parameters())
if args.transfer:
for p in enc.parameters():
p.requires_grad = False
for p in dec.parameters():
p.requires_grad = False
all_params = d_module.parameters()
optimizer = torch.optim.Adam(all_params, lr=args.lr,
weight_decay=args.weight_decay)
if args.gpu:
enc = enc.cuda()
dec = dec.cuda()
d_module = d_module.cuda()
# Initialize datasets
val_loader = None
train_dataset = DynamicsDataset(
args.train_set, args.train_size, batch=args.train_batch,
rollout=args.rollout)
val_dataset = DynamicsDataset(args.test_set, 5000, batch=args.test_batch,
rollout=args.rollout)
val_loader = torch.utils.data.DataLoader(
dataset=val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers)
results_dict = {
'dec_losses': [],
'forward_losses': [],
'inverse_losses': [],
'total_losses': [],
'enc': None,
'dec': None,
'd_module': None,
'd_init':None,
'args': args
}
total_action_taken = 0
correct_predicted_a_hat = 0
# create the mask here for re-weighting
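    # dec_mask re-weights the Agent and Goal cells of the 9-feature maze encoding
    # when computing the decoder's cross-entropy reconstruction loss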
dec_mask = None
if args.dec_mask is not None:
dec_mask = torch.ones(9)
game_vocab = dict([(b, a) for a, b in enumerate(sorted(env.game.all_possible_features()))])
dec_mask[game_vocab['Agent']] = args.dec_mask
dec_mask[game_vocab['Goal']] = args.dec_mask
dec_mask = dec_mask.expand(args.batch_size, args.maze_length,args.maze_length,9).contiguous().view(-1)
dec_mask = Variable(dec_mask, requires_grad = False)
if args.gpu:
dec_mask = dec_mask.cuda()
for epoch in range(1, args.num_epochs + 1):
enc.train()
dec.train()
d_module.train()
if args.framework == "mazebase":
d_init.train()
# for measuring the accuracy
train_acc = 0
current_epoch_actions = 0
current_epoch_predicted_a_hat = 0
start = time.time()
for i, (states, target_actions) in enumerate(train_loader):
optimizer.zero_grad()
if args.framework != "mazebase":
forward_loss, inv_loss, dec_loss, recon_loss, model_loss, _, _ = forward_planning(
i, states, target_actions, enc, dec, d_module, args)
else:
forward_loss, inv_loss, dec_loss, recon_loss, model_loss, current_epoch_predicted_a_hat, current_epoch_actions = multiple_forward(
i, states, target_actions, enc, dec, d_module, args, d_init, dec_mask )
loss = forward_loss + args.inv_loss_coef * inv_loss + \
args.dec_loss_coef * dec_loss
if i % args.log_interval == 0:
log(
'Epoch [{}/{}]\tIter [{}/{}]\t'.format(
epoch, args.num_epochs, i+1, len(
train_dataset)//args.batch_size) + \
'Time: {:.2f}\t'.format(time.time() - start) + \
'Decoder Loss: {:.2f}\t'.format(dec_loss.data[0]) + \
'Forward Loss: {:.2f}\t'.format(forward_loss.data[0] ) + \
'Inverse Loss: {:.2f}\t'.format(inv_loss.data[0]) + \
'Loss: {:.2f}\t'.format(loss.data[0]))
results_dict['dec_losses'].append(dec_loss.data[0])
results_dict['forward_losses'].append(forward_loss.data[0])
results_dict['inverse_losses'].append(inv_loss.data[0])
results_dict['total_losses'].append(loss.data[0])
# write the summaries here
if writer:
writer.add_scalar('dynamics/total_loss', loss.data[0], epoch)
writer.add_scalar('dynamics/decoder', dec_loss.data[0], epoch)
writer.add_scalar(
'dynamics/reconstruction_loss', recon_loss.data[0], epoch)
writer.add_scalar(
'dynamics/next_state_prediction_loss',
model_loss.data[0], epoch)
writer.add_scalar('dynamics/inv_loss', inv_loss.data[0], epoch)
writer.add_scalar(
'dynamics/forward_loss', forward_loss.data[0], epoch)
writer.add_scalars(
'dynamics/all_losses',
{"total_loss":loss.data[0],
"reconstruction_loss":recon_loss.data[0],
"next_state_prediction_loss":model_loss.data[0],
"decoder_loss":dec_loss.data[0],
"inv_loss":inv_loss.data[0],
"forward_loss":forward_loss.data[0],
} , epoch)
loss.backward()
correct_predicted_a_hat += current_epoch_predicted_a_hat
total_action_taken += current_epoch_actions
            # TODO: check whether training works without gradient clipping
torch.nn.utils.clip_grad_norm(all_params, args.max_grad_norm)
optimizer.step()
            # maybe also log the generated image here
# writer.add_image()
# Run validation
if val_loader is not None:
enc.eval()
dec.eval()
d_module.eval()
forward_loss, inv_loss, dec_loss = 0, 0, 0
for i, (states, target_actions) in enumerate(val_loader):
f_loss, i_loss, d_loss, _, _, _, _ = forward_planning(
i, states, target_actions, enc, dec, d_module, args)
forward_loss += f_loss
inv_loss += i_loss
dec_loss += d_loss
loss = forward_loss + args.inv_loss_coef * inv_loss + \
args.dec_loss_coef * dec_loss
if writer:
writer.add_scalar('val/forward_loss', forward_loss.data[0] / i, epoch)
writer.add_scalar('val/inverse_loss', inv_loss.data[0] / i, epoch)
writer.add_scalar('val/decoder_loss', dec_loss.data[0] / i, epoch)
log(
'[Validation]\t' + \
'Decoder Loss: {:.2f}\t'.format(dec_loss.data[0] / i) + \
'Forward Loss: {:.2f}\t'.format(forward_loss.data[0] / i) + \
'Inverse Loss: {:.2f}\t'.format(inv_loss.data[0] / i) + \
'Loss: {:.2f}\t'.format(loss.data[0] / i))
if epoch % args.checkpoint == 0:
results_dict['enc'] = enc.state_dict()
results_dict['dec'] = dec.state_dict()
results_dict['d_module'] = d_module.state_dict()
if args.framework == "mazebase":
results_dict['d_init'] = d_init.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module_epoch%s.pt' % epoch))
log('Saved model %s' % epoch)
results_dict['enc'] = enc.state_dict()
results_dict['dec'] = dec.state_dict()
results_dict['d_module'] = d_module.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module_epoch%s.pt' % epoch))
print(os.path.join(args.out, 'dynamics_module_epoch%s.pt' % epoch))
|
ddr-master
|
train_dynamics_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Train Modules')
# Learning parameters
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.95,
help='parameter for GAE (default: 0.95)')
parser.add_argument('--eps', type=float, default=1e-5,
                        help='optimizer epsilon (default: 1e-5)')
parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer alpha (default: 0.99)')
parser.add_argument('--max-grad-norm', type=float, default=50,
                        help='max norm for gradient clipping (default: 50)')
parser.add_argument('--no-shared', default=False,
help='use an optimizer without shared momentum.')
parser.add_argument('--dim', type=int, default=32,
help='number of dimensions of representation space')
parser.add_argument('--use-conv', action='store_true', help='Use conv layers')
parser.add_argument('--discrete', action='store_true', help='discrete action space')
parser.add_argument('--weight-decay', type=float, default=0.0001)
    # TODO: finish implementation for discrete action spaces.
# Environment settings
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=40,
help='how many training processes to use (default: 40)')
parser.add_argument('--num-steps', type=int, default=200,
                        help='number of forward steps in A3C (default: 200)')
parser.add_argument('--framework', default='gym',
help='framework of env (default: gym)')
parser.add_argument('--env-name', default='InvertedPendulum-v1',
help='environment to train on (default: InvertedPendulum-v1)')
parser.add_argument('--maze-id', type=int, default=0)
parser.add_argument('--maze-length', type=int, default=1)
# Dynamics Module settings
parser.add_argument('--rollout', type=int, default=20, help="rollout for goal")
parser.add_argument('--train-set', type=str, default=None)
parser.add_argument('--train-batch', type=int, default=2500)
parser.add_argument('--test-set', type=str)
parser.add_argument('--test-batch', type=int, default=2500)
parser.add_argument('--train-size', type=int, default=100000)
parser.add_argument('--dec-loss-coef', type=float, default=0.1,
help='decoder loss coefficient (default: 0.1)')
parser.add_argument('--forward-loss-coef', type=float, default=10,
help='forward loss coefficient (default: 10)')
parser.add_argument('--inv-loss-coef', type=float, default=100,
                        help='inverse loss coefficient (default: 100)')
parser.add_argument('--num-epochs', type=int, default=1000)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--num-workers', type=int, default=20)
parser.add_argument('--out', type=str, default='/checkpoint/amyzhang/ddr/models')
parser.add_argument('--dec-mask', type=float, default = None,
help="to use masking while calculating the decoder reconstruction loss ")
# Rewards Module settings
parser.add_argument('--coef-inner-rew', type=float, default=1.)
parser.add_argument('--checkpoint-interval', type=int, default=1000)
parser.add_argument('--num-episodes', type=int, default=1000000,
help='max number of episodes to train')
parser.add_argument('--max-episode-length', type=int, default=500,
help='maximum length of an episode (default: 500)')
parser.add_argument('--curriculum', type=int, default=0,
help='number of iterations in curriculum. (default: 0, no curriculum)')
parser.add_argument('--single-env', action='store_true')
parser.add_argument('--entropy-coef', type=float, default=0.,
help='entropy term coefficient (default: 0.), use 0.0001 for mujoco')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--rew-loss-coef', type=float, default=0,
help='reward loss coefficient (default: 0)')
parser.add_argument('--lstm-dim', type=int, default=128,
help='number of dimensions of lstm hidden state')
parser.add_argument('--difficulty', type=int, default=-1, help='difficulty of maze')
parser.add_argument('--clip-reward', action='store_true')
parser.add_argument('--finetune-enc', action='store_true',
help="allow the ActorCritic to change the observation space representation")
parser.add_argument('--gae', action='store_true')
parser.add_argument('--algo', default='a3c',
help='algorithm to use: a3c')
# General training settings
parser.add_argument('--checkpoint', type=int, default=10000)
parser.add_argument('--log-interval', type=int, default=100,
help='interval between training status logs (default: 100)')
parser.add_argument('-v', action='store_true', help='verbose logging')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--log-dir', type=str, default='/checkpoint/amyzhang/ddr/logs',
help='The logging directory to record the logs and tensorboard summaries')
parser.add_argument('--reset-dir', action='store_true',
help="give this argument to delete the existing logs for the current set of parameters")
# transfer
parser.add_argument('--file-path', type=str, default=None,
help='path to XML file for mujoco')
parser.add_argument('--neg-reward', action='store_true',
help='set reward negative for transfer')
parser.add_argument('--random-start', action='store_true')
# What to run
parser.add_argument('--train-dynamics', action='store_true')
parser.add_argument('--train-reward', action='store_true')
parser.add_argument('--train-online', action='store_true',
help='train both modules online')
parser.add_argument('--dynamics-module', type=str, default=None,
help='Encoder from dynamics module')
parser.add_argument('--from-checkpoint', type=str, default=None,
help='Start from stored model')
parser.add_argument('--baseline', action='store_true',
help='Running A3C baseline.')
parser.add_argument('--planning', action='store_true',
help='train with planning (reward and online only)')
parser.add_argument('--transfer', action='store_true',
help='Keep encoder and decoder static')
parser.add_argument('--eval-every', type=float, default=10)
parser.add_argument('--enc-dims', type=int, nargs='+', default=[256, 128])
parser.add_argument('--dec-dims', type=int, nargs='+', default=[128, 256])
parser.add_argument('--num-runs', type=int, default=5,
help='number of models to train in parallel')
parser.add_argument('--mcts', action='store_true', help='Monte Carlo Tree Search')
parser.add_argument('--render', action='store_true')
parser.add_argument('-b', type=int, default=4, help='branching factor')
parser.add_argument('-d', type=int, default=3, help='planning depth')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--local', action='store_true')
args = parser.parse_args()
return args
|
ddr-master
|
arguments.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from collections import deque
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from envs import create_env
from model import *
def test(rank, args, shared_model, counter):
torch.manual_seed(args.seed + rank)
env = create_env(args.env_name)
env.seed(args.seed + rank)
model = ActorCritic(env.observation_space.shape[0], env.action_space)
model.eval()
state = env.reset()
state = torch.from_numpy(state).float()
reward_sum = 0
done = True
start_time = time.time()
    # a quick hack to prevent the agent from getting stuck
actions = deque(maxlen=100)
episode_length = 0
while True:
episode_length += 1
# Sync with the shared model
if done:
model.load_state_dict(shared_model.state_dict())
cx_d = Variable(torch.zeros(1, 256), volatile=True)
hx_d = Variable(torch.zeros(1, 256), volatile=True)
cx_p = Variable(torch.zeros(1, 256), volatile=True)
hx_p = Variable(torch.zeros(1, 256), volatile=True)
else:
cx_d = Variable(cx_d.data, volatile=True)
hx_d = Variable(hx_d.data, volatile=True)
cx_p = Variable(cx_p.data, volatile=True)
hx_p = Variable(hx_p.data, volatile=True)
value, logit, (hx_d, cx_d), (hx_p, cx_p) = model((Variable(
state.unsqueeze(0), volatile=True), (hx_d, cx_d), (hx_p, cx_p)))
if args.discrete:
prob = F.softmax(logit)
action = prob.max(1, keepdim=True)[1].data.numpy()
else:
mu, sigma_sq = logit
sigma_sq = F.softplus(sigma_sq)
eps = torch.randn(mu.size())
action = (mu + sigma_sq.sqrt()*Variable(eps)).data
state, reward, done, _ = env.step(action[0, 0])
done = done or episode_length >= args.max_episode_length
reward_sum += reward
        # a quick hack to prevent the agent from getting stuck
actions.append(action[0, 0])
if actions.count(actions[0]) == actions.maxlen:
done = True
if done:
print("Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}".format(
time.strftime("%Hh %Mm %Ss",
time.gmtime(time.time() - start_time)),
counter.value, counter.value / (time.time() - start_time),
reward_sum, episode_length))
reward_sum = 0
episode_length = 0
actions.clear()
state = env.reset()
time.sleep(60)
state = torch.from_numpy(state).float()
|
ddr-master
|
test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def normalized_columns_initializer(weights, std=1.0):
out = torch.randn(weights.size())
out *= std / torch.sqrt(out.pow(2).sum(1, keepdim=True))
return out
def weights_init(m):
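    # Xavier/Glorot-style uniform initialisation for conv and linear layers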
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class Encoder(torch.nn.Module):
def __init__(self, obs_space, dim, use_conv=False):
"""
architecture should be input, so that we can pass multiple jobs !
"""
super(Encoder, self).__init__()
self.use_conv = use_conv
self.obs_space = obs_space
if use_conv:
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
else:
self.linear1 = nn.Linear(obs_space, dim)
self.linear2 = nn.Linear(dim, 32 * 3 * 3)
self.fc = nn.Linear(32 * 3 * 3, dim)
self.apply(weights_init)
self.train()
def forward(self, inputs):
# why elu and not relu ?
if self.use_conv:
x = F.elu(self.conv1(inputs))
x = F.elu(self.conv2(x))
x = F.elu(self.conv3(x))
x = F.elu(self.conv4(x))
else:
x = F.elu(self.linear1(inputs))
x = F.elu(self.linear2(x))
x = F.tanh(self.fc(x))
return x
class Decoder(torch.nn.Module):
def __init__(self, obs_space, dim, use_conv=False):
super(Decoder, self).__init__()
self.use_conv = use_conv
self.fc = nn.Linear(dim, 32 * 3 * 3)
if self.use_conv:
self.deconv1 = nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(32, 3, 3, stride=2, padding=1)
else:
self.linear1 = nn.Linear(32 * 3 * 3, dim)
self.linear2 = nn.Linear(dim, obs_space)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x = F.elu(self.fc(inputs))
if self.use_conv:
x = F.elu(self.deconv1(x))
x = F.elu(self.deconv2(x))
x = F.elu(self.deconv3(x))
x = self.deconv4(x)
else:
x = F.elu(self.linear1(x))
x = self.linear2(x)
return x
class D_Module(torch.nn.Module):
def __init__(self, action_space, dim, discrete=False):
super(D_Module, self).__init__()
self.dim = dim
self.discrete = discrete
self.za_embed = nn.Linear(2 * dim, dim)
self.lstm_dynamics = nn.LSTMCell(dim, dim)
self.z_embed = nn.Linear(dim, dim)
self.inv = nn.Linear(2 * dim, dim)
self.inv2 = nn.Linear(dim, action_space)
self.action_linear = nn.Linear(action_space, dim)
self.action_linear2 = nn.Linear(dim, dim)
self.apply(weights_init)
self.lstm_dynamics.bias_ih.data.fill_(0)
self.lstm_dynamics.bias_hh.data.fill_(0)
self.train()
def forward(self, inputs):
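        # inputs: current latent z, (optional) next latent z_prime, the action
        # taken, and the dynamics LSTM state; returns the predicted next latent
        # z_prime_hat, the reconstructed action a_hat, and the new LSTM state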
z, z_prime, actions, (hx_d, cx_d) = inputs
z = z.view(-1, self.dim)
a_embedding = F.elu(self.action_linear(actions))
a_embedding = self.action_linear2(a_embedding)
za_embedding = self.za_embed(
torch.cat([z, a_embedding.view(z.size())], 1))
hx_d, cx_d = self.lstm_dynamics(za_embedding, (hx_d, cx_d))
z_prime_hat = F.tanh(self.z_embed(hx_d))
# decode the action
if z_prime is not None:
z_prime = z_prime.view(-1, self.dim)
else:
z_prime = z_prime_hat
a_hat = F.elu(self.inv(torch.cat([z, z_prime], 1)))
a_hat = self.inv2(a_hat)
return z_prime_hat, a_hat, (hx_d, cx_d)
class R_Module(torch.nn.Module):
def __init__(self, action_space, dim, discrete=False, baseline=False,
state_space=None):
super(R_Module, self).__init__()
self.discrete = discrete
self.baseline = baseline
self.dim = dim
if baseline:
self.linear1 = nn.Linear(state_space, dim)
self.linear2 = nn.Linear(dim, dim)
self.lstm_policy = nn.LSTMCell(dim, dim)
self.actor_linear = nn.Linear(dim, action_space)
self.critic_linear = nn.Linear(dim, 1)
self.rhat_linear = nn.Linear(dim, 1)
if not discrete:
self.actor_sigma_sq = nn.Linear(dim, action_space)
self.apply(weights_init)
self.actor_linear.weight.data = normalized_columns_initializer(
self.actor_linear.weight.data, 0.01)
self.actor_linear.bias.data.fill_(0)
self.critic_linear.weight.data = normalized_columns_initializer(
self.critic_linear.weight.data, 1.0)
self.critic_linear.bias.data.fill_(0)
        # note: both LSTM biases are zeroed; ideally only the forget-gate bias would be 1
self.lstm_policy.bias_ih.data.fill_(0)
self.lstm_policy.bias_hh.data.fill_(0)
if not discrete:
self.actor_sigma_sq.weight.data = normalized_columns_initializer(
self.actor_sigma_sq.weight.data, 0.01)
self.actor_sigma_sq.bias.data.fill_(0)
self.train()
def forward(self, inputs):
inputs, (hx_p, cx_p) = inputs
if self.baseline:
inputs = F.elu(self.linear1(inputs))
inputs = F.elu(self.linear2(inputs))
hx_p, cx_p = self.lstm_policy(inputs, (hx_p, cx_p))
x = hx_p
if self.discrete:
action = self.actor_linear(x)
else:
action = (self.actor_linear(x), self.actor_sigma_sq(x))
return self.critic_linear(x), action, (hx_p, cx_p)
|
ddr-master
|
model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import sys
from datetime import datetime
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from model import Encoder, D_Module
pi = Variable(torch.FloatTensor([math.pi]))
def get_prob(x, mu, sigma_sq):
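    # Gaussian density N(x; mu, sigma_sq): exp(-(x - mu)^2 / (2 * sigma_sq))
    # normalised by 1 / sqrt(2 * pi * sigma_sq), with small epsilons for stability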
a = (-1*(Variable(x)-mu).pow(2)/(2*sigma_sq + 1e-5)).exp()
b = 1/(2*sigma_sq*pi.expand_as(sigma_sq) + 1e-5).sqrt()
return a*b
def log(msg):
print("[%s]\t%s" % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), msg))
sys.stdout.flush()
def vlog(msg, v):
if v:
log(msg)
def load_encoder(obs_space, args, freeze=True):
enc = Encoder(obs_space, args.dim,
use_conv=args.use_conv)
enc_state = torch.load(args.dynamics_module, map_location=lambda storage,
loc: storage)['enc']
enc.load_state_dict(enc_state)
enc.eval()
if freeze:
for p in enc.parameters():
p.requires_grad = False
return enc
def load_d_module(action_space, args, freeze=True):
d_module_state = torch.load(args.dynamics_module, map_location=lambda storage,
loc: storage)['d_module']
d_module = D_Module(action_space, args.dim, args.discrete)
d_module.load_state_dict(d_module_state)
d_module.eval()
if freeze:
for p in d_module.parameters():
p.requires_grad = False
return d_module
def get_action(logit, discrete, v=False):
"""Compute action, entropy, and log prob for discrete and continuous case
from logit.
"""
if discrete:
prob = F.softmax(logit)
log_prob = F.log_softmax(logit)
        # entropy of the categorical policy, used for exploration regularization
entropy = -(log_prob * prob).sum(1, keepdim=True)
action = prob.multinomial()
log_prob = log_prob.gather(1, action)
else:
mu, sigma_sq = logit
sigma_sq = F.softplus(sigma_sq)
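        # softplus keeps the scale parameter positive; the action is then sampled
        # from a Gaussian parameterised by (mu, sigma_sq)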
vlog('sigma_sq: %s' % str(sigma_sq.data), v)
action = torch.normal(mu, sigma_sq)
prob = get_prob(action.data, mu, sigma_sq) + 1e-5
entropy = -0.5*((2 * sigma_sq * pi.expand_as(sigma_sq) + 1e-5).log() + 1)
log_prob = prob.log()
return action, entropy, log_prob
def eval_action(logit, action, discrete, v=False):
mu, sigma_sq = logit
sigma_sq = F.softplus(sigma_sq)
vlog('sigma_sq: %s' % str(sigma_sq.data), v)
prob = get_prob(action.data, mu, sigma_sq) + 1e-5
entropy = -0.5*((2 * sigma_sq * pi.expand_as(sigma_sq) + 1e-5).log() + 1)
log_prob = prob.log()
return entropy, log_prob
def mcts(env, z_hat, r_module, d_module, enc, r_state, d_state, args, discrete,
use_env=False):
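    # Breadth-limited lookahead of depth args.d: at each level the reward module
    # scores every surviving state and proposes args.b candidate actions, the
    # dynamics module (or the real env when use_env=True) predicts the next
    # latent state, the children are pruned with prune(), and finally the
    # trajectory with the highest summed value is returned.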
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from common import get_action
from envs import get_obs
(hx_r, cx_r) = r_state
(hx_d, cx_d) = d_state
parent_states = [(z_hat, [], (hx_r, cx_r), (hx_d, cx_d), [], [], [])]
child_states = []
init_state = get_obs(env, args.framework)
for i in range(args.d):
actions = []
best_val = None
for z_hat, trajectory, (hx_r, cx_r), (hx_d, cx_d), val, entropies, \
logprobs in parent_states:
if best_val is None:
best_val = val
elif val < best_val:
continue
value, logit, (hx_r_prime, cx_r_prime) = r_module(
(z_hat, (hx_r, cx_r)))
val.append(value)
if not discrete:
for b in range(args.b):
action, entropy, log_prob = get_action(
logit, discrete=False, v=args.v)
actions.append((action, entropy, log_prob))
else:
prob = F.softmax(logit)
                actions = np.argpartition(prob.data.numpy(), args.b)[:args.b]
for a, e, lp in actions:
if not use_env:
z_prime_hat, _, (hx_d_prime, cx_d_prime) = d_module(
(z_hat, z_hat, a, (hx_d, cx_d)))
else:
state = get_obs(env, args.framework)
for t in trajectory:
env.step(t.data.numpy())
s_prime, _, _, _ = env.step(a.data.numpy())
s_prime = Variable(torch.from_numpy(s_prime).float())
z_prime_hat = enc(s_prime).unsqueeze(0)
env.reset(state)
hx_d_prime, cx_d_prime = hx_d, cx_d
child_states.append(
(z_prime_hat, trajectory + [a], (hx_r_prime, cx_r_prime),
(hx_d_prime, cx_d_prime), val, entropies + [e], logprobs + [lp]))
        child_states = prune(child_states, args.b)
parent_states = child_states
child_states = []
# compute value of final state in each trajectory and choose best
best_val = sum(parent_states[0][4]).data[0,0]
best_ind = 0
for ind, (z, traj, hr, hd, v, _, _) in enumerate(parent_states):
vr, _, _ = r_module((z, hr))
v.append(vr)
if sum(v).data[0,0] > best_val:
best_ind = ind
best_val = sum(v).data[0,0]
return parent_states[best_ind]
|
ddr-master
|
common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
import argparse
import numpy as np
import os
import random
from operator import itemgetter
# Environment settings
parser = argparse.ArgumentParser(description='Eval DDR')
parser.add_argument('--dynamics-module', type=str, default=None,
help='Dynamics module')
parser.add_argument('--rewards-module', type=str, default=None,
help='Rewards module')
parser.add_argument('--num-processes', type=int, default=20,
help='how many training processes to use (default: 20)')
parser.add_argument('--N', type=int, default=1,
help='Number of episodes')
parser.add_argument('--rollout', type=int, default=20, help="rollout for goal")
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--render', action='store_true')
parser.add_argument('--out', type=str, default=None)
parser.add_argument('--max-episode-length', type=int, default=1000,
help='maximum length of an episode')
parser.add_argument('--framework', default='gym',
help='framework of env (default: gym)')
parser.add_argument('--env-name', default='InvertedPendulum-v1',
help='environment to train on (default: InvertedPendulum-v1)')
parser.add_argument('--maze-id', type=int, default=0)
parser.add_argument('--maze-length', type=int, default=1)
parser.add_argument('--log-interval', type=int, default=1)
parser.add_argument('--baseline', action='store_true')
parser.add_argument('--local', action='store_true',
help='running locally to render, no multiprocessing')
parser.add_argument('--single-env', action='store_true')
parser.add_argument('--coef-inner-rew', type=float, default=1.)
parser.add_argument('--mcts', action='store_true', help='Monte Carlo Tree Search')
parser.add_argument('-b', type=int, default=4, help='branching factor')
parser.add_argument('-d', type=int, default=3, help='planning depth')
parser.add_argument('--file-path', type=str, default=None,
help='path to XML file for mujoco')
parser.add_argument('--save-figs', action='store_true')
parser.add_argument('--neg-reward', action='store_true',
help='set reward negative for transfer')
parser.add_argument('--use-env', action='store_true', help='Use env with MCTS')
parser.add_argument('-v', action='store_true', help='verbose logging')
parser.add_argument('--difficulty', type=int, default=-1, help='difficulty of maze')
def prune(states, b):
"""Prune states down to length b, sorting by val."""
return sorted(states, key=itemgetter(4))[:b]
def test(block, args, d_args, r_args, d_module, r_module, enc, dec, q=None, rank=0):
import torch
from torch.autograd import Variable
from envs import create_env, reset_env, get_obs
from common import get_action, log
seed = args.seed * 9823 + 194885 + rank # make sure doesn't copy train
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
i = 1
total_acc, total_reward = [], []
avg_succ, avg_reward, avg_len = 0, 0, 0
while len(total_acc) < block:
reward_sum, succ = 0, 0
actions = []
if args.single_env and i > 1:
reset_env(env, args)
else:
env = create_env(args.env_name, framework=args.framework, args=args, eval_flag=True)
done = False
step = 0
# Should the two LSTMs share a hidden state?
cx_r = Variable(torch.zeros(1, r_args.dim))
hx_r = Variable(torch.zeros(1, r_args.dim))
if not args.baseline:
cx_d = Variable(torch.zeros(1, d_args.dim))
hx_d = Variable(torch.zeros(1, d_args.dim))
while step < args.max_episode_length and not done:
# Encode state
state = get_obs(env, r_args.framework)
state = Variable(torch.from_numpy(state).float())
if not args.baseline:
z = enc(state)
z_prime_hat = z.unsqueeze(0)
else:
z_prime_hat = state.unsqueeze(0)
actions = []
if args.mcts:
z_prime_hat, actions, (hx_r, cx_r), (hx_d, cx_d), _, _, _ = mcts(
env, z_prime_hat, r_module, d_module, enc, (hx_r, cx_r),
(hx_d, cx_d), args, discrete=r_args.discrete,
use_env=args.use_env)
for r in range(args.rollout - args.d):
value, logit, (hx_r, cx_r) = r_module(
(z_prime_hat, (hx_r, cx_r)))
action, entropy, log_prob = get_action(
logit, discrete=r_args.discrete)
actions.append(action)
if not args.baseline:
z_prime_hat, _, (hx_d, cx_d) = d_module(
(z_prime_hat, z_prime_hat, action, (hx_d, cx_d)))
if args.save_figs:
s_prime_hat = dec(z_prime_hat)
for action in actions[:args.rollout]:
_, reward, done, _ = env.step(action.data.numpy())
if args.render:
env.render()
reward_sum += reward
step += 1
if done:
succ = 1
break
U = 1. / i
total_acc.append(succ)
total_reward.append(reward_sum)
avg_succ = avg_succ * (1 - U) + succ * U
avg_reward = avg_reward * (1 - U) + reward_sum * U
avg_len = avg_len * (1 - U) + (step + 1) * U
if i % args.log_interval == 0:
log("Eval: {:d} episodes, avg succ {:.2f}, avg reward {:.2f}, avg length {:.2f}".format(
                len(total_acc), avg_succ, avg_reward, avg_len))
i += 1
if args.local:
return (sum(total_acc), len(total_acc), sum(total_reward), avg_len)
q.put((sum(total_acc), len(total_acc), sum(total_reward)))
if __name__ == '__main__':
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn')
from envs import *
from model import *
from common import *
# from ppo.model import MLPPolicy
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = ""
args = parser.parse_args()
if not args.mcts:
args.d = 0
log(args)
torch.manual_seed(args.seed)
d_args, d_module, enc, dec = None, None, None, None
r_state_dict, r_args = torch.load(args.rewards_module, map_location=lambda storage, loc: storage)
if args.single_env and hasattr(r_args, 'maze_structure'):
args.maze_structure = r_args.maze_structure
env = create_env(args.env_name, framework=args.framework, args=args, eval_flag=True)
r_module = R_Module(env.action_space.shape[0], r_args.dim,
discrete=r_args.discrete, baseline=r_args.baseline,
state_space=env.observation_space.shape[0])
r_module.load_state_dict(r_state_dict)
r_module.eval()
if not args.baseline:
if args.local:
r_args.dynamics_module = '/Users/amyzhang/ddr_for_tl' + r_args.dynamics_module[24:]
if args.dynamics_module is None:
d_dict = torch.load(r_args.dynamics_module, map_location=lambda storage, loc: storage)
else:
d_dict = torch.load(args.dynamics_module, map_location=lambda storage, loc: storage)
d_args = d_dict['args']
enc_state = d_dict['enc']
dec_state = d_dict['dec']
d_state_dict = d_dict['d_module']
d_module = D_Module(env.action_space.shape[0], d_args.dim, d_args.discrete)
d_module.load_state_dict(d_state_dict)
d_module.eval()
enc = Encoder(env.observation_space.shape[0], d_args.dim,
use_conv=d_args.use_conv)
dec = Decoder(env.observation_space.shape[0], d_args.dim,
use_conv=d_args.use_conv)
enc.load_state_dict(enc_state)
dec.load_state_dict(dec_state)
enc.eval()
dec.eval()
block = int(args.N / args.num_processes)
if args.local:
        all_succ, all_total, total_reward, avg_len = test(
            block, args, d_args, r_args, d_module, r_module, enc, dec)
else:
processes = []
queues = []
for rank in range(0, args.num_processes):
q = mp.Queue()
p = mp.Process(target=test, args=(
block, args, d_args, r_args, d_module, r_module, enc, dec, q, rank))
p.start()
processes.append(p)
queues.append(q)
for i, p in enumerate(processes):
log("Exit process %d" % i)
p.join()
all_succ = 0
all_total = 0
total_reward = 0
for q in queues:
while not q.empty():
succ, total, total_r = q.get()
all_succ += succ
all_total += total
total_reward += total_r
log("Success: %s, %s, %s" % (all_succ / all_total, all_succ, all_total))
log("Average Reward: %s" % (total_reward / all_total))
if args.out:
with open(args.out, 'a') as f:
f.write("Success: %s \n" % (all_succ / all_total))
|
ddr-master
|
eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
import datetime
import os
import time
import shutil
from itertools import chain
import dill
from arguments import get_args
if __name__ == '__main__':
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn')
import my_optim
from envs import create_env
from model import *
from test import test
from train_reward_module import train_rewards
from common import *
from train_dynamics_module import train_dynamics
from train_online import train_online
from eval_modules import eval_reward
from tensorboardX import SummaryWriter
os.environ['OMP_NUM_THREADS'] = '1'
args = get_args()
log(args)
if not args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ""
torch.manual_seed(args.seed)
args_param = vars(args)
toprint = ['seed', 'lr', 'entropy_coef', 'value_loss_coef', 'num_steps',
'dim']
if args.planning:
toprint += ['rollout']
env_name = args.env_name
if args.env_name.endswith("MazeEnv"):
env_name += 'mazeid%slength%s' % (args.maze_id, args.maze_length)
toprint += ['random_start', 'difficulty']
if args.baseline:
model_type = 'baseline'
if args.neg_reward:
model_type += '_neg_reward'
if args.file_path:
model_type += '_dynamics_transfer'
toprint += ['algo', 'gae', 'num_processes']
elif args.train_dynamics:
model_type = 'dynamics_planning'
toprint = ['lr', 'forward_loss_coef', 'dec_loss_coef', 'inv_loss_coef', 'rollout', 'dim',
'train_size']
# env_name = os.path.basename(args.train_set.strip('/'))
if args.single_env:
data_args = torch.load(os.path.join(args.train_set, 'args.pt'))
args.maze_structure = data_args.maze_structure
elif args.train_reward:
model_type = 'reward'
if args.neg_reward:
model_type += '_neg_reward'
if args.file_path:
model_type += '_dynamics_transfer'
toprint += ['algo', 'gae']
if args.planning:
model_type += '_planning'
elif args.train_online:
model_type = 'online'
toprint += ['lr', 'dec_loss_coef', 'inv_loss_coef', 'rollout', 'dim']
if args.transfer:
model_type += '_transfer'
name = ''
for arg in toprint:
name += '_{}{}'.format(arg, args_param[arg])
out_dir = os.path.join(args.out, env_name, model_type, name)
args.out = out_dir
dynamics_path = ''
if args.dynamics_module is not None and not args.baseline:
dynamics_path = args.dynamics_module.split('/')
dynamics_path = dynamics_path[-4] + dynamics_path[-2] +\
'_' + dynamics_path[-1].strip('.pt')
args.out = os.path.join(out_dir, dynamics_path)
os.makedirs(args.out, exist_ok=True)
# create the tensorboard summary writer here
tb_log_dir = os.path.join(args.log_dir, env_name, model_type, name,
dynamics_path, 'tb_logs')
print(tb_log_dir)
print(args.out)
if args.reset_dir:
shutil.rmtree(tb_log_dir, ignore_errors=True)
os.makedirs(tb_log_dir, exist_ok=True)
tb_writer = SummaryWriter(log_dir=tb_log_dir)
# dump all the arguments in the tb_log_dir
print(args, file=open(os.path.join(tb_log_dir, "arguments"), "w"))
env = create_env(args.env_name, framework=args.framework, args=args)
if args.train_dynamics:
train_dynamics(env, args, None) # tb_writer
if args.train_reward:
model_name = 'rewards_module'
if args.from_checkpoint is not None: # using curriculum
model_name += 'curr'
if args.single_env:
model_name += '_single_env'
args.maze_structure = env._env.MAZE_STRUCTURE
args.model_name = model_name
enc = None
d_module = None
assert args.dynamics_module is not None
enc = load_encoder(env.observation_space.shape[0], args)
if args.planning:
d_module = load_d_module(env.action_space.shape[0], args)
shared_model = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=args.baseline,
state_space=env.observation_space.shape[0])
# shared reward module for everyone
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
train_agent_method = None
total_args = args
train_agent_method = train_rewards
for rank in range(0, args.num_processes):
if rank==0:
p = mp.Process(target=train_agent_method, args=(
rank, total_args, shared_model, enc, optimizer, tb_log_dir,
d_module))
else:
p = mp.Process(target=train_agent_method, args=(
rank, total_args, shared_model, enc, optimizer, None, d_module))
p.start()
processes.append(p)
for p in processes:
p.join()
torch.save((shared_model.state_dict(), args), os.path.join(
args.out, model_name + '%s.pt' % args.num_episodes))
print(os.path.join(args.out, model_name))
if args.train_online:
model_name = 'rewards_module'
if args.from_checkpoint is not None: # using curriculum
model_name += 'curr'
if args.single_env:
model_name += '_single_env'
args.maze_structure = env._env.MAZE_STRUCTURE
args.model_name = model_name
shared_enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
shared_dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
shared_d_module = D_Module(env.action_space.shape[0], args.dim,
args.discrete)
shared_r_module = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=args.baseline,
state_space=env.observation_space.shape[0])
shared_enc.share_memory()
shared_dec.share_memory()
shared_d_module.share_memory()
shared_r_module.share_memory()
all_params = chain(shared_enc.parameters(), shared_dec.parameters(),
shared_d_module.parameters(),
shared_r_module.parameters())
shared_model = [shared_enc, shared_dec, shared_d_module, shared_r_module]
if args.single_env:
model_name += '_single_env'
args.maze_structure = env.MAZE_STRUCTURE
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(all_params, lr=args.lr)
optimizer.share_memory()
train_agent_method = train_online
processes = []
for rank in range(0, args.num_processes):
if rank==0:
p = mp.Process(target=train_agent_method, args=(
rank, args, shared_model, optimizer, tb_log_dir))
else:
p = mp.Process(target=train_agent_method, args=(
rank, args, shared_model, optimizer))
p.start()
processes.append(p)
# start an eval process here
eval_agent_method = eval_reward
p = mp.Process(target=eval_agent_method, args=(
args, shared_model, tb_log_dir))
p.start()
processes.append(p)
for p in processes:
p.join()
results_dict = {'args': args}
torch.save((shared_r_module.state_dict(), args), os.path.join(
args.out, 'reward_module%s.pt' % args.num_episodes))
results_dict['enc'] = shared_enc.state_dict()
results_dict['dec'] = shared_dec.state_dict()
results_dict['d_module'] = shared_d_module.state_dict()
torch.save(results_dict,
os.path.join(args.out, 'dynamics_module%s.pt' % args.num_episodes))
log("Saved model %s" % os.path.join(args.out, model_name))
|
ddr-master
|
main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import os
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from envs import *
from model import Encoder, Decoder, D_Module, R_Module
from common import *
from tensorboardX import SummaryWriter
from itertools import chain
from eval import test
def eval_reward(args, shared_model, writer_dir=None):
"""
For evaluation
Arguments:
    - writer_dir: directory for the TensorBoard summary writer (the SummaryWriter object itself can't be passed between processes, so only the directory is shared)
"""
writer = SummaryWriter(log_dir=os.path.join(writer_dir,'eval')) if writer_dir is not None else None
# current episode stats
episode_reward = episode_value_mse = episode_td_error = episode_pg_loss = episode_length = 0
# global stats
i_episode = 0
total_episode = total_steps = 0
num_goals_achieved = 0
    # initialize the env and models
torch.manual_seed(args.seed)
env = create_env(args.env_name, framework=args.framework, args=args)
    set_seed(args.seed, env, args.framework)
shared_enc, shared_dec, shared_d_module, shared_r_module = shared_model
enc = Encoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
dec = Decoder(env.observation_space.shape[0], args.dim,
use_conv=args.use_conv)
d_module = D_Module(env.action_space.shape[0], args.dim, args.discrete)
r_module = R_Module(env.action_space.shape[0], args.dim,
discrete=args.discrete, baseline=False,
state_space=env.observation_space.shape[0])
all_params = chain(enc.parameters(), dec.parameters(),
d_module.parameters(),
r_module.parameters())
if args.from_checkpoint is not None:
model_state, _ = torch.load(args.from_checkpoint)
        # assumed: the curriculum checkpoint stores reward-module weights
        r_module.load_state_dict(model_state)
# set the model to evaluation mode
enc.eval()
dec.eval()
d_module.eval()
r_module.eval()
# reset the state
state = env.reset()
state = Variable(torch.from_numpy(state).float())
start = time.time()
while total_episode < args.num_episodes:
# Sync with the shared model
r_module.load_state_dict(shared_r_module.state_dict())
d_module.load_state_dict(shared_d_module.state_dict())
enc.load_state_dict(shared_enc.state_dict())
dec.load_state_dict(shared_dec.state_dict())
# reset stuff
cd_p = Variable(torch.zeros(1, args.lstm_dim))
hd_p = Variable(torch.zeros(1, args.lstm_dim))
# for the reward
cr_p = Variable(torch.zeros(1, args.lstm_dim))
hr_p = Variable(torch.zeros(1, args.lstm_dim))
i_episode += 1
episode_length = 0
episode_reward = 0
args.local = True
args.d = 0
succ, _, episode_reward, episode_length = test(
1, args, args, args, d_module, r_module, enc)
log("Eval: succ {:.2f}, reward {:.2f}, length {:.2f}".format(
succ, episode_reward, episode_length))
# Episode has ended, write the summaries here
if writer_dir is not None:
# current episode stats
writer.add_scalar('eval/episode_reward', episode_reward, i_episode)
writer.add_scalar('eval/episode_length', episode_length, i_episode)
writer.add_scalar('eval/success', succ, i_episode)
time.sleep(args.eval_every)
print("sleep")
|
ddr-master
|
eval_modules.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.optim as optim
class SharedAdam(optim.Adam):
"""Implements Adam algorithm with shared states.
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0):
super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step'].item()
                bias_correction2 = 1 - beta2 ** state['step'].item()
                step_size = group['lr'] * math.sqrt(
                    bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
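# Usage sketch (hypothetical driver, mirroring how the training scripts above
# use this class; the learning rate is illustrative): put the model in shared
# memory, build one SharedAdam over its parameters, share the optimizer state,
# and let every worker process call step() on the same instance.
#
#   shared_model.share_memory()
#   optimizer = SharedAdam(shared_model.parameters(), lr=1e-4)
#   optimizer.share_memory()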
|
ddr-master
|
my_optim.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.0,
top_p: float = 0.9,
max_seq_len: int = 192,
max_gen_len: int = 128,
max_batch_size: int = 4,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
'''def remove_non_ascii(s: str) -> str:
""" <FILL>
return result
''',
"""# Installation instructions:
```bash
<FILL>
```
This downloads the LLaMA inference code and installs the repository as a local pip package.
""",
"""class InterfaceManagerFactory(AbstractManagerFactory):
def __init__(<FILL>
def main():
factory = InterfaceManagerFactory(start=datetime.now())
managers = []
for i in range(10):
managers.append(factory.build(id=i))
""",
"""/-- A quasi-prefunctoid is 1-connected iff all its etalisations are 1-connected. -/
theorem connected_iff_etalisation [C D : precategoroid] (P : quasi_prefunctoid C D) :
π₁ P = 0 ↔ <FILL> = 0 :=
begin
split,
{ intros h f,
rw pi_1_etalisation at h,
simp [h],
refl
},
{ intro h,
have := @quasi_adjoint C D P,
simp [←pi_1_etalisation, this, h],
refl
}
end
""",
]
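    # Each prompt is assumed to contain exactly one <FILL> marker; the text
    # before it becomes the infilling prefix and the text after it the suffix.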
prefixes = [p.split("<FILL>")[0] for p in prompts]
suffixes = [p.split("<FILL>")[1] for p in prompts]
results = generator.text_infilling(
prefixes=prefixes,
suffixes=suffixes,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print("\n================= Prompt text =================\n")
print(prompt)
print("\n================= Filled text =================\n")
print(result["full_text"])
if __name__ == "__main__":
fire.Fire(main)
|
codellama-main
|
example_infilling.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.2,
top_p: float = 0.95,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
instructions = [
[
{
"role": "user",
"content": "In Bash, how do I list all text files in the current directory (excluding subdirectories) that have been modified in the last month?",
}
],
[
{
"role": "user",
"content": "What is the difference between inorder and preorder traversal? Give an example in Python.",
}
],
[
{
"role": "system",
"content": "Provide answers in JavaScript",
},
{
"role": "user",
"content": "Write a function that computes the set of sums of all contiguous sublists of a given list.",
}
],
]
results = generator.chat_completion(
instructions, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for instruction, result in zip(instructions, results):
for msg in instruction:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
codellama-main
|
example_instructions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.2,
top_p: float = 0.9,
max_seq_len: int = 256,
max_batch_size: int = 4,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"""\
import socket
def ping_exponential_backoff(host: str):""",
"""\
import argparse
def main(string: str):
print(string)
print(string[::-1])
if __name__ == "__main__":"""
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
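# Launch sketch (assumed checkpoint/tokenizer paths; set --nproc_per_node to
# the model's model-parallel degree):
#   torchrun --nproc_per_node 1 example_completion.py \
#       --ckpt_dir CodeLlama-7b/ --tokenizer_path CodeLlama-7b/tokenizer.model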
|
codellama-main
|
example_completion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from setuptools import find_packages, setup
def get_requirements(path: str):
return [l.strip() for l in open(path)]
setup(
name="codellama",
version="0.0.1",
packages=find_packages(),
install_requires=get_requirements("requirements.txt"),
)
|
codellama-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import json
import os
import sys
import time
from pathlib import Path
from typing import List, Literal, Optional, Tuple, TypedDict
import torch
import torch.nn.functional as F
from fairscale.nn.model_parallel.initialize import (
get_model_parallel_rank,
initialize_model_parallel,
model_parallel_is_initialized,
)
from llama.model import ModelArgs, Transformer
from llama.tokenizer import Tokenizer
Role = Literal["system", "user", "assistant"]
class Message(TypedDict):
role: Role
content: str
class InfillingPrediction(TypedDict, total=False):
generation: str
full_text: str
tokens: List[str] # not required
logprobs: List[float] # not required
class CompletionPrediction(TypedDict, total=False):
generation: str
tokens: List[str] # not required
logprobs: List[float] # not required
class ChatPrediction(TypedDict, total=False):
generation: Message
tokens: List[str] # not required
logprobs: List[float] # not required
Dialog = List[Message]
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
SPECIAL_TAGS = [B_INST, E_INST, "<<SYS>>", "<</SYS>>"]
UNSAFE_ERROR = "Error: special tags are not allowed as part of the prompt."
class Llama:
@staticmethod
def build(
ckpt_dir: str,
tokenizer_path: str,
max_seq_len: int,
max_batch_size: int,
model_parallel_size: Optional[int] = None,
) -> "Llama":
if not torch.distributed.is_initialized():
torch.distributed.init_process_group("nccl")
if not model_parallel_is_initialized():
if model_parallel_size is None:
model_parallel_size = int(os.environ.get("WORLD_SIZE", 1))
initialize_model_parallel(model_parallel_size)
local_rank = int(os.environ.get("LOCAL_RANK", 0))
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(1)
if local_rank > 0:
sys.stdout = open(os.devnull, "w")
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}"
assert model_parallel_size == len(
checkpoints
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}"
ckpt_path = checkpoints[get_model_parallel_rank()]
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
**params,
)
tokenizer = Tokenizer(model_path=tokenizer_path)
model_args.vocab_size = tokenizer.n_words
if torch.cuda.is_bf16_supported():
torch.set_default_tensor_type(torch.cuda.BFloat16Tensor)
else:
torch.set_default_tensor_type(torch.cuda.HalfTensor)
model = Transformer(model_args)
model.load_state_dict(checkpoint, strict=False)
print(f"Loaded in {time.time() - start_time:.2f} seconds")
return Llama(model, tokenizer)
def __init__(self, model: Transformer, tokenizer: Tokenizer):
self.model = model
self.tokenizer = tokenizer
@torch.inference_mode()
def generate(
self,
prompt_tokens: List[List[int]],
max_gen_len: int,
temperature: float = 0.6,
top_p: float = 0.9,
logprobs: bool = False,
echo: bool = False,
stop_token: Optional[int] = None,
) -> Tuple[List[List[int]], Optional[List[List[float]]]]:
if stop_token is None:
stop_token = self.tokenizer.eos_id
params = self.model.params
bsz = len(prompt_tokens)
assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)
min_prompt_len = min(len(t) for t in prompt_tokens)
max_prompt_len = max(len(t) for t in prompt_tokens)
assert max_prompt_len <= params.max_seq_len
total_len = min(params.max_seq_len, max_gen_len + max_prompt_len)
pad_id = self.tokenizer.pad_id
tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long, device="cuda")
for k, t in enumerate(prompt_tokens):
tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
if logprobs:
token_logprobs = torch.zeros_like(tokens, dtype=torch.float)
prev_pos = 0
stop_reached = torch.tensor([False] * bsz, device="cuda")
input_text_mask = tokens != pad_id
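        # Incremental decoding: each iteration feeds only the newly added token
        # positions (the model caches keys/values for earlier ones), and
        # input_text_mask ensures prompt tokens are never overwritten.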
for cur_pos in range(min_prompt_len, total_len):
logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
if logprobs:
token_logprobs[:, prev_pos + 1 : cur_pos + 1] = -F.cross_entropy(
input=logits.transpose(1, 2),
target=tokens[:, prev_pos + 1 : cur_pos + 1],
reduction="none",
ignore_index=pad_id,
)
if temperature > 0:
probs = torch.softmax(logits[:, -1] / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits[:, -1], dim=-1)
next_token = next_token.reshape(-1)
# only replace token if prompt has already been generated
next_token = torch.where(
input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token
)
tokens[:, cur_pos] = next_token
stop_reached |= (~input_text_mask[:, cur_pos]) & (next_token == stop_token)
prev_pos = cur_pos
if all(stop_reached):
break
if logprobs:
token_logprobs = token_logprobs.tolist()
out_tokens, out_logprobs = [], []
for i, toks in enumerate(tokens.tolist()):
# cut to max gen len
start = 0 if echo else len(prompt_tokens[i])
toks = toks[start : len(prompt_tokens[i]) + max_gen_len]
probs = None
if logprobs:
probs = token_logprobs[i][start : len(prompt_tokens[i]) + max_gen_len]
# cut to stop token if present
if stop_token in toks:
stop_idx = toks.index(stop_token)
toks = toks[:stop_idx]
probs = probs[:stop_idx] if logprobs else None
out_tokens.append(toks)
out_logprobs.append(probs)
return (out_tokens, out_logprobs if logprobs else None)
def text_completion(
self,
prompts: List[str],
temperature: float = 0.6,
top_p: float = 0.9,
max_gen_len: Optional[int] = None,
logprobs: bool = False,
echo: bool = False,
) -> List[CompletionPrediction]:
if max_gen_len is None:
max_gen_len = self.model.params.max_seq_len - 1
prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]
generation_tokens, generation_logprobs = self.generate(
prompt_tokens=prompt_tokens,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
echo=echo,
)
if logprobs:
return [
{
"generation": self.tokenizer.decode(t),
"tokens": [self.tokenizer.decode(x) for x in t],
"logprobs": logprobs_i,
}
for t, logprobs_i in zip(generation_tokens, generation_logprobs)
]
return [{"generation": self.tokenizer.decode(t)} for t in generation_tokens]
def text_infilling(
self,
prefixes: List[str],
suffixes: List[str],
temperature: float = 0.6,
top_p: float = 0.9,
max_gen_len: Optional[int] = None,
logprobs: bool = False,
suffix_first: bool = False,
) -> List[InfillingPrediction]:
assert self.tokenizer.eot_id is not None
if max_gen_len is None:
max_gen_len = self.model.params.max_seq_len - 1
prompt_tokens = [
infilling_prompt_tokens(
self.tokenizer, prefix, suffix, suffix_first=suffix_first
)
for prefix, suffix in zip(prefixes, suffixes)
]
generation_tokens, generation_logprobs = self.generate(
prompt_tokens=prompt_tokens,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
echo=False,
stop_token=self.tokenizer.eot_id,
)
generations = [self.tokenizer.decode_infilling(t) for t in generation_tokens]
if logprobs:
return [
{
"generation": generation,
"logprobs": logprobs_i,
"tokens": t,
"full_text": prefix + generation + suffix,
}
for prefix, suffix, generation, t, logprobs_i in zip(
prefixes,
suffixes,
generations,
generation_tokens,
generation_logprobs,
)
]
else:
return [
{
"generation": generation,
"full_text": prefix + generation + suffix,
}
for prefix, suffix, generation in zip(prefixes, suffixes, generations)
]
def chat_completion(
self,
dialogs: List[Dialog],
temperature: float = 0.6,
top_p: float = 0.9,
max_gen_len: Optional[int] = None,
logprobs: bool = False,
) -> List[ChatPrediction]:
if max_gen_len is None:
max_gen_len = self.model.params.max_seq_len - 1
prompt_tokens = []
unsafe_requests = []
for dialog in dialogs:
unsafe_requests.append(
any([tag in msg["content"] for tag in SPECIAL_TAGS for msg in dialog])
)
if dialog[0]["role"] == "system":
dialog = [
{
"role": dialog[1]["role"],
"content": B_SYS
+ dialog[0]["content"]
+ E_SYS
+ dialog[1]["content"],
}
] + dialog[2:]
assert all([msg["role"] == "user" for msg in dialog[::2]]) and all(
[msg["role"] == "assistant" for msg in dialog[1::2]]
), (
"model only supports 'system', 'user' and 'assistant' roles, "
"starting with 'system', then 'user' and alternating (u/a/u/a/u...)"
)
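            # Encode each completed (user, assistant) exchange as its own
            # BOS..EOS segment, with the user turn wrapped in [INST] ... [/INST].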
dialog_tokens: List[int] = sum(
[
self.tokenizer.encode(
f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} ",
bos=True,
eos=True,
)
for prompt, answer in zip(
dialog[::2],
dialog[1::2],
)
],
[],
)
assert (
dialog[-1]["role"] == "user"
), f"Last message must be from user, got {dialog[-1]['role']}"
dialog_tokens += self.tokenizer.encode(
f"{B_INST} {(dialog[-1]['content']).strip()} {E_INST}",
bos=True,
eos=False,
)
prompt_tokens.append(dialog_tokens)
generation_tokens, generation_logprobs = self.generate(
prompt_tokens=prompt_tokens,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
)
if logprobs:
return [
{
"generation": {
"role": "assistant",
"content": self.tokenizer.decode(t)
if not unsafe
else UNSAFE_ERROR,
},
"tokens": [self.tokenizer.decode(x) for x in t],
"logprobs": logprobs_i,
}
for t, logprobs_i, unsafe in zip(
generation_tokens, generation_logprobs, unsafe_requests
)
]
return [
{
"generation": {
"role": "assistant",
"content": self.tokenizer.decode(t) if not unsafe else UNSAFE_ERROR,
}
}
for t, unsafe in zip(generation_tokens, unsafe_requests)
]
def sample_top_p(probs, p):
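    """
    Nucleus (top-p) sampling: keep the smallest set of highest-probability
    tokens whose cumulative mass exceeds p, renormalize their probabilities,
    and draw one token from that restricted set.
    """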
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
next_token = torch.gather(probs_idx, -1, next_token)
return next_token
def infilling_prompt_tokens(
tokenizer: Tokenizer,
pre: str,
suf: str,
suffix_first: bool = False,
) -> List[int]:
"""
Format and encode an infilling problem.
If `suffix_first` is set, format in suffix-prefix-middle format.
"""
assert tokenizer.prefix_id is not None
assert tokenizer.middle_id is not None
assert tokenizer.suffix_id is not None
if suffix_first:
# format as "<PRE> <SUF>{suf} <MID> {pre}"
return (
[tokenizer.bos_id, tokenizer.prefix_id, tokenizer.suffix_id]
+ tokenizer.encode_infilling(suf)
+ [tokenizer.middle_id]
+ tokenizer.encode(pre, bos=False, eos=False)
)
else:
# format as "<PRE> {pre} <SUF>{suf} <MID>"
return (
[tokenizer.bos_id, tokenizer.prefix_id]
+ tokenizer.encode(pre, bos=False, eos=False)
+ [tokenizer.suffix_id]
+ tokenizer.encode_infilling(suf)
+ [tokenizer.middle_id]
)
|
codellama-main
|
llama/generation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
codellama-main
|
llama/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple
import fairscale.nn.model_parallel.initialize as fs_init
import torch
import torch.nn.functional as F
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
ParallelEmbedding,
RowParallelLinear,
)
from torch import nn
@dataclass
class ModelArgs:
dim: int = 4096
n_layers: int = 32
n_heads: int = 32
n_kv_heads: Optional[int] = None
vocab_size: int = -1 # defined later by tokenizer
multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
ffn_dim_multiplier: Optional[float] = None
norm_eps: float = 1e-5
rope_theta: float = 10000
max_batch_size: int = 32
max_seq_len: int = 2048
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
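    """Precompute the table of complex rotations used by rotary position
    embeddings: one frequency per pair of feature dimensions, evaluated at
    every position in [0, end)."""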
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device, dtype=torch.float32) # type: ignore
freqs = torch.outer(t, freqs) # type: ignore
freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
return freqs_cis
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
ndim = x.ndim
assert 0 <= 1 < ndim
assert freqs_cis.shape == (x.shape[1], x.shape[-1])
shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
return freqs_cis.view(*shape)
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
"""torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
bs, slen, n_kv_heads, head_dim = x.shape
if n_rep == 1:
return x
return (
x[:, :, :, None, :]
.expand(bs, slen, n_kv_heads, n_rep, head_dim)
.reshape(bs, slen, n_kv_heads * n_rep, head_dim)
)
class Attention(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = fs_init.get_model_parallel_world_size()
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = ColumnParallelLinear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wk = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wv = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wo = RowParallelLinear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
input_is_parallel=True,
init_method=lambda x: x,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads
keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
keys = keys.transpose(1, 2)
values = values.transpose(1, 2)
scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
if mask is not None:
scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen)
scores = F.softmax(scores.float(), dim=-1).type_as(xq)
output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)
output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
return self.wo(output)
class FeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
ffn_dim_multiplier: Optional[float],
):
super().__init__()
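        # SwiGLU sizing: shrink the nominal hidden size to 2/3, apply the
        # optional multiplier, then round up to a multiple of `multiple_of`.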
hidden_dim = int(2 * hidden_dim / 3)
# custom dim factor multiplier
if ffn_dim_multiplier is not None:
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
self.w1 = ColumnParallelLinear(
dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
)
self.w2 = RowParallelLinear(
hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x
)
self.w3 = ColumnParallelLinear(
dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
)
def forward(self, x):
return self.w2(F.silu(self.w1(x)) * self.w3(x))
class TransformerBlock(nn.Module):
def __init__(self, layer_id: int, args: ModelArgs):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.head_dim = args.dim // args.n_heads
self.attention = Attention(args)
self.feed_forward = FeedForward(
dim=args.dim,
hidden_dim=4 * args.dim,
multiple_of=args.multiple_of,
ffn_dim_multiplier=args.ffn_dim_multiplier,
)
self.layer_id = layer_id
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
h = x + self.attention.forward(
self.attention_norm(x), start_pos, freqs_cis, mask
)
out = h + self.feed_forward.forward(self.ffn_norm(h))
return out
class Transformer(nn.Module):
def __init__(self, params: ModelArgs):
super().__init__()
self.params = params
self.vocab_size = params.vocab_size
self.n_layers = params.n_layers
self.tok_embeddings = ParallelEmbedding(
params.vocab_size, params.dim, init_method=lambda x: x
)
self.layers = torch.nn.ModuleList()
for layer_id in range(params.n_layers):
self.layers.append(TransformerBlock(layer_id, params))
self.norm = RMSNorm(params.dim, eps=params.norm_eps)
self.output = ColumnParallelLinear(
params.dim, params.vocab_size, bias=False, init_method=lambda x: x
)
self.freqs_cis = precompute_freqs_cis(
self.params.dim // self.params.n_heads,
self.params.max_seq_len * 2,
params.rope_theta,
)
@torch.inference_mode()
def forward(self, tokens: torch.Tensor, start_pos: int):
_bsz, seqlen = tokens.shape
h = self.tok_embeddings(tokens)
self.freqs_cis = self.freqs_cis.to(h.device)
freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
mask = None
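        # A causal mask is only needed when several positions are processed at
        # once (prompt prefill); for single-token decoding all cached positions
        # are attendable, and the triu offset accounts for start_pos.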
if seqlen > 1:
mask = torch.full(
(1, 1, seqlen, seqlen), float("-inf"), device=tokens.device
)
mask = mask.to(torch.float32).triu(diagonal=start_pos+1).type_as(h)
for layer in self.layers:
h = layer(h, start_pos, freqs_cis, mask)
h = self.norm(h)
output = self.output(h).float()
return output
|
codellama-main
|
llama/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List, Optional
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
# token IDs for special infilling tokens
self.prefix_id: Optional[int] = self.sp_model.piece_to_id("▁<PRE>") or None
self.middle_id: Optional[int] = self.sp_model.piece_to_id("▁<MID>") or None
self.suffix_id: Optional[int] = self.sp_model.piece_to_id("▁<SUF>") or None
self.eot_id: Optional[int] = self.sp_model.piece_to_id("▁<EOT>") or None
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id} "
f"- PRE ID: {self.prefix_id} - MID ID: {self.middle_id} - SUF ID: {self.suffix_id} - EOT ID: {self.eot_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
def encode_infilling(self, s: str) -> List[int]:
"""Encode a string without an implicit leading space."""
return self.sp_model.encode("☺" + s)[2:]
def decode_infilling(self, t: List[int]) -> str:
"""Decode a string without an implicit leading space."""
return self.sp_model.decode([self.sp_model.piece_to_id("☺")] + t)[1:]
|
codellama-main
|
llama/tokenizer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import convnet, coordinates
class FiLMed(nn.Module):
"""
Implements a FiLMed block.
"""
def __init__(self, num_conv_filts_in, num_conv_filts, stride, dilation):
super(FiLMed, self).__init__()
self.conv1 = nn.Conv2d(num_conv_filts_in,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.conv2 = nn.Conv2d(num_conv_filts,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.batchnorm2 = nn.BatchNorm2d(num_conv_filts, affine=False)
def forward(self, x, gamma, beta):
b1 = F.relu(self.conv1(x))
b2 = self.batchnorm2(self.conv2(b1))
gamma = gamma.unsqueeze(2).unsqueeze(3).expand_as(b2)
beta = beta.unsqueeze(2).unsqueeze(3).expand_as(b2)
b2 = F.relu((b2 * gamma) + beta)
return (b1 + b2)
class FiLM(nn.Module):
"""
Implements FiLM.
"""
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base,
use_coordinates,
num_conv_filts_film,
num_conv_layers_film,
stride_film,
dilation_film,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
output_hidden_dim,
output_dim):
super(FiLM, self).__init__()
self.bidirectional = bidirectional
self.use_coordinates = use_coordinates
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
# Compute required output dimension given convnet specs
# * 2 for gamma and beta. Assumes constant num filters per layer
num_feats = num_conv_filts_film * num_conv_layers_film * 2
self.num_conv_filts_film = num_conv_filts_film
self.num_conv_layers_film = num_conv_layers_film
self.decoder = nn.Linear(lstm_output_dim_q, num_feats)
# Base convnet
self.conv, num_channels, _ = convnet(num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base)
# Filmed convnet
self.film_conv_modules = []
for i in range(num_conv_layers_film):
num_channels += 2 if use_coordinates else 0
fcm = FiLMed(num_channels,
num_conv_filts_film,
stride_film,
dilation_film)
num_channels = num_conv_filts_film
self.film_conv_modules.append(fcm)
self.add_module('film_module_%d' % i, fcm)
num_conv_filts_film += 2 if use_coordinates else 0
self.conv1 = nn.Conv2d(num_conv_filts_film,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
self.use_coordinates_class = (use_coordinates
and fcn_coeff_dim > 1
and fcn_temp_dim > 1)
fcn_output_dim += 2 if self.use_coordinates_class else 0
adaptive_pool_dim = fcn_output_dim * fcn_coeff_dim * fcn_temp_dim
self.output = nn.Sequential(nn.Linear(adaptive_pool_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
enc_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
enc_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
gammas_betas = self.decoder(enc_q)
gammas_betas = gammas_betas.view(gammas_betas.size(0),
self.num_conv_layers_film,
self.num_conv_filts_film,
2)
a = torch.unsqueeze(a, 1)
a = self.conv(a)
for i, fcm in enumerate(self.film_conv_modules):
# Append coordinate maps
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
# see FiLM appendix for + 1
a = fcm(a, gammas_betas[:, i, :, 0] + 1, gammas_betas[:, i, :, 1])
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = self.conv1(a)
a = self.pool(a)
if self.use_coordinates_class:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = a.view(a.size(0), -1)
output = self.output(a)
return output
|
daqa-master
|
daqa-mod/film.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from layers import StackedAttention, StackedAttention1D, convnet, coordinates
class LSTMN(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
input_dim,
lstm_hidden_dim_a,
num_lstm_layers_a,
output_hidden_dim,
output_dim):
super(LSTMN, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
self.lstm_a = nn.LSTM(input_dim,
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_q = lstm_hidden_dim_q
lstm_output_dim_a = lstm_hidden_dim_a
lstm_output_dim = lstm_output_dim_q + lstm_output_dim_a
self.output = nn.Sequential(nn.Linear(lstm_output_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
# self.lstm_a.flatten_parameters()
lstm_a, _ = self.lstm_a(a)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
cat_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
len_a - 1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
len_a - 1, 1]),
dim=1)
else:
cat_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), len_a - 1]
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
cat = torch.cat((cat_a, cat_q), 1)
output = self.output(cat)
return output
class FCNLSTMN(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts,
num_conv_layers,
stride,
dilation,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
output_hidden_dim,
output_dim):
super(FCNLSTMN, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
self.conv, num_channels, _ = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.conv1 = nn.Conv2d(num_channels,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
lstm_output_dim = lstm_output_dim_q \
+ (fcn_output_dim * fcn_coeff_dim * fcn_temp_dim)
self.output = nn.Sequential(nn.Linear(lstm_output_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
a = torch.unsqueeze(a, 1)
conv_a = self.conv(a)
conv1_a = self.conv1(conv_a)
pool_a = self.pool(conv1_a)
cat_a = pool_a.view(pool_a.size(0), -1)
cat = torch.cat((cat_a, cat_q), 1)
output = self.output(cat)
return output
class CONVLSTMN(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
input_dim,
num_conv_filts,
num_conv_layers,
stride,
dilation,
lstm_hidden_dim_a,
num_lstm_layers_a,
output_hidden_dim,
output_dim):
super(CONVLSTMN, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
self.conv, num_channels, conv_red_dim = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.lstm_a = nn.LSTM(num_channels * int(input_dim / conv_red_dim),
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_q = lstm_hidden_dim_q
lstm_output_dim_a = lstm_hidden_dim_a
lstm_output_dim = lstm_output_dim_q + lstm_output_dim_a
self.output = nn.Sequential(nn.Linear(lstm_output_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
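        # Treat the spectrogram as a single-channel image, run the base convnet,
        # then fold the channel and frequency axes together so the audio LSTM
        # steps over the time dimension.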
a = torch.unsqueeze(a, 1)
a = self.conv(a)
a = a.permute(0, 2, 1, 3).contiguous()
a = a.view(a.size(0), a.size(1), a.size(2) * a.size(3))
lstm_a, _ = self.lstm_a(a)
if self.bidirectional:
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 1]),
dim=1)
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), -1]
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
cat = torch.cat((cat_a, cat_q), 1)
output = self.output(cat)
return output
class FCNLSTMNSA(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts,
num_conv_layers,
stride,
dilation,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
use_coordinates,
stacked_att_dim,
num_stacked_att,
output_hidden_dim,
output_dim):
super(FCNLSTMNSA, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
self.conv, num_channels, _ = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.conv1 = nn.Conv2d(num_channels,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
self.use_coordinates = (use_coordinates
and fcn_coeff_dim > 1
and fcn_temp_dim > 1)
fcn_output_dim += 2 if self.use_coordinates else 0
self.projection = nn.Conv2d(fcn_output_dim,
lstm_output_dim_q,
kernel_size=1,
padding=0)
self.stacked_att = []
for i in range(num_stacked_att):
sa = StackedAttention(lstm_output_dim_q, stacked_att_dim)
self.stacked_att.append(sa)
self.add_module('stacked_att_%d' % i, sa)
self.output = nn.Sequential(nn.Linear(lstm_output_dim_q, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
a = torch.unsqueeze(a, 1)
conv_a = self.conv(a)
conv1_a = self.conv1(conv_a)
pool_a = self.pool(conv1_a)
if self.use_coordinates:
coo = coordinates(pool_a.shape[2], pool_a.shape[3]).to(pool_a.device)
pool_a = torch.cat((pool_a, coo.expand(pool_a.size(0), -1, -1, -1)), 1)
pool_a = torch.tanh(self.projection(pool_a))
for sa in self.stacked_att:
cat_q = sa(pool_a, cat_q)
output = self.output(cat_q)
return output
class CONVLSTMNSA(nn.Module):
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
input_dim,
num_conv_filts,
num_conv_layers,
stride,
dilation,
lstm_hidden_dim_a,
num_lstm_layers_a,
stacked_att_dim,
num_stacked_att,
output_hidden_dim,
output_dim):
super(CONVLSTMNSA, self).__init__()
self.bidirectional = bidirectional
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
self.conv, num_channels, conv_red_dim = convnet(num_conv_filts,
num_conv_layers,
stride,
dilation)
self.lstm_a = nn.LSTM(num_channels * int(input_dim / conv_red_dim),
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_q = lstm_hidden_dim_q
lstm_output_dim_a = lstm_hidden_dim_a
self.projection = nn.Linear(lstm_output_dim_a, lstm_output_dim_q)
self.stacked_att = []
for i in range(num_stacked_att):
sa = StackedAttention1D(lstm_output_dim_q, stacked_att_dim)
self.stacked_att.append(sa)
self.add_module('stacked_att_%d' % i, sa)
self.output = nn.Sequential(nn.Linear(lstm_output_dim_q, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
def forward(self, a, len_a, q, len_q):
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
a = torch.unsqueeze(a, 1)
a = self.conv(a)
a = a.permute(0, 2, 1, 3).contiguous()
a = a.view(a.size(0), a.size(1), a.size(2) * a.size(3))
# self.lstm_a.flatten_parameters()
lstm_a, _ = self.lstm_a(a)
if self.bidirectional:
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
cat_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 1]),
dim=1)
cat_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
cat_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), -1]
cat_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
cat_a = torch.tanh(self.projection(cat_a)) # cat_a.size() == cat_q.size()
for sa in self.stacked_att:
cat_q = sa(cat_a, cat_q)
output = self.output(cat_q)
return output
|
daqa-master
|
daqa-mod/models.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import convnet, coordinates
class FiLM(nn.Module):
"""
Implements a FiLM block.
"""
def __init__(self, num_conv_filts_in, num_conv_filts, stride, dilation):
super(FiLM, self).__init__()
self.conv1 = nn.Conv2d(num_conv_filts_in,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.conv2 = nn.Conv2d(num_conv_filts,
num_conv_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)
self.batchnorm2 = nn.BatchNorm2d(num_conv_filts, affine=False)
def forward(self, x, gamma, beta):
b1 = F.relu(self.conv1(x))
b2 = self.batchnorm2(self.conv2(b1))
gamma = gamma.unsqueeze(2).unsqueeze(3).expand_as(b2)
beta = beta.unsqueeze(2).unsqueeze(3).expand_as(b2)
b2 = F.relu((b2 * gamma) + beta)
return (b1 + b2)
class MALiMo(nn.Module):
"""
Implements MALiMo.
"""
def __init__(self,
vocab_dim,
embedding_dim,
padding_idx,
lstm_hidden_dim_q,
num_lstm_layers_q,
bidirectional,
num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base,
input_dim,
a_aggregate,
lstm_hidden_dim_a,
num_lstm_layers_a,
use_coordinates,
num_conv_filts_film,
num_conv_layers_film,
stride_film,
dilation_film,
fcn_output_dim,
fcn_coeff_dim,
fcn_temp_dim,
aggregate,
output_hidden_dim,
output_dim):
super(MALiMo, self).__init__()
self.bidirectional = bidirectional
self.use_coordinates = use_coordinates
# Base convnet
self.conv, num_channels, freq_red = convnet(num_conv_filts_base,
num_conv_layers_base,
stride_base,
dilation_base)
# Compute required output dimension given convnet specs
# * 2 for gamma and beta. Assumes constant num filters per layer
num_feats = num_conv_filts_film * num_conv_layers_film * 2
self.num_conv_filts_film = num_conv_filts_film
self.num_conv_layers_film = num_conv_layers_film
# Audio Controller
if a_aggregate == 'max':
self.a_decoder_pool = nn.MaxPool2d(
kernel_size=(input_dim // freq_red, 8),
stride=(input_dim // freq_red, 8))
        elif a_aggregate == 'mean':
            self.a_decoder_pool = nn.AvgPool2d(
                kernel_size=(input_dim // freq_red, 8),
                stride=(input_dim // freq_red, 8))
else:
assert False, 'Unknown aggregate function.'
self.lstm_a = nn.LSTM(num_channels,
lstm_hidden_dim_a,
num_lstm_layers_a,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_a = (2 * lstm_hidden_dim_a)
else:
lstm_output_dim_a = lstm_hidden_dim_a
self.audio_decoder = nn.Linear(lstm_output_dim_a, num_feats)
# Question Controller
self.embeddings = nn.Embedding(vocab_dim,
embedding_dim,
padding_idx=padding_idx)
self.lstm_q = nn.LSTM(embedding_dim,
lstm_hidden_dim_q,
num_lstm_layers_q,
batch_first=True,
bidirectional=bidirectional)
if bidirectional:
lstm_output_dim_q = (2 * lstm_hidden_dim_q)
else:
lstm_output_dim_q = lstm_hidden_dim_q
self.question_decoder = nn.Linear(lstm_output_dim_q, num_feats)
# Modulated Layers
self.a_modulated_modules = []
self.q_modulated_modules = []
for i in range(num_conv_layers_film):
num_channels += 2 if use_coordinates else 0
afcm = FiLM(num_channels,
num_conv_filts_film,
stride_film,
dilation_film)
self.a_modulated_modules.append(afcm)
self.add_module('a_modulated_module_%d' % i, afcm)
num_channels = num_conv_filts_film
num_channels += 2 if use_coordinates else 0
qfcm = FiLM(num_channels,
num_conv_filts_film,
stride_film,
dilation_film)
self.q_modulated_modules.append(qfcm)
self.add_module('q_modulated_module_%d' % i, qfcm)
num_channels = num_conv_filts_film
num_conv_filts_film += 2 if use_coordinates else 0
self.conv1 = nn.Conv2d(num_conv_filts_film,
fcn_output_dim,
kernel_size=1,
padding=0)
if aggregate == 'max':
self.pool = nn.AdaptiveMaxPool2d((fcn_coeff_dim, fcn_temp_dim))
elif aggregate == 'mean':
self.pool = nn.AdaptiveAvgPool2d((fcn_coeff_dim, fcn_temp_dim))
else:
assert False, 'Unknown aggregate function.'
# Classifier
self.use_coordinates_class = (use_coordinates
and fcn_coeff_dim > 1
and fcn_temp_dim > 1)
fcn_output_dim += 2 if self.use_coordinates_class else 0
adaptive_pool_dim = fcn_output_dim * fcn_coeff_dim * fcn_temp_dim
self.output = nn.Sequential(nn.Linear(adaptive_pool_dim, output_hidden_dim),
nn.ReLU(),
nn.Linear(output_hidden_dim, output_dim))
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
def forward(self, a, len_a, q, len_q):
# Base convnet
a = torch.unsqueeze(a, 1)
a = self.conv(a)
# Audio Controller
pooled_a = self.a_decoder_pool(a)
pooled_a = torch.transpose(pooled_a, 1, 2)
pooled_a = pooled_a.view(pooled_a.size(0),
pooled_a.size(1),
pooled_a.size(2) * pooled_a.size(3))
lstm_a, _ = self.lstm_a(pooled_a)
if self.bidirectional:
bid_a = lstm_a.view(lstm_a.size(0),
lstm_a.size(1),
2,
int(lstm_a.size(2) / 2))
enc_a = torch.cat((bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 0],
bid_a[torch.arange(bid_a.size(0), dtype=torch.long),
-1, 1]),
dim=1)
else:
enc_a = lstm_a[torch.arange(lstm_a.size(0), dtype=torch.long), -1]
a_gammas_betas = self.audio_decoder(enc_a)
a_gammas_betas = a_gammas_betas.view(a_gammas_betas.size(0),
self.num_conv_layers_film,
self.num_conv_filts_film,
2)
# Question Controller
embeddings = self.embeddings(q)
packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings,
len_q,
batch_first=True)
# self.lstm_q.flatten_parameters()
lstm_q, _ = self.lstm_q(packed)
unpacked, lens = torch.nn.utils.rnn.pad_packed_sequence(lstm_q,
batch_first=True)
if self.bidirectional:
bid_q = unpacked.view(unpacked.size(0),
unpacked.size(1),
2,
int(unpacked.size(2) / 2))
enc_q = torch.cat((bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 0],
bid_q[torch.arange(bid_q.size(0), dtype=torch.long),
lens - 1, 1]),
dim=1)
else:
enc_q = unpacked[torch.arange(unpacked.size(0), dtype=torch.long), lens - 1]
q_gammas_betas = self.question_decoder(enc_q)
q_gammas_betas = q_gammas_betas.view(q_gammas_betas.size(0),
self.num_conv_layers_film,
self.num_conv_filts_film,
2)
# Modulated Layers
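        # Each modulated block first applies an audio-conditioned FiLM layer, then a
        # question-conditioned one; coordinate maps are optionally appended before each.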
for i, (afcm, qfcm) in enumerate(zip(self.a_modulated_modules,
self.q_modulated_modules)):
# Append coordinate maps
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
# see FiLM appendix for + 1
a = afcm(a, a_gammas_betas[:, i, :, 0] + 1, a_gammas_betas[:, i, :, 1])
# Append coordinate maps
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
# see FiLM appendix for + 1
a = qfcm(a, q_gammas_betas[:, i, :, 0] + 1, q_gammas_betas[:, i, :, 1])
# Classifier
if self.use_coordinates:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = self.conv1(a)
a = self.pool(a)
if self.use_coordinates_class:
coordinates_maps = coordinates(a.shape[2], a.shape[3]).to(a.device)
a = torch.cat((a, coordinates_maps.expand(a.size(0), -1, -1, -1)), 1)
a = a.view(a.size(0), -1)
output = self.output(a)
return output
|
daqa-master
|
daqa-mod/malimo.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
# import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets # NOQA F401
from data import DAQA
from models import LSTMN, FCNLSTMN, CONVLSTMN, FCNLSTMNSA, CONVLSTMNSA
from film import FiLM
from malimo import MALiMo
# Training settings
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--audio-training-set', type=str,
default='daqa_audio_train.h5',
help='Path to training data.')
parser.add_argument('--qa-training-set', type=str,
default='daqa_train_questions_answers.json',
help='Path to training data.')
parser.add_argument('--audio-test-set', type=str,
default='daqa_audio_val.h5',
help='Path to test data.')
parser.add_argument('--qa-test-set', type=str,
default='daqa_val_questions_answers.json',
help='Path to test data.')
# Settings
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='Random seed.')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disable CUDA.')
parser.add_argument('--multi-gpus', action='store_true', default=False,
help='Use all available GPUs.')
parser.add_argument('--distributed-parallel', action='store_true', default=False,
help='Distributed data parallel mode.')
parser.add_argument('--resume', action='store_true', default=False,
help='Resume training.')
parser.add_argument('--model', type=str, default='malimo',
help='Model to train.')
parser.add_argument('--embedding-dim', type=int, default=256,
help='Size of embedding layer.')
parser.add_argument('--lstm-hidden-dim-q', type=int, default=128,
help='Size of layer(s) in LSTM.')
parser.add_argument('--num-lstm-layers-q', type=int, default=1,
help='Number of layers in LSTM.')
parser.add_argument('--bidirectional', action='store_true', default=False,
help='Bidirectional LSTM.')
parser.add_argument('--num-conv-filts', type=int, default=16,
help='Number of filters in first layer in ConvNet.')
parser.add_argument('--num-conv-layers', type=int, default=5,
help='Number of layers in ConvNet.')
parser.add_argument('--stride', type=int, default=1,
help='Convolution stride.')
parser.add_argument('--dilation', type=int, default=1,
help='Convolution dilation.')
parser.add_argument('--fcn-output-dim', type=int, default=256,
help='Number of filters in final FCN layer.')
parser.add_argument('--fcn-coeff-dim', type=int, default=1,
help='Dimension along coefficients in adaptive pooling.')
parser.add_argument('--fcn-temp-dim', type=int, default=1,
help='Dimension along time in adaptive pooling.')
parser.add_argument('--aggregate', type=str, default='mean',
help='Function to aggregate over variable size input.')
parser.add_argument('--lstm-hidden-dim-a', type=int, default=128,
help='Size of layer(s) in LSTM.')
parser.add_argument('--num-lstm-layers-a', type=int, default=1,
help='Number of layers in LSTM.')
parser.add_argument('--stacked-att-dim', type=int, default=512,
help='Stacked attention layer dimension.')
parser.add_argument('--num-stacked-att', type=int, default=2,
help='Number of stacked attention layers.')
parser.add_argument('--use-coordinates', action='store_true', default=False,
help='Append coordinates to feature maps.')
parser.add_argument('--num-conv-filts-film', type=int, default=64,
help='Number of filters in first layer in film ConvNet.')
parser.add_argument('--num-conv-layers-film', type=int, default=2,
help='Number of layers in film ConvNet.')
parser.add_argument('--output-hidden-dim', type=int, default=1024,
help='Dimension of hidden layer before output layer.')
parser.add_argument('--optimizer', type=str, default='adam',
                    help='Optimizer.')
parser.add_argument('--lr', type=float, default=0.0001, metavar='L',
help='Learning rate.')
parser.add_argument('--l2', type=float, default=0.0001, metavar='M',
help='Weight decay.')
parser.add_argument('--dropout', type=float, default=0.0, metavar='R',
help='Dropout rate.')
parser.add_argument('--batch-size', type=int, default=1, metavar='N',
help='Batch size for training.')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='N',
help='Batch size for testing.')
parser.add_argument('--epochs', type=int, default=10, metavar='T',
help='Number of epochs to train.')
parser.add_argument('--early-stopping', action='store_true', default=False,
help='Early stopping.')
parser.add_argument('--anneal-learning-rate', action='store_true', default=False,
help='Anneal Learning Rate.')
parser.add_argument('--patience', type=int, default=10, metavar='P',
help='Number of epochs before early stopping.')
# Output
parser.add_argument('--show-log', action='store_true', default=False,
help='Log training status.')
parser.add_argument('--log-interval', type=int, default=1000, metavar='I',
                    help='Number of batches between logging status updates.')
parser.add_argument('--save-model', action='store_true', default=False,
help='Save current model.')
parser.add_argument('--model-dir', type=str, default='models',
help='Path to model.')
parser.add_argument('--model-name', type=str, default='model.pt',
help='Model name.')
parser.add_argument('--infer-only', action='store_true', default=False,
help='Run in test mode only.')
def build_model(args, vocab_dim, padding_idx, input_dim, output_dim):
if args.model == 'lstmn':
model = LSTMN(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
input_dim=input_dim,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'fcnlstmn':
model = FCNLSTMN(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'convlstmn':
model = CONVLSTMN(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
input_dim=input_dim,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'fcnlstmnsa':
model = FCNLSTMNSA(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
use_coordinates=args.use_coordinates,
stacked_att_dim=args.stacked_att_dim,
num_stacked_att=args.num_stacked_att,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'convlstmnsa':
model = CONVLSTMNSA(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
input_dim=input_dim,
num_conv_filts=args.num_conv_filts,
num_conv_layers=args.num_conv_layers,
stride=args.stride,
dilation=args.dilation,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
stacked_att_dim=args.stacked_att_dim,
num_stacked_att=args.num_stacked_att,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'film':
model = FiLM(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts_base=args.num_conv_filts,
num_conv_layers_base=args.num_conv_layers,
stride_base=args.stride,
dilation_base=args.dilation,
use_coordinates=args.use_coordinates,
num_conv_filts_film=args.num_conv_filts_film,
num_conv_layers_film=args.num_conv_layers_film,
stride_film=args.stride,
dilation_film=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
elif args.model == 'malimo':
model = MALiMo(vocab_dim=vocab_dim,
embedding_dim=args.embedding_dim,
padding_idx=padding_idx,
lstm_hidden_dim_q=args.lstm_hidden_dim_q,
num_lstm_layers_q=args.num_lstm_layers_q,
bidirectional=args.bidirectional,
num_conv_filts_base=args.num_conv_filts,
num_conv_layers_base=args.num_conv_layers,
stride_base=args.stride,
dilation_base=args.dilation,
input_dim=input_dim,
a_aggregate=args.aggregate,
lstm_hidden_dim_a=args.lstm_hidden_dim_a,
num_lstm_layers_a=args.num_lstm_layers_a,
use_coordinates=args.use_coordinates,
num_conv_filts_film=args.num_conv_filts_film,
num_conv_layers_film=args.num_conv_layers_film,
stride_film=args.stride,
dilation_film=args.dilation,
fcn_output_dim=args.fcn_output_dim,
fcn_coeff_dim=args.fcn_coeff_dim,
fcn_temp_dim=args.fcn_temp_dim,
aggregate=args.aggregate,
output_hidden_dim=args.output_hidden_dim,
output_dim=output_dim)
else:
assert False, 'Unknown model.'
return model
def save_state(args, epoch, model, optimizer, scheduler, train_loss, train_perf,
test_loss, test_perf, best_perf, patience, early_stopping, best=False):
checkpoint = os.path.join(args.model_dir, args.model_name)
kwargs = {
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'train_loss': train_loss,
'train_perf': train_perf,
'test_loss': test_loss,
'test_perf': test_perf,
'best_perf': best_perf,
'patience': patience,
'early_stopping': early_stopping,
}
if best:
checkpoint += '.best'
        # unwrap DataParallel / DistributedDataParallel if the model is wrapped
        kwargs['model_state_dict'] = (model.module.state_dict()
                                      if hasattr(model, 'module')
                                      else model.state_dict())
torch.save(kwargs, checkpoint)
else:
kwargs['model_state_dict'] = model.state_dict()
torch.save(kwargs, checkpoint)
def load_state(args, model, optimizer, scheduler):
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name))
model.load_state_dict(checkpoint['model_state_dict'])
sepoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
if 'optimizer_state_dict' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if 'scheduler_state_dict' in checkpoint:
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
train_loss = checkpoint['train_loss'] if 'train_loss' in checkpoint else 0
train_perf = checkpoint['train_perf'] if 'train_perf' in checkpoint else 0
test_loss = checkpoint['test_loss'] if 'test_loss' in checkpoint else 0
test_perf = checkpoint['test_perf'] if 'test_perf' in checkpoint else 0
best_perf = checkpoint['best_perf'] if 'best_perf' in checkpoint else 0
patience = checkpoint['patience'] if 'patience' in checkpoint else 0
if 'early_stopping' in checkpoint:
early_stopping = checkpoint['early_stopping']
else:
early_stopping = False
return sepoch, model, optimizer, scheduler, train_loss, train_perf, \
test_loss, test_perf, best_perf, patience, early_stopping
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (a, len_a, q, len_q, target) in enumerate(train_loader):
a = a.to(device)
len_a = len_a.to(device)
q = q.to(device)
len_q = len_q.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(a, len_a, q, len_q)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if args.show_log and batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx, len(train_loader),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss, correct, examples = 0., 0, 0
with torch.no_grad():
for a, len_a, q, len_q, target in test_loader:
a = a.to(device)
len_a = len_a.to(device)
q = q.to(device)
len_q = len_q.to(device)
target = target.to(device)
output = model(a, len_a, q, len_q)
test_loss += F.cross_entropy(output, target, reduction='sum').item()
label = output.argmax(dim=1, keepdim=True)
correct += label.eq(target.view_as(label)).sum().item()
examples += len(a)
test_loss /= examples
perf = correct / examples
# print('Average loss: {:.4f}, perf: {:.4f}%'.format(test_loss, 100. * perf))
return test_loss, perf
def main(id, args): # noqa C901
# Infra
use_cuda = not args.no_cuda and torch.cuda.is_available()
dist_parallel_mode = (use_cuda
and args.multi_gpus
and args.distributed_parallel
and torch.cuda.device_count() > 1)
if dist_parallel_mode:
dist.init_process_group(backend='nccl',
init_method='tcp://127.0.0.1:23456',
world_size=torch.cuda.device_count(),
rank=id)
torch.cuda.set_device(id)
device = torch.device('cuda:%d' % id)
else:
device = torch.device('cuda' if use_cuda else 'cpu')
if id == 0 and args.save_model:
if not os.path.isdir(args.model_dir):
os.makedirs(args.model_dir)
# Dataset
train_set = DAQA(args.audio_training_set, args.qa_training_set)
test_set = DAQA(args.audio_test_set,
args.qa_test_set,
train_set.stats,
train_set.word_to_ix,
train_set.answer_to_ix)
if dist_parallel_mode:
sampler_kwargs = {'num_replicas': torch.cuda.device_count(), 'rank': id}
train_sampler = torch.utils.data.DistributedSampler(train_set, **sampler_kwargs)
# test_sampler = torch.utils.data.DistributedSampler(test_set, **sampler_kwargs)
# The above is commented out because we only evaluate on the main process
        # Note also that evaluating with train_sampler then covers only this
        # process's shard of the training set, which is acceptable (and faster).
test_sampler = torch.utils.data.RandomSampler(test_set)
batch_size = int(args.batch_size / torch.cuda.device_count())
else:
train_sampler = torch.utils.data.RandomSampler(train_set)
test_sampler = torch.utils.data.RandomSampler(test_set)
batch_size = args.batch_size
assert batch_size == 1, 'Batch size / number of GPUs != 1.'
loader_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(train_set,
batch_size=batch_size,
sampler=train_sampler,
collate_fn=DAQA.pad_collate_fn,
**loader_kwargs)
test_loader = torch.utils.data.DataLoader(test_set,
batch_size=batch_size,
sampler=test_sampler,
collate_fn=DAQA.pad_collate_fn,
**loader_kwargs)
# Model
model = build_model(args,
vocab_dim=len(train_set.word_to_ix),
padding_idx=train_set.word_to_ix['<pad>'],
input_dim=train_set.stats['mean'].shape[0],
output_dim=len(train_set.answer_to_ix))
model = model.to(device)
# GPU / multi-GPU / distributed multi-GPU
if dist_parallel_mode:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[id],
output_device=id,
check_reduction=True,
broadcast_buffers=False)
if id == 0:
print('DistributedDataParallel! Using', device)
elif (use_cuda
and args.multi_gpus
and torch.cuda.device_count() > 1):
model = nn.DataParallel(model)
print('DataParallel! Using', torch.cuda.device_count(), 'GPUs!')
else:
print('Single CPU/GPU! Using', device)
# Optimizer and scheduler
if args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.l2)
elif args.optimizer == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(),
lr=args.lr,
weight_decay=args.l2)
else:
assert False, 'Unknown optimizer.'
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', 0.1,
int(args.patience / 2),
verbose=True)
checkpoint_pt = os.path.join(args.model_dir, args.model_name)
# Inference
if args.infer_only:
if id == 0:
if os.path.isfile(checkpoint_pt):
print('Testing: ' + checkpoint_pt)
_, model, optimizer, scheduler, _, _, _, _, _, _, _ = \
load_state(args, model, optimizer, scheduler)
print(' ')
                print('Hyperparameters')
print(args)
print(' ')
print('Model')
print(model)
print(' ')
print('Start testing.')
test_loss, test_perf = test(args, model, device, test_loader)
print(('Test loss: {:.3f}, Test Perf: {:.3f}%.').format(
test_loss,
100. * test_perf))
else:
print('Could not find model to test.')
return # inference done, nothing else to do here.
    # Initialize or load from existing checkpoint
if (args.resume and os.path.isfile(checkpoint_pt)):
if id == 0:
print('Continue training from: ' + checkpoint_pt)
sepoch, model, optimizer, scheduler, train_loss, train_perf, \
test_loss, test_perf, best_perf, patience, early_stopping = \
load_state(args, model, optimizer, scheduler)
else:
sepoch = 0
best_perf, patience = 0., 0
early_stopping = False
if id == 0: # evaluate only on main process
print(' ')
        print('Hyperparameters')
print(args)
print(' ')
print('Model')
print(model)
print(' ')
print('Start training.')
train_loss, train_perf = test(args, model, device, train_loader)
test_loss, test_perf = test(args, model, device, test_loader)
print(('Epoch {:03d}. Train loss: {:.3f}, Train Perf: {:.3f}%'
+ '. Test loss: {:.3f}, Test Perf: {:.3f}%.').format(sepoch,
train_loss,
100. * train_perf,
test_loss,
100. * test_perf))
else: # Other processes don't need this
train_loss, train_perf, test_loss, test_perf = 0, 0, 0, 0
# Force other processes to wait
if dist_parallel_mode or dist.is_initialized():
dist.barrier()
# Training loop
for epoch in range(sepoch + 1, args.epochs + 1):
# Load latest checkpoint to synchronize optimizer, early stopping, etc.
if dist_parallel_mode and epoch > sepoch + 1:
if args.anneal_learning_rate or args.early_stopping:
checkpoint = torch.load(checkpoint_pt)
if args.anneal_learning_rate:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if args.early_stopping:
early_stopping = checkpoint['early_stopping']
if early_stopping:
                print('Early Stopping! Id: ' + str(id))
break
        # DistributedSampler requires manually setting the epoch for randomization
if dist_parallel_mode:
train_loader.sampler.set_epoch(epoch)
            # test_loader uses a RandomSampler, which doesn't require this
# Train
train(args, model, device, train_loader, optimizer, epoch)
# Force other processes to wait
if dist_parallel_mode or dist.is_initialized():
dist.barrier()
# Eval
if id == 0: # evaluate only on main process
train_loss, train_perf = test(args, model, device, train_loader)
test_loss, test_perf = test(args, model, device, test_loader)
print(('Epoch {:03d}. Train loss: {:.3f}, Train Perf: {:.3f}%'
+ '. Test loss: {:.3f}, Test Perf: {:.3f}%.').format(epoch,
train_loss,
100. * train_perf,
test_loss,
100. * test_perf))
if args.anneal_learning_rate:
scheduler.step(test_perf)
# Monitor best performance so far assuming higher better
if test_perf > best_perf:
best_perf, patience = test_perf, 0
print('Best Model at Epoch ' + str(epoch))
if args.save_model:
save_state(args, epoch, model, optimizer, scheduler,
train_loss, train_perf, test_loss, test_perf,
best_perf, patience, early_stopping, best=True)
else:
patience += 1
if args.early_stopping and (patience >= args.patience):
early_stopping = True
if (args.save_model):
save_state(args, epoch, model, optimizer, scheduler,
train_loss, train_perf, test_loss, test_perf,
best_perf, patience, early_stopping)
# If there is only a single process then break now
# If > a single process then all processes break start of next epoch
if not dist_parallel_mode and early_stopping:
print('Early Stopping!')
break
# Force other processes to wait
if dist_parallel_mode or dist.is_initialized():
dist.barrier()
def union(args):
# Set seed
# np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (not args.no_cuda
and torch.cuda.is_available()
and args.multi_gpus
and args.distributed_parallel
and torch.cuda.device_count() > 1):
assert args.batch_size == torch.cuda.device_count(), \
            'Batch size must equal the number of GPUs.'
if not args.save_model:
assert not args.anneal_learning_rate, \
'Checkpoints are used to synchronize learning rate.'
assert not args.early_stopping, \
'Checkpoints are used to synchronize early stopping flag.'
print('Distributed!')
mp.spawn(main, nprocs=torch.cuda.device_count(), args=(args,), daemon=False)
else:
        assert args.batch_size == 1, 'Illegal batch size > 1 for non-distributed mode.'
main(0, args)
if __name__ == '__main__':
args = parser.parse_args()
union(args)
print('Success!')
|
daqa-master
|
daqa-mod/main.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
def convnet(num_conv_filts, num_conv_layers, stride, dilation, max_n_filts=512):
"""
Implements num_conv_layers conv layers a la VGG.
"""
layers = []
in_channels = 1
n_filts = num_conv_filts
conv_red_dim = 1 # subsampling factor
for _ in range(num_conv_layers):
if len(layers) == 0:
layers += [nn.Conv2d(in_channels,
n_filts,
kernel_size=(12, 3),
padding=1,
stride=(9, stride),
dilation=dilation)]
else:
layers += [nn.Conv2d(in_channels,
n_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)]
layers += [nn.BatchNorm2d(n_filts, affine=True)]
layers += [nn.ReLU()]
layers += [nn.Conv2d(n_filts,
n_filts,
kernel_size=3,
padding=1,
stride=stride,
dilation=dilation)]
layers += [nn.BatchNorm2d(n_filts, affine=True)]
layers += [nn.ReLU()]
if conv_red_dim <= 32:
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
conv_red_dim *= 2 # max pooled (only correct for frequency dim)
in_channels = n_filts
n_filts = 2 * n_filts if n_filts < max_n_filts else n_filts
return nn.Sequential(*layers), in_channels, conv_red_dim
def coordinates(x, y, start=-1, end=1):
"""
Returns a map of coordinates with x rows and y columns.
Input:
- x: rows
- y: columns
Returns:
- xy_coords: 1 x 2 x 'x' x y
"""
x_row = torch.linspace(start, end, steps=y) # y
y_row = torch.linspace(start, end, steps=x) # x
    x_coords = x_row.unsqueeze(0).expand(x, y).unsqueeze(0)  # 1 x 'x' x y
    y_coords = y_row.unsqueeze(1).expand(x, y).unsqueeze(0)  # 1 x 'x' x y
    # 1 x 2 x 'x' x y
    return torch.cat([x_coords, y_coords], 0).unsqueeze(0)
class StackedAttention1D(nn.Module):
"""
Adapted from clevr-iep/blob/master/iep/models/baselines.py
"""
def __init__(self, input_dim, hidden_dim):
super(StackedAttention1D, self).__init__()
self.Wa = nn.Linear(input_dim, hidden_dim)
self.Wu = nn.Linear(input_dim, hidden_dim)
self.Wp = nn.Linear(hidden_dim, input_dim)
def forward(self, a, u):
"""
Input:
- a: N x D
- u: N x D
Returns:
- next_u: N x D
"""
a_proj = self.Wa(a) # N x K
u_proj = self.Wu(u) # N x K
h = torch.tanh(a_proj + u_proj)
p = F.softmax(self.Wp(h), dim=1) # N x D
a_tilde = p * a # N x D
next_u = a_tilde + u # N x D
return next_u
class StackedAttention(nn.Module):
"""
Adapted from clevr-iep/blob/master/iep/models/baselines.py
"""
def __init__(self, input_dim, hidden_dim):
super(StackedAttention, self).__init__()
self.Wv = nn.Conv2d(input_dim, hidden_dim, kernel_size=1, padding=0)
self.Wu = nn.Linear(input_dim, hidden_dim)
self.Wp = nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0)
self.hidden_dim = hidden_dim
self.attention_maps = None
def forward(self, v, u):
"""
Input:
- v: N x D x H x W
- u: N x D
Returns:
- next_u: N x D
"""
N, K = v.size(0), self.hidden_dim
H, W = v.size(2), v.size(3)
v_proj = self.Wv(v) # N x K x H x W
u_proj = self.Wu(u) # N x K
u_proj_expand = u_proj.view(N, K, 1, 1).expand(N, K, H, W)
h = torch.tanh(v_proj + u_proj_expand)
p = F.softmax(self.Wp(h).view(N, H * W), dim=1).view(N, 1, H, W)
self.attention_maps = p.data.clone()
v_tilde = (p.expand_as(v) * v).sum((2, 3))
next_u = u + v_tilde
return next_u
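# Hedged usage sketch (not part of the original repo): quick shape checks for the
# helpers above, with illustrative sizes only. Run as `python layers.py`.
if __name__ == '__main__':
    feat, channels, freq_red = convnet(num_conv_filts=16, num_conv_layers=3,
                                       stride=1, dilation=1)
    spec = torch.randn(2, 1, 300, 64)  # (batch, 1, frames, mels); sizes are made up
    print(feat(spec).shape, channels, freq_red)  # expect torch.Size([2, 64, 4, 8]) 64 8
    print(coordinates(4, 6).shape)  # expect torch.Size([1, 2, 4, 6])
    att = StackedAttention(input_dim=8, hidden_dim=16)
    print(att(torch.randn(2, 8, 5, 7), torch.randn(2, 8)).shape)  # expect torch.Size([2, 8])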
|
daqa-master
|
daqa-mod/layers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import re
import h5py
import torch
from torch.utils.data.dataloader import default_collate
from torchvision import datasets # NOQA F401
class DAQA(torch.utils.data.Dataset):
_special_ix = {'<pad>': 0}
def __init__(self, audio_pt, ques_ans_pt, stats=None,
word_to_ix=None, answer_to_ix=None):
# Read audio HDF5 file
self.audio = h5py.File(audio_pt, 'r')
if stats is None:
self.stats = {}
self.stats['mean'] = self.audio['mean'][:]
self.stats['stddev'] = self.audio['stddev'][:]
else:
self.stats = stats
        # h5py doesn't support sharing one file handle across multiple worker
        # processes, so store the path and reopen the file in __getitem__; to read
        # audio from memory instead, comment out the following two lines and amend
        # __getitem__ accordingly.
self.audio.close()
self.audio = audio_pt
# Read JSON file
with open(ques_ans_pt, 'r') as f:
questions_answers = json.load(f)
# Audio, questions, and answers to a nice list
dataset = []
for i in range(len(questions_answers['questions'])):
aud = questions_answers['questions'][i]['audio_filename'][:-4]
ques = questions_answers['questions'][i]['question']
ans = questions_answers['questions'][i]['answer_token']
dataset.append({'audio': aud, 'question': ques, 'answer': ans})
if word_to_ix is None:
self.word_to_ix = DAQA.build_vocab_questions(dataset, DAQA._special_ix)
else:
self.word_to_ix = word_to_ix
dataset = DAQA.encode_questions(dataset, self.word_to_ix)
if answer_to_ix is None:
self.answer_to_ix = DAQA.build_vocab_answers(dataset)
else:
self.answer_to_ix = answer_to_ix
dataset = DAQA.encode_answers(dataset, self.answer_to_ix)
self.dataset = dataset
# Pack questions and answers for each audio into a nice dictionary.
dataset_wrt_audio = {}
for i in range(len(dataset)):
aud = dataset[i]['audio']
ques = dataset[i]['question']
ans = dataset[i]['answer']
if aud not in dataset_wrt_audio:
dataset_wrt_audio[aud] = [{'question': ques, 'answer': ans}]
else:
dataset_wrt_audio[aud] += [{'question': ques, 'answer': ans}]
self.dataset_wrt_audio = dataset_wrt_audio
def __len__(self):
# return len(self.dataset)
return len(self.dataset_wrt_audio)
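    # __getitem__ returns *all* question/answer tuples for one audio clip as a list;
    # pad_collate_fn below flattens these per-audio lists into a single batch.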
def __getitem__(self, index):
sub_mini_batch = []
        audio = sorted(self.dataset_wrt_audio)[index]  # NOTE: sorted on every access; could be cached in __init__
audio_pt = h5py.File(self.audio, 'r') # swmr=True
a = audio_pt[audio][:]
a = torch.tensor((a - self.stats['mean']) / self.stats['stddev'])
# The previous 3 lines should be commented if reading audio from memory,
# as well as audio_pt.close() below.
# The following line should be uncommented if reading audio from memory.
# a = torch.tensor((self.audio[audio][:] - self.stats['mean'])
# / self.stats['stddev'])
len_a = torch.tensor(a.shape[0], dtype=torch.long)
for qas in range(len(self.dataset_wrt_audio[audio])):
q = torch.tensor(self.dataset_wrt_audio[audio][qas]['question'],
dtype=torch.long)
len_q = torch.tensor(len(q), dtype=torch.long)
y = torch.tensor(self.dataset_wrt_audio[audio][qas]['answer'],
dtype=torch.long)
sub_mini_batch += [(a, len_a, q, len_q, y)]
audio_pt.close()
return sub_mini_batch
@staticmethod
def build_vocab_questions(d, special_ix):
        to_ix = dict(special_ix)  # copy the special tokens so the shared class-level dict is not mutated
for i in range(len(d)):
# Remove punctuation, lower case, convert to list of words
qr = re.sub(r'[^\w\s]', '', d[i]['question']).lower().split()
for w in qr:
if w not in to_ix:
to_ix[w] = len(to_ix)
return to_ix
@staticmethod
def build_vocab_answers(d):
to_ix = {}
for i in range(len(d)):
if d[i]['answer'] not in to_ix:
to_ix[d[i]['answer']] = len(to_ix)
return to_ix
@staticmethod
def encode_questions(d, to_ix):
for i in range(len(d)):
qr = re.sub(r'[^\w\s]', '', d[i]['question']).lower().split()
d[i]['question'] = [to_ix[w] for w in qr if w in to_ix]
            # NOTE: `if w in to_ix` silently drops out-of-vocabulary words, which is potentially dangerous
return d
@staticmethod
def encode_answers(d, to_ix):
for i in range(len(d)):
d[i]['answer'] = to_ix[d[i]['answer']]
return d
@staticmethod
def pad_collate_fn(batch):
"""
Input: a list of list((A, len_A, Q, len_Q, Ans)).
"""
batch = [i for j in batch for i in j] # unpack list of lists to list
pad_idx = DAQA._special_ix['<pad>']
# Sort batch wrt to length of question
batch = sorted(batch, key=lambda x: x[3], reverse=True) # sort wrt Q
max_len_q = batch[0][3]
# Pad questions with pad_idx
for i in range(len(batch)):
x = torch.ones(max_len_q, dtype=batch[i][2].dtype) * pad_idx
x[:batch[i][2].size(0)] = batch[i][2]
batch[i] = (batch[i][0], batch[i][1], x, batch[i][3], batch[i][4])
return default_collate(batch)
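# Hedged usage sketch (not part of the original repo): pad_collate_fn takes a list of
# per-audio lists of (A, len_A, Q, len_Q, Ans) tuples, pads every question with the
# <pad> index to the longest question in the batch, and stacks everything with
# default_collate. Synthetic tensors only; no HDF5/JSON files are needed.
if __name__ == '__main__':
    a1, a2 = torch.randn(50, 64), torch.randn(50, 64)
    item1 = [(a1, torch.tensor(50), torch.tensor([3, 4, 5]), torch.tensor(3),
              torch.tensor(1))]
    item2 = [(a2, torch.tensor(50), torch.tensor([6, 7]), torch.tensor(2),
              torch.tensor(0))]
    a, len_a, q, len_q, y = DAQA.pad_collate_fn([item1, item2])
    print(q)  # shorter question padded with 0 -> tensor([[3, 4, 5], [6, 7, 0]])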
|
daqa-master
|
daqa-mod/data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import h5py
import numpy as np
import scipy
import scipy.io.wavfile
import librosa
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--input-wavs', default='wavs', type=str,
help='Path to folder with wavs to process.')
parser.add_argument('--input-features', default='features', type=str,
help='Path to folder with mels to process.')
# Settings
parser.add_argument('--compute-features', action='store_true', default=False,
help='Compute features.')
parser.add_argument('--window', default=0.025, type=float,
help='Window size (s).')
parser.add_argument('--stride', default=0.01, type=float,
help='Window stride (s).')
parser.add_argument('--num-mels', default=64, type=int,
help='Number of Mel coefficients.')
parser.add_argument('--astype', default='float32', type=str,
help='Data type for storage.')
parser.add_argument('--pack-features', action='store_true', default=False,
help='Pack features.')
parser.add_argument('--compressed', action='store_true', default=False,
help='Compress features.')
# Output
parser.add_argument('--output-features', default='features', type=str,
help='Path to folder with processed features.')
parser.add_argument('--output-file', default='features.hdf5', type=str,
help='Path to file with processed features.')
def compute_features(args):
"""
Compute MFSCs for all audio wav files in a given directory.
"""
print('Computing features...')
if not os.path.isdir(args.output_features):
os.makedirs(args.output_features)
lst_wavs = os.listdir(args.input_wavs)
lst_wavs = [e[:-4] for e in lst_wavs if e.endswith('.wav')]
counter = 0
for i in lst_wavs:
try:
fs, audio = scipy.io.wavfile.read(os.path.join(args.input_wavs,
i + '.wav'))
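            # Window and stride are given in seconds; convert to samples via the
            # file's sample rate for n_fft and hop_length.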
mfsc = librosa.feature.melspectrogram(y=audio.astype(float),
sr=fs,
n_fft=int(fs * args.window),
n_mels=args.num_mels,
hop_length=int(fs * args.stride),
power=1)
mfsc = librosa.power_to_db(mfsc, ref=np.max).T.astype(args.astype)
np.save(os.path.join(args.output_features, i), mfsc)
except Exception:
print('Error processing: ' + str(i))
counter += 1
if counter % 1000 == 0:
print('Finished processing: ' + str(counter) + ' files.')
def pack_features(args):
"""
Pack all npy MFSCs in a given directory into a single hdf file.
"""
print('Packing features...')
lst_npys = os.listdir(args.input_features)
lst_npys = [e[:-4] for e in lst_npys if e.endswith('.npy')]
counter = 0
# Variables for Welford’s mean and variance
n, mean, v = 0, np.zeros(args.num_mels), np.zeros(args.num_mels)
kwargs = {'compression': 'gzip', 'compression_opts': 9} if args.compressed else {}
with h5py.File(args.output_file, 'w') as f:
for i in lst_npys:
mfsc = np.load(os.path.join(args.output_features, i + '.npy'))
f.create_dataset(i, data=mfsc, dtype=args.astype,
**kwargs)
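            # Welford's online update, applied per mel bin one frame at a time:
            # mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n and
            # M2_n = M2_{n-1} + (x_n - mean_{n-1}) * (x_n - mean_n),
            # so that var = M2 / (n - 1) after the loop.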
for w in range(mfsc.shape[0]):
n += 1
delta = mfsc[w] - mean
mean += delta / n
v += (mfsc[w] - mean) * delta
counter += 1
if counter % 1000 == 0:
print('Finished packing: ' + str(counter) + ' files.')
var = v / (n - 1)
stddev = np.sqrt(var)
f.create_dataset('mean',
data=mean.astype(args.astype),
dtype=args.astype,
**kwargs)
f.create_dataset('variance',
data=var.astype(args.astype),
dtype=args.astype,
**kwargs)
f.create_dataset('stddev',
data=stddev.astype(args.astype),
dtype=args.astype,
**kwargs)
def main(args):
if args.compute_features:
compute_features(args)
if args.pack_features:
pack_features(args)
if not args.compute_features and not args.pack_features:
        print("P.S. I didn't do anything. Both compute and pack features are false.")
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-mod/compute_audio_features.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import json
import random
import numpy as np
from qpas.exist import (was_there,
was_there_two_and,
was_there_two_or,
# was_there_source,
# was_there_source_two_and,
# was_there_source_two_or,
was_there_relative,
was_there_immediate_relative,
was_there_similar_ordinal,
was_there_similar_loudness,
was_there_at_least_two_similar_loudness,
was_there_similar_loudness_ordinal,
was_there_at_least_two_similar_loudness_ordinal,
was_there_similar_duration,
was_there_at_least_two_similar_duration,
was_there_similar_duration_ordinal,
was_there_at_least_two_similar_duration_ordinal,
)
from qpas.query import (what_was,
what_was_relative,
what_was_loudness,
what_was_loudness_relative,
what_was_loudness_relative_ordinal,
what_was_duration,
what_was_duration_relative,
what_was_duration_relative_ordinal,
)
from qpas.count import (how_many,
how_many_event,
how_many_ordinal,
how_many_event_two,
how_many_event_two_ordinal,
how_many_sounds_relative,
how_many_sounds_relative_ordinal,
how_many_event_relative,
how_many_event_relative_ordinal,
how_many_sounds_loudness_event,
how_many_sounds_loudness_ordinal,
how_many_sounds_duration_event,
how_many_sounds_duration_ordinal,
)
from qpas.compare import (compare_ordinal,
compare_ordinal_event,
compare_loudness,
compare_loudness_ordinal,
compare_loudness_event_ordinal,
compare_loudness_ordinal_event,
compare_same_loudness,
compare_same_loudness_ordinal,
compare_same_loudness_event_ordinal,
compare_duration,
compare_duration_ordinal,
compare_duration_event_ordinal,
compare_duration_ordinal_event,
compare_same_duration,
compare_same_duration_ordinal,
compare_same_duration_event_ordinal,
)
from qpas.compare_integer import (less_than,
equal_to,
more_than,
)
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--dataset', default='daqa.json', type=str,
help='JSON file describing the dataset.')
parser.add_argument('--input_narrative_file',
default='../daqa/daqa_narratives.json',
help="Path to narratives JSON file.")
parser.add_argument('--start_narrative_idx', default=0, type=int,
help='Start reading from start_narrative_idx.')
# Settings
parser.add_argument('--set', default='new',
help='Set name: train / val / test.')
parser.add_argument('--num_questions_per_narrative', default=10, type=int,
help='Number of questions per narrative.')
parser.add_argument('--patience_narrative', default=10, type=int,
                    help='Number of failed (ill-posed) attempts per narrative '
                         'before giving up on it.')
parser.add_argument('--patience_template', default=10, type=int,
                    help='Number of failed (skewed-answer) attempts per template '
                         'before giving up on it.')
parser.add_argument('--rel_diff', default=0.1, type=float,
                    help='Loudness sensitivity (relative difference, e.g. 0.1 = 10%).')
parser.add_argument('--max_diff', default=0.05, type=float,
help='Maximum difference between (in)frequent answers.')
parser.add_argument('--seed', default=0, type=int, help='Random Seed.')
parser.add_argument('--version', default='1.0', type=str, help='Version.')
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)',
help='License.')
parser.add_argument('--date',
default=datetime.datetime.today().strftime("%m/%d/%Y"),
help="Date.")
# Output
parser.add_argument('--start_output_idx', default=0, type=int,
help='Start numbering from start_output_idx.')
parser.add_argument('--output_qa_file',
default='../daqa/daqa_questions_answers.json',
help="Path to questions answers JSON file.")
def tokenize_answer(dataset, ans):
# Tokenize answer
anss = ans.split(' ')
for e in dataset['events']:
lst_syn = dataset['sources'][e] + dataset['actions'][e]
lst_syn = ' '.join(s for s in lst_syn)
lst_check = []
for a in anss:
lst_check.append((' ' + a + ' ') in (' ' + lst_syn + ' '))
if all(lst_check):
ans = e
return ans
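# add_answer is a simple rejection-sampling rule that keeps each template's answer
# distribution roughly balanced: a candidate is rejected only when it is already the
# most frequent answer for that template and accepting it would push the gap to the
# least frequent answer past max_diff.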
def add_answer(ans_dist_per_temp, ques_temp, ans_tk, max_diff):
# Only one answer seen so far for this template
if len(ans_dist_per_temp[ques_temp].keys()) <= 1:
return True
# First instance of this answer in this template
if ans_dist_per_temp[ques_temp][ans_tk] == 0:
return True
num_occ = sorted(((v, k) for k, v in ans_dist_per_temp[ques_temp].items()))
# Not the most frequent answer
if num_occ[-1][1] != ans_tk:
return True
# Difference between the (most + 1) and least frequent is less than max_diff
if ((num_occ[-1][0] + 1) - num_occ[0][0]) <= max_diff:
return True
return False
def main(args):
"""Randomly sample questions for given narrative and deduce answer."""
random.seed(args.seed)
np.random.seed(args.seed)
# Read dataset description and narratives
with open(args.dataset, 'r') as f:
dataset = json.load(f)
with open(args.input_narrative_file, 'r') as f:
narratives = json.load(f)
assert args.set == narratives['info']['set'], 'train/val/test mismatch.'
templates = [was_there,
was_there_two_and,
was_there_two_or,
# was_there_source,
# was_there_source_two_and,
# was_there_source_two_or,
was_there_relative,
was_there_immediate_relative,
was_there_similar_ordinal,
was_there_similar_loudness,
was_there_at_least_two_similar_loudness,
was_there_similar_loudness_ordinal,
was_there_at_least_two_similar_loudness_ordinal,
was_there_similar_duration,
was_there_at_least_two_similar_duration,
was_there_similar_duration_ordinal,
was_there_at_least_two_similar_duration_ordinal,
what_was,
what_was_relative,
what_was_loudness,
what_was_loudness_relative,
what_was_loudness_relative_ordinal,
what_was_duration,
what_was_duration_relative,
what_was_duration_relative_ordinal,
how_many,
how_many_event,
how_many_ordinal,
how_many_event_two,
how_many_event_two_ordinal,
how_many_sounds_relative,
how_many_sounds_relative_ordinal,
how_many_event_relative,
how_many_event_relative_ordinal,
how_many_sounds_loudness_event,
how_many_sounds_loudness_ordinal,
how_many_sounds_duration_event,
how_many_sounds_duration_ordinal,
compare_ordinal,
compare_ordinal_event,
compare_loudness,
compare_loudness_ordinal,
compare_loudness_event_ordinal,
compare_loudness_ordinal_event,
compare_same_loudness,
compare_same_loudness_ordinal,
compare_same_loudness_event_ordinal,
compare_duration,
compare_duration_ordinal,
compare_duration_event_ordinal,
compare_duration_ordinal_event,
compare_same_duration,
compare_same_duration_ordinal,
compare_same_duration_event_ordinal,
less_than,
equal_to,
more_than,
]
print('Generating ' + str(args.num_questions_per_narrative)
+ ' questions for each of the ' + str(len(narratives['narratives']))
+ ' narratives.')
idx = args.start_output_idx
lst_questions = []
num_skewed_answers = 0
num_illposed_questions = 0
ans_dist_per_temp = {}
    # Scale max_diff so the allowed gap between the most and least frequent answers
    # per template is a fixed fraction of the expected questions per template,
    # independent of the set size.
max_diff = (args.max_diff
* ((len(narratives['narratives']) - args.start_narrative_idx)
* args.num_questions_per_narrative) / len(templates))
for n in range(args.start_narrative_idx, len(narratives['narratives'])):
narrative = narratives['narratives'][n]
num_questions, patience_narrative = 0, 0
while num_questions < args.num_questions_per_narrative:
question_template = random.choice(templates)
try: # catch illposed questions
patience_template = 0
while patience_template < args.patience_template:
ques, ans = question_template(dataset, narrative, args.rel_diff)
ans_tk = tokenize_answer(dataset, ans)
ques_temp_name = question_template.__name__
if ques_temp_name not in ans_dist_per_temp:
ans_dist_per_temp[ques_temp_name] = {}
if ans_tk not in ans_dist_per_temp[ques_temp_name]:
ans_dist_per_temp[ques_temp_name][ans_tk] = 0
if add_answer(ans_dist_per_temp, ques_temp_name,
ans_tk, max_diff):
question = {
'set': narrative['set'],
'audio_index': narrative['audio_index'],
'audio_filename': narrative['audio_filename'],
'question_template': ques_temp_name,
'question': ques,
'answer': ans,
'answer_token': ans_tk,
}
lst_questions.append(question)
ans_dist_per_temp[ques_temp_name][ans_tk] += 1
idx += 1
num_questions += 1
break
else:
patience_template += 1
num_skewed_answers += 1
if patience_template >= args.patience_template:
print('R1. Out of patience for narrative #' + str(n)
+ ' for template: ' + ques_temp_name + '.')
except AssertionError as error:
print(error)
patience_narrative += 1
num_illposed_questions += 1
if patience_narrative >= args.patience_narrative:
print('R2. Out of patience for narrative #' + str(n) + '.')
break
print('Generated ' + str(idx) + ' questions.')
print('Failed to generate ' + str(num_skewed_answers) + ' questions.'
+ ' Reason: skewed answers.')
print('Failed to generate ' + str(num_illposed_questions) + ' questions.'
+ ' Reason: illposed questions.')
print('Total number of attempts: '
+ str(idx + num_skewed_answers + num_illposed_questions))
output = {
'info': {
'set': args.set,
'version': args.version,
'date': args.date,
'license': args.license,
},
'questions': lst_questions
}
with open(args.output_qa_file, 'w') as f:
json.dump(output, f)
return True
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-gen/generate_questions_answers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Events with urls are a subset of AudioSet, see https://research.google.com/audioset/.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
def main():
sources = {
1: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 160,
'end': 180,
},
2: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 200,
'end': 215,
},
3: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 220,
'end': 238,
},
4: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 250,
'end': 268,
},
5: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 270,
'end': 290,
},
6: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 310,
'end': 326,
},
7: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 330,
'end': 342,
},
8: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 346,
'end': 364,
},
9: {
'event': 'a000',
'url': 'dAOa3WbL54w',
'start': 366,
'end': 377,
},
10: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 280,
'end': 299,
},
11: {
'event': 'a000',
'url': '3klGi-ujenE',
'start': 65,
'end': 84,
},
12: {
'event': 'a000',
'url': '8W0KcQLImuo',
'start': 130,
'end': 150,
},
13: {
'event': 'a000',
'url': '9KSO1R50AXY',
'start': 33,
'end': 42,
},
14: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 92,
'end': 112,
},
15: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 116,
'end': 130,
},
16: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 131,
'end': 145,
},
17: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 146,
'end': 159,
},
18: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 160,
'end': 175,
},
19: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 176,
'end': 196,
},
20: {
'event': 'a000',
'url': 'AqwOCxkhrjI',
'start': 198,
'end': 218,
},
#######################################################################
21: {
'event': 'b000',
'url': '-6krAYK2LLo',
'start': 11,
'end': 25,
},
22: {
'event': 'b000',
'url': '-6krAYK2LLo',
'start': 38,
'end': 48,
},
23: {
'event': 'b000',
'url': '-8wQV7VJnmM',
'start': 0,
'end': 20,
},
24: {
'event': 'b000',
'url': '-DYZX74qgFQ',
'start': 10,
'end': 24,
},
25: {
'event': 'b000',
'url': '-DYZX74qgFQ',
'start': 37,
'end': 50,
},
26: {
'event': 'b000',
'url': '-DYZX74qgFQ',
'start': 570,
'end': 579,
},
27: {
'event': 'b000',
'url': '-NPqCu4DyAM',
'start': 17,
'end': 28,
},
28: {
'event': 'b000',
'url': '-u5yvewHxzE',
'start': 0,
'end': 17,
},
29: {
'event': 'b000',
'url': '-u5yvewHxzE',
'start': 414,
'end': 424,
},
30: {
'event': 'b000',
'url': '-u5yvewHxzE',
'start': 590,
'end': 604,
},
31: {
'event': 'b000',
'url': '03frQGyrgQ4',
'start': 1,
'end': 21,
},
32: {
'event': 'b000',
'url': '08YFRFx-g7s',
'start': 0,
'end': 17,
},
33: {
'event': 'b000',
'url': '08YFRFx-g7s',
'start': 20,
'end': 29,
},
34: {
'event': 'b000',
'url': '0WWuZRd-O3c',
'start': 0,
'end': 12,
},
35: {
'event': 'b000',
'url': 'fPIG7nrpgec',
'start': 15,
'end': 30,
},
36: {
'event': 'b000',
'url': 'fYvUB-qy4IM',
'start': 0,
'end': 14,
},
37: {
'event': 'b000',
'url': 'gIQ4QrKXjCc',
'start': 0,
'end': 20,
},
38: {
'event': 'b000',
'url': 'i5TlfRqdawk',
'start': 13,
'end': 28,
},
39: {
'event': 'b000',
'url': 'iUyxzXcyrqI',
'start': 8,
'end': 28,
},
40: {
'event': 'b000',
'url': 'iyB5q7bb1l8',
'start': 0,
'end': 13,
},
#######################################################################
41: {
'event': 'b001',
'url': 'DTieJvYa-sA',
'start': 21,
'end': 26,
},
42: {
'event': 'b001',
'url': 'DVEuOBxAyFM',
'start': 10,
'end': 20,
},
43: {
'event': 'b001',
'url': 'E-As4tECwcQ',
'start': 195,
'end': 200,
},
44: {
'event': 'b001',
'url': 'EodzL5d9A78',
'start': 15,
'end': 25,
},
45: {
'event': 'b001',
'url': 'FSm6Z98ALhw',
'start': 345,
'end': 355,
},
46: {
'event': 'b001',
'url': 'G8tT-uKj3Ls',
'start': 12,
'end': 30,
},
47: {
'event': 'b001',
'url': 'HAS6G7Uq4Oc',
'start': 1,
'end': 16,
},
48: {
'event': 'b001',
'url': 'H_Bcux0FRxM',
'start': 34,
'end': 44,
},
49: {
'event': 'b001',
'url': 'I3_SwBhnUj0',
'start': 8,
'end': 20,
},
50: {
'event': 'b001',
'url': 'KZXC1iouJyo',
'start': 1,
'end': 6,
},
51: {
'event': 'b001',
'url': 'MIV0-6O-dLM',
'start': 10,
'end': 18,
},
52: {
'event': 'b001',
'url': 'MOV9rXOes3k',
'start': 0,
'end': 9,
},
53: {
'event': 'b001',
'url': 'W4oEM0W6mhM',
'start': 0,
'end': 7,
},
54: {
'event': 'b001',
'url': 'WCFt-dggFlk',
'start': 6,
'end': 12,
},
55: {
'event': 'b001',
'url': 'YykyGidfpfw',
'start': 0,
'end': 10,
},
56: {
'event': 'b001',
'url': 'ZPZa1zMpxBU',
'start': 0,
'end': 6,
},
57: {
'event': 'b001',
'url': 'eE8QMTqL01I',
'start': 0,
'end': 6,
},
58: {
'event': 'b001',
'url': 'fppKGJD3Y6c',
'start': 7,
'end': 13,
},
59: {
'event': 'b001',
'url': 'j_ZwYJNu5mE',
'start': 0,
'end': 15,
},
60: {
'event': 'b001',
'url': 'm1yOTcjRjcM',
'start': 0,
'end': 17,
},
#######################################################################
61: {
'event': 'c000',
'dir': 'raws/c000_1.wav',
'start': 0,
'end': -1,
},
62: {
'event': 'c000',
'dir': 'raws/c000_2.wav',
'start': 0,
'end': -1,
},
63: {
'event': 'c000',
'dir': 'raws/c000_3.wav',
'start': 0,
'end': -1,
},
64: {
'event': 'c000',
'dir': 'raws/c000_4.wav',
'start': 0,
'end': -1,
},
65: {
'event': 'c000',
'dir': 'raws/c000_5.wav',
'start': 0,
'end': -1,
},
66: {
'event': 'c000',
'dir': 'raws/c000_6.wav',
'start': 0,
'end': -1,
},
67: {
'event': 'c000',
'dir': 'raws/c000_7.wav',
'start': 0,
'end': -1,
},
68: {
'event': 'c000',
'dir': 'raws/c000_8.wav',
'start': 0,
'end': -1,
},
69: {
'event': 'c000',
'dir': 'raws/c000_9.wav',
'start': 0,
'end': -1,
},
70: {
'event': 'c000',
'dir': 'raws/c000_10.wav',
'start': 0,
'end': -1,
},
71: {
'event': 'c000',
'dir': 'raws/c000_11.wav',
'start': 0,
'end': -1,
},
72: {
'event': 'c000',
'dir': 'raws/c000_12.wav',
'start': 0,
'end': -1,
},
73: {
'event': 'c000',
'dir': 'raws/c000_13.wav',
'start': 0,
'end': -1,
},
74: {
'event': 'c000',
'dir': 'raws/c000_14.wav',
'start': 0,
'end': -1,
},
75: {
'event': 'c000',
'dir': 'raws/c000_15.wav',
'start': 0,
'end': -1,
},
76: {
'event': 'c000',
'dir': 'raws/c000_16.wav',
'start': 0,
'end': -1,
},
77: {
'event': 'c000',
'dir': 'raws/c000_17.wav',
'start': 0,
'end': -1,
},
78: {
'event': 'c000',
'dir': 'raws/c000_18.wav',
'start': 0,
'end': -1,
},
79: {
'event': 'c000',
'dir': 'raws/c000_19.wav',
'start': 0,
'end': -1,
},
80: {
'event': 'c000',
'dir': 'raws/c000_20.wav',
'start': 0,
'end': -1,
},
#######################################################################
81: {
'event': 'c001',
'dir': 'raws/c001_1.wav',
'start': 0,
'end': -1,
},
82: {
'event': 'c001',
'dir': 'raws/c001_2.wav',
'start': 0,
'end': -1,
},
83: {
'event': 'c001',
'dir': 'raws/c001_3.wav',
'start': 0,
'end': -1,
},
84: {
'event': 'c001',
'dir': 'raws/c001_4.wav',
'start': 0,
'end': -1,
},
85: {
'event': 'c001',
'dir': 'raws/c001_5.wav',
'start': 0,
'end': -1,
},
86: {
'event': 'c001',
'dir': 'raws/c001_6.wav',
'start': 0,
'end': -1,
},
87: {
'event': 'c001',
'dir': 'raws/c001_7.wav',
'start': 0,
'end': -1,
},
88: {
'event': 'c001',
'dir': 'raws/c001_8.wav',
'start': 0,
'end': -1,
},
89: {
'event': 'c001',
'dir': 'raws/c001_9.wav',
'start': 0,
'end': -1,
},
90: {
'event': 'c001',
'dir': 'raws/c001_10.wav',
'start': 0,
'end': -1,
},
91: {
'event': 'c001',
'dir': 'raws/c001_11.wav',
'start': 0,
'end': -1,
},
92: {
'event': 'c001',
'dir': 'raws/c001_12.wav',
'start': 0,
'end': -1,
},
93: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 7,
'end': 17,
},
94: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 18,
'end': 30,
},
95: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 45,
'end': 56,
},
96: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 60,
'end': 72,
},
97: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 90,
'end': 96,
},
98: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 120,
'end': 130,
},
99: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 230,
'end': 242,
},
100: {
'event': 'c001',
'url': 'ApJA85gwNFo',
'start': 270,
'end': 279,
},
#######################################################################
101: {
'event': 'c002',
'dir': 'raws/c002_1.wav',
'start': 0,
'end': -1,
},
102: {
'event': 'c002',
'dir': 'raws/c002_2.wav',
'start': 0,
'end': -1,
},
103: {
'event': 'c002',
'dir': 'raws/c002_3.wav',
'start': 0,
'end': -1,
},
104: {
'event': 'c002',
'dir': 'raws/c002_4.wav',
'start': 0,
'end': -1,
},
105: {
'event': 'c002',
'dir': 'raws/c002_5.wav',
'start': 0,
'end': -1,
},
106: {
'event': 'c002',
'dir': 'raws/c002_6.wav',
'start': 0,
'end': -1,
},
107: {
'event': 'c002',
'dir': 'raws/c002_7.wav',
'start': 0,
'end': -1,
},
108: {
'event': 'c002',
'dir': 'raws/c002_8.wav',
'start': 0,
'end': -1,
},
109: {
'event': 'c002',
'dir': 'raws/c002_9.wav',
'start': 0,
'end': -1,
},
110: {
'event': 'c002',
'dir': 'raws/c002_10.wav',
'start': 0,
'end': -1,
},
111: {
'event': 'c002',
'dir': 'raws/c002_11.wav',
'start': 0,
'end': -1,
},
112: {
'event': 'c002',
'dir': 'raws/c002_12.wav',
'start': 0,
'end': -1,
},
113: {
'event': 'c002',
'dir': 'raws/c002_13.wav',
'start': 0,
'end': -1,
},
114: {
'event': 'c002',
'dir': 'raws/c002_14.wav',
'start': 0,
'end': -1,
},
115: {
'event': 'c002',
'dir': 'raws/c002_15.wav',
'start': 0,
'end': -1,
},
116: {
'event': 'c002',
'dir': 'raws/c002_16.wav',
'start': 0,
'end': -1,
},
117: {
'event': 'c002',
'dir': 'raws/c002_17.wav',
'start': 0,
'end': -1,
},
118: {
'event': 'c002',
'dir': 'raws/c002_18.wav',
'start': 0,
'end': -1,
},
119: {
'event': 'c002',
'dir': 'raws/c002_19.wav',
'start': 0,
'end': -1,
},
120: {
'event': 'c002',
'dir': 'raws/c002_20.wav',
'start': 0,
'end': -1,
},
#######################################################################
121: {
'event': 'c003',
'url': '2p_d6vsFKJM',
'start': 2,
'end': 7,
},
122: {
'event': 'c003',
'url': '7e2ifgqrN1Q',
'start': 15,
'end': 20,
},
123: {
'event': 'c003',
'url': 'AiQoXi32QIA',
'start': 13,
'end': 18,
},
124: {
'event': 'c003',
'url': 'acIL82JWyq4',
'start': 90,
'end': 95,
},
125: {
'event': 'c003',
'url': 'acIL82JWyq4',
'start': 99,
'end': 105,
},
126: {
'event': 'c003',
'url': 'TpYdG5rqKnc',
'start': 77,
'end': 81,
},
127: {
'event': 'c003',
'url': 'aLHxMaT3uYg',
'start': 82,
'end': 87,
},
128: {
'event': 'c003',
'url': 'bWtCva4PDKE',
'start': 3,
'end': 10,
},
129: {
'event': 'c003',
'url': 'cM4zYIOdrYk',
'start': 1,
'end': 7,
},
130: {
'event': 'c003',
'url': 'fWBzCRl6LUs',
'start': 0,
'end': 4,
},
131: {
'event': 'c003',
'url': 'f_7ujxIzNmU',
'start': 11,
'end': 16,
},
132: {
'event': 'c003',
'url': 'fxbrSjGLrXY',
'start': 161,
'end': 166,
},
133: {
'event': 'c003',
'url': 'rbI18LmDHpw',
'start': 3,
'end': 8,
},
134: {
'event': 'c003',
'url': 'rbI18LmDHpw',
'start': 9,
'end': 16,
},
135: {
'event': 'c003',
'url': 's-jlycmfUsw',
'start': 21,
'end': 27,
},
136: {
'event': 'c003',
'url': 's-jlycmfUsw',
'start': 50,
'end': 56,
},
137: {
'event': 'c003',
'url': 't5fv6TTbsA0',
'start': 510,
'end': 516,
},
138: {
'event': 'c003',
'url': 'u0DxoED_3kA',
'start': 47,
'end': 52,
},
139: {
'event': 'c003',
'url': 'wBeYh9V8Iw4',
'start': 137,
'end': 142,
},
140: {
'event': 'c003',
'url': 'YJG1Zz097M4',
'start': 1,
'end': 9,
},
#######################################################################
141: {
'event': 'c004',
'url': 'ocOYpa4na5k',
'start': 0,
'end': 16,
},
142: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 0,
'end': 6,
},
143: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 8,
'end': 13,
},
144: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 17,
'end': 23,
},
145: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 27,
'end': 32,
},
146: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 37,
'end': 43,
},
147: {
'event': 'c004',
'url': 'ow2cNtqCNPw',
'start': 45,
'end': 53,
},
148: {
'event': 'c004',
'url': 'rqzIV5OzbH0',
'start': 30,
'end': 37,
},
149: {
'event': 'c004',
'url': '1Ms9GajaUQ4',
'start': 0,
'end': 10,
},
150: {
'event': 'c004',
'url': '2rcRqeXnsNw',
'start': 17,
'end': 27,
},
151: {
'event': 'c004',
'url': '8yRROnG0-lA',
'start': 11,
'end': 23,
},
152: {
'event': 'c004',
'url': '8yRROnG0-lA',
'start': 24,
'end': 32,
},
153: {
'event': 'c004',
'url': '9EsNtRXnYbE',
'start': 0,
'end': 14,
},
154: {
'event': 'c004',
'url': '9EsNtRXnYbE',
'start': 15,
'end': 30,
},
155: {
'event': 'c004',
'url': 'FyQuHLiMuIk',
'start': 0,
'end': 5,
},
156: {
'event': 'c004',
'url': 'H7xKYPGjhhg',
'start': 10,
'end': 19,
},
157: {
'event': 'c004',
'url': 'H7xKYPGjhhg',
'start': 23,
'end': 29,
},
158: {
'event': 'c004',
'url': 'UPohyk3ynFk',
'start': 4,
'end': 10,
},
159: {
'event': 'c004',
'url': '7qnX0WB1x1k',
'start': 0,
'end': 9,
},
160: {
'event': 'c004',
'url': 'W-o0tTfwuOg',
'start': 39,
'end': 44,
},
#######################################################################
161: {
'event': 'd000',
'dir': 'raws/d000_1.wav',
'start': 0,
'end': -1,
},
162: {
'event': 'd000',
'dir': 'raws/d000_2.wav',
'start': 0,
'end': -1,
},
163: {
'event': 'd000',
'dir': 'raws/d000_3.wav',
'start': 0,
'end': -1,
},
164: {
'event': 'd000',
'dir': 'raws/d000_4.wav',
'start': 0,
'end': -1,
},
165: {
'event': 'd000',
'dir': 'raws/d000_5.wav',
'start': 0,
'end': -1,
},
166: {
'event': 'd000',
'dir': 'raws/d000_6.wav',
'start': 0,
'end': -1,
},
167: {
'event': 'd000',
'dir': 'raws/d000_7.wav',
'start': 0,
'end': -1,
},
168: {
'event': 'd000',
'dir': 'raws/d000_8.wav',
'start': 0,
'end': -1,
},
169: {
'event': 'd000',
'dir': 'raws/d000_9.wav',
'start': 0,
'end': -1,
},
170: {
'event': 'd000',
'dir': 'raws/d000_10.wav',
'start': 0,
'end': -1,
},
171: {
'event': 'd000',
'dir': 'raws/d000_11.wav',
'start': 0,
'end': -1,
},
172: {
'event': 'd000',
'dir': 'raws/d000_12.wav',
'start': 0,
'end': -1,
},
173: {
'event': 'd000',
'dir': 'raws/d000_13.wav',
'start': 0,
'end': -1,
},
174: {
'event': 'd000',
'dir': 'raws/d000_14.wav',
'start': 0,
'end': -1,
},
175: {
'event': 'd000',
'dir': 'raws/d000_15.wav',
'start': 0,
'end': -1,
},
176: {
'event': 'd000',
'dir': 'raws/d000_16.wav',
'start': 0,
'end': -1,
},
177: {
'event': 'd000',
'dir': 'raws/d000_17.wav',
'start': 0,
'end': -1,
},
178: {
'event': 'd000',
'dir': 'raws/d000_18.wav',
'start': 0,
'end': -1,
},
179: {
'event': 'd000',
'dir': 'raws/d000_19.wav',
'start': 0,
'end': -1,
},
180: {
'event': 'd000',
'dir': 'raws/d000_20.wav',
'start': 0,
'end': -1,
},
#######################################################################
181: {
'event': 'd001',
'url': 'nZIY8BKixjc',
'start': 7,
'end': 12,
},
182: {
'event': 'd001',
'url': 'ptIHZv3KdJw',
'start': 0,
'end': 2,
},
183: {
'event': 'd001',
'url': 'tNEGx3WCwBA',
'start': 0,
'end': 4,
},
184: {
'event': 'd001',
'url': 'vmeWtjzGZPs',
'start': 0,
'end': 6,
},
185: {
'event': 'd001',
'url': 'vmeWtjzGZPs',
'start': 7,
'end': 10,
},
186: {
'event': 'd001',
'url': 'vmeWtjzGZPs',
'start': 11,
'end': 16,
},
187: {
'event': 'd001',
'url': '-9ek6eO0RtI',
'start': 259,
'end': 265,
},
188: {
'event': 'd001',
'url': '6qlfodh49BA',
'start': 0,
'end': 2,
},
189: {
'event': 'd001',
'url': '7P-1BJ1A9ME',
'start': 0,
'end': 6,
},
190: {
'event': 'd001',
'url': '9VJL-ktypNw',
'start': 0,
'end': 5,
},
191: {
'event': 'd001',
'url': 'BurGML_ZqSA',
'start': 490.8,
'end': 495,
},
192: {
'event': 'd001',
'url': 'JL76D1HWv-U',
'start': 549,
'end': 555,
},
193: {
'event': 'd001',
'url': 'M47-JuWnx6U',
'start': 0,
'end': 3.6,
},
194: {
'event': 'd001',
'dir': 'raws/d001_1.wav',
'start': 0,
'end': 10,
},
195: {
'event': 'd001',
'dir': 'raws/d001_2.wav',
'start': 11,
'end': 19,
},
196: {
'event': 'd001',
'dir': 'raws/d001_3.wav',
'start': 20,
'end': 30,
},
197: {
'event': 'd001',
'dir': 'raws/d001_4.wav',
'start': 31,
'end': 41,
},
198: {
'event': 'd001',
'url': 'Vbx6TFxSPYY',
'start': 64,
'end': 70,
},
199: {
'event': 'd001',
'url': 'Vbx6TFxSPYY',
'start': 90,
'end': 92.8,
},
200: {
'event': 'd001',
'url': 'Vbx6TFxSPYY',
'start': 96,
'end': 101,
},
#######################################################################
201: {
'event': 'd002',
'url': '3xCWI_22Z9A',
'start': 45,
'end': 51,
},
202: {
'event': 'd002',
'url': '3xCWI_22Z9A',
'start': 61,
'end': 66,
},
203: {
'event': 'd002',
'url': '3xCWI_22Z9A',
'start': 117,
'end': 126,
},
204: {
'event': 'd002',
'url': '5PbIH_kMyis',
'start': 2,
'end': 18,
},
205: {
'event': 'd002',
'url': '64K4SlYR3BU',
'start': 0,
'end': 17,
},
206: {
'event': 'd002',
'url': 'CTBFPn_S5u0',
'start': 0,
'end': 5,
},
207: {
'event': 'd002',
'url': 'CTBFPn_S5u0',
'start': 12,
'end': 17,
},
208: {
'event': 'd002',
'url': 'EakI8v4Ztt4',
'start': 2,
'end': 14,
},
209: {
'event': 'd002',
'url': 'EakI8v4Ztt4',
'start': 29,
'end': 34,
},
210: {
'event': 'd002',
'url': 'FeRaDiSPb2c',
'start': 11,
'end': 16,
},
211: {
'event': 'd002',
'url': 'FeRaDiSPb2c',
'start': 18,
'end': 23,
},
212: {
'event': 'd002',
'url': 'Fw09tDLa-78',
'start': 0,
'end': 5,
},
213: {
'event': 'd002',
'url': 'Fw09tDLa-78',
'start': 40,
'end': 46,
},
214: {
'event': 'd002',
'url': 'G7vXKtePlGM',
'start': 0,
'end': 20,
},
215: {
'event': 'd002',
'url': 'GamZltmhYuc',
'start': 40,
'end': 45,
},
216: {
'event': 'd002',
'url': 'Glc6Ekc67OE',
'start': 25,
'end': 35,
},
217: {
'event': 'd002',
'url': 'Ki7Xvd2_hxY',
'start': 3,
'end': 10,
},
218: {
'event': 'd002',
'url': 'Ki7Xvd2_hxY',
'start': 15,
'end': 20,
},
219: {
'event': 'd002',
'url': 'KrtiLKd4VCI',
'start': 99,
'end': 110,
},
220: {
'event': 'd002',
'url': 'P_dXuddk3fE',
'start': 0,
'end': 18,
},
#######################################################################
221: {
'event': 'f000',
'url': '0JPT13OUVV8',
'start': 39,
'end': 45,
},
222: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 33,
'end': 40,
},
223: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 75,
'end': 81,
},
224: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 84,
'end': 89,
},
225: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 103,
'end': 118,
},
226: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 129,
'end': 135,
},
227: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 225,
'end': 232,
},
228: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 235,
'end': 249,
},
229: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 258,
'end': 264,
},
230: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 338,
'end': 348,
},
231: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 384,
'end': 396,
},
232: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 400,
'end': 410,
},
233: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 626,
'end': 634,
},
234: {
'event': 'f000',
'url': '3uZy0wkterE',
'start': 719,
'end': 725,
},
235: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 0,
'end': 12,
},
236: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 61,
'end': 72,
},
237: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 73,
'end': 85,
},
238: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 320,
'end': 340,
},
239: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 360,
'end': 380,
},
240: {
'event': 'f000',
'url': '4yub23Xuzos',
'start': 407,
'end': 416,
},
#######################################################################
241: {
'event': 'f001',
'url': '5EbJQCFom8o',
'start': 8,
'end': 19,
},
242: {
'event': 'f001',
'url': '6z4HD7Dw7i8',
'start': 35,
'end': 40,
},
243: {
'event': 'f001',
'url': 'PCsQ3zgL3CU',
'start': 66,
'end': 86,
},
244: {
'event': 'f001',
'url': '7GEiPdnqJUw',
'start': 4,
'end': 24,
},
245: {
'event': 'f001',
'url': 'BIK1Ds79KVM',
'start': 105,
'end': 125,
},
246: {
'event': 'f001',
'url': 'CDwk_DbprX4',
'start': 37,
'end': 42,
},
247: {
'event': 'f001',
'url': 'D2_KIhSbmt0',
'start': 9,
'end': 17,
},
248: {
'event': 'f001',
'url': 'DMMCiQB7-E4',
'start': 24,
'end': 29,
},
249: {
'event': 'f001',
'url': 'F3dasUA6LqU',
'start': 85,
'end': 104,
},
250: {
'event': 'f001',
'url': 'GyygYycarL0',
'start': 69,
'end': 80,
},
251: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 50,
'end': 60,
},
252: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 68,
'end': 78,
},
253: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 86,
'end': 96,
},
254: {
'event': 'f001',
'url': 'HFpfDaLZtzQ',
'start': 160,
'end': 170,
},
255: {
'event': 'f001',
'url': 'HxO2GRMD_fw',
'start': 47,
'end': 57,
},
256: {
'event': 'f001',
'url': 'I6YfsWzCvLI',
'start': 25,
'end': 34,
},
257: {
'event': 'f001',
'url': 'IYrVF4tHN08',
'start': 1,
'end': 19,
},
258: {
'event': 'f001',
'url': 'LjeZYuAHjpk',
'start': 2,
'end': 14,
},
259: {
'event': 'f001',
'url': 'Mls0tzvQpzQ',
'start': 83,
'end': 92,
},
260: {
'event': 'f001',
'url': 'O2htSqXhdqE',
'start': 65,
'end': 71,
},
#######################################################################
261: {
'event': 'h000',
'url': 'cSrL0BXsO40',
'start': 0,
'end': 17,
},
262: {
'event': 'h000',
'url': 'drVo5VQfsDc',
'start': 0,
'end': 6,
},
263: {
'event': 'h000',
'url': '-dEOa2GkXHw',
'start': 137,
'end': 143,
},
264: {
'event': 'h000',
'url': 'kVQbu_BsZ9o',
'start': 0,
'end': 10,
},
265: {
'event': 'h000',
'url': 'k_kRSOra2qA',
'start': 9.5,
'end': 17,
},
266: {
'event': 'h000',
'url': 'k_kRSOra2qA',
'start': 294,
'end': 304,
},
267: {
'event': 'h000',
'url': '-q1pzc3VMrg',
'start': 30,
'end': 38,
},
268: {
'event': 'h000',
'url': '-q1pzc3VMrg',
'start': 296,
'end': 309,
},
269: {
'event': 'h000',
'url': 'qF90ezvPe14',
'start': 8,
'end': 13,
},
270: {
'event': 'h000',
'url': 'x9Kkv8j42mI',
'start': 21,
'end': 28,
},
271: {
'event': 'h000',
'url': 'yOelIR7hiMc',
'start': 6,
'end': 25,
},
272: {
'event': 'h000',
'url': '0StCxWx9dV8',
'start': 6,
'end': 14,
},
273: {
'event': 'h000',
'url': 'zLo1mkKE4sw',
'start': 31,
'end': 41,
},
274: {
'event': 'h000',
'url': '0150dZu3Na8',
'start': 0,
'end': 7,
},
275: {
'event': 'h000',
'url': '7rk62G1WyG8',
'start': 17,
'end': 24,
},
276: {
'event': 'h000',
'url': '7rk62G1WyG8',
'start': 60,
'end': 73,
},
277: {
'event': 'h000',
'url': '9avOnbp3NA8',
'start': 3,
'end': 20,
},
278: {
'event': 'h000',
'url': '0jzTEIxgsjM',
'start': 11,
'end': 18,
},
279: {
'event': 'h000',
'url': 'E9etGzNH2SM',
'start': 0,
'end': 8,
},
280: {
'event': 'h000',
'url': 'GGyrdlFfowc',
'start': 0,
'end': 12,
},
#######################################################################
281: {
'event': 'h001',
'url': '8TkXXqFWNWQ',
'start': 24,
'end': 42,
},
282: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 61,
'end': 81,
},
283: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 90,
'end': 102,
},
284: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 123,
'end': 132,
},
285: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 166,
'end': 182,
},
286: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 184,
'end': 202,
},
287: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 203,
'end': 212,
},
288: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 214,
'end': 224,
},
289: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 257,
'end': 272,
},
290: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 274,
'end': 285,
},
291: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 305,
'end': 315,
},
292: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 318,
'end': 324,
},
293: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 327,
'end': 339,
},
294: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 397,
'end': 406,
},
295: {
'event': 'h001',
'url': 'KpGO2VTksIw',
'start': 407,
'end': 426,
},
296: {
'event': 'h001',
'url': 'oDQb7qZsz6o',
'start': 373,
'end': 381,
},
297: {
'event': 'h001',
'url': 'soVRoIbewMM',
'start': 35,
'end': 42,
},
298: {
'event': 'h001',
'url': 'xnliLFqdfo0',
'start': 92,
'end': 112,
},
299: {
'event': 'h001',
'url': 'VOUm1PTYpB0',
'start': 34,
'end': 39,
},
300: {
'event': 'h001',
'url': 'zGsY2GGVSao',
'start': 67,
'end': 73,
},
#######################################################################
301: {
'event': 'h002',
'url': 'K9BXym8IG_o',
'start': 3,
'end': 13,
},
302: {
'event': 'h002',
'url': 'LUK71I-yxXI',
'start': 4,
'end': 20,
},
303: {
'event': 'h002',
'url': 'NYG_T2t542Q',
'start': 19,
'end': 35,
},
304: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 1,
'end': 15,
},
305: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 31,
'end': 41,
},
306: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 51,
'end': 59,
},
307: {
'event': 'h002',
'url': 'RYK03ltDcqM',
'start': 67,
'end': 87,
},
308: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 0,
'end': 15,
},
309: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 16,
'end': 30,
},
310: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 31,
'end': 45,
},
311: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 46,
'end': 60,
},
312: {
'event': 'h002',
'url': 'Vb_Xvjbj_TI',
'start': 61,
'end': 80,
},
313: {
'event': 'h002',
'url': 'bcYI2CTlH5o',
'start': 13,
'end': 24,
},
314: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 85,
'end': 95,
},
315: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 100,
'end': 110,
},
316: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 120,
'end': 126,
},
317: {
'event': 'h002',
'url': 'guYpxmm4vFU',
'start': 159,
'end': 179,
},
318: {
'event': 'h002',
'url': 'hxH7Uith0tQ',
'start': 0,
'end': 15,
},
319: {
'event': 'h002',
'url': 'hxH7Uith0tQ',
'start': 16,
'end': 30,
},
320: {
'event': 'h002',
'url': 'hxH7Uith0tQ',
'start': 31,
'end': 45,
},
#######################################################################
321: {
'event': 'h003',
'url': '3rGHjZMdW4Y',
'start': 15,
'end': 24,
},
322: {
'event': 'h003',
'url': '9bIchzOP8PA',
'start': 18,
'end': 25,
},
323: {
'event': 'h003',
'url': 'A8sju2x5nhE',
'start': 339,
'end': 345,
},
324: {
'event': 'h003',
'url': 'FUXSq44CbHo',
'start': 221,
'end': 232,
},
325: {
'event': 'h003',
'url': 'IWxlWrfpk_g',
'start': 8,
'end': 28,
},
326: {
'event': 'h003',
'url': 'NETQtgbQ9-s',
'start': 0,
'end': 9,
},
327: {
'event': 'h003',
'url': 'SqsmuNOtmwM',
'start': 26,
'end': 39,
},
328: {
'event': 'h003',
'url': 'XrKDtjFM9Ec',
'start': 0,
'end': 13,
},
329: {
'event': 'h003',
'url': '_H4iHqtGlAY',
'start': 0,
'end': 15,
},
330: {
'event': 'h003',
'url': 'aa_468eUE1o',
'start': 230,
'end': 242,
},
331: {
'event': 'h003',
'url': 'aomaneVgUs0',
'start': 51,
'end': 57,
},
332: {
'event': 'h003',
'url': 'bW-xjy5-a1s',
'start': 58,
'end': 71,
},
333: {
'event': 'h003',
'url': 'fIPsH57dZIY',
'start': 0,
'end': 20,
},
334: {
'event': 'h003',
'url': 'ojvtp3aHKdc',
'start': 5,
'end': 12,
},
335: {
'event': 'h003',
'dir': 'raws/h003_1.wav',
'start': 0,
'end': -1,
},
336: {
'event': 'h003',
'dir': 'raws/h003_2.wav',
'start': 0,
'end': -1,
},
337: {
'event': 'h003',
'dir': 'raws/h003_3.wav',
'start': 0,
'end': -1,
},
338: {
'event': 'h003',
'dir': 'raws/h003_4.wav',
'start': 0,
'end': -1,
},
339: {
'event': 'h003',
'dir': 'raws/h003_5.wav',
'start': 0,
'end': -1,
},
340: {
'event': 'h003',
'dir': 'raws/h003_6.wav',
'start': 0,
'end': -1,
},
#######################################################################
341: {
'event': 'h004',
'url': 'nFdmth2N8Bo',
'start': 0,
'end': 18,
},
342: {
'event': 'h004',
'url': 'pNrjoDwCnik',
'start': 280,
'end': 294,
},
343: {
'event': 'h004',
'url': 't4wDKhMiKpA',
'start': 13,
'end': 20,
},
344: {
'event': 'h004',
'url': 'uscPSf6C_Js',
'start': 14,
'end': 30,
},
345: {
'event': 'h004',
'url': 'w8an1GY8T00',
'start': 42,
'end': 46,
},
346: {
'event': 'h004',
'url': '2k6Bw9EVz7g',
'start': 17,
'end': 22,
},
347: {
'event': 'h004',
'url': '8g2Uv6QqI_Y',
'start': 185,
'end': 200,
},
348: {
'event': 'h004',
'url': 'BH0rbQ6zHlw',
'start': 6,
'end': 22,
},
349: {
'event': 'h004',
'url': 'GlWecURh_OU',
'start': 94,
'end': 104,
},
350: {
'event': 'h004',
'url': 'LU1vqeS4G4s',
'start': 78,
'end': 88,
},
351: {
'event': 'h004',
'url': 'SA4SG1Nt0mw',
'start': 0,
'end': 5,
},
352: {
'event': 'h004',
'url': '4t524YeonRo',
'start': 7,
'end': 15,
},
353: {
'event': 'h004',
'url': 'UQtbZNMp1nY',
'start': 0,
'end': 14,
},
354: {
'event': 'h004',
'url': 'UhANSJnLXNs',
'start': 0,
'end': 15,
},
355: {
'event': 'h004',
'url': '4t524YeonRo',
'start': 37,
'end': 43,
},
356: {
'event': 'h004',
'url': 'Wy49nszOnxo',
'start': 139,
'end': 149,
},
357: {
'event': 'h004',
'url': '_yFwVTg-V-M',
'start': 0,
'end': 18,
},
358: {
'event': 'h004',
'url': 'e3BdNhbiDwA',
'start': 191,
'end': 201,
},
359: {
'event': 'h004',
'url': 'iLUd4l1JFDI',
'start': 0,
'end': 16,
},
360: {
'event': 'h004',
'url': 'jx1sWITDw-E',
'start': 24,
'end': 37,
},
#######################################################################
361: {
'event': 'p000',
'url': 'QeS7zmkTOig',
'start': 0,
'end': 4,
},
362: {
'event': 'p000',
'url': 'URxsjJi1IL4',
'start': 2,
'end': 21,
},
363: {
'event': 'p000',
'url': 'Zf5gYtlz6Pw',
'start': 3,
'end': 20,
},
364: {
'event': 'p000',
'url': 'yKls2m5kM14',
'start': 0,
'end': 4,
},
365: {
'event': 'p000',
'url': 'yibeLZXOHiU',
'start': 0,
'end': 4,
},
366: {
'event': 'p000',
'url': 'ys60zlhXTs4',
'start': 0,
'end': 16,
},
367: {
'event': 'p000',
'url': '2QcOD8uCu0E',
'start': 0,
'end': 4,
},
368: {
'event': 'p000',
'url': '4BUEj-TxY5g',
'start': 0,
'end': 11,
},
369: {
'event': 'p000',
'url': 'IigiZ3ss6HE',
'start': 8,
'end': 21,
},
370: {
'event': 'p000',
'url': 'NK92DUyyngc',
'start': 13,
'end': 25,
},
371: {
'event': 'p000',
'url': 'fR2lhjlHR4I',
'start': 28,
'end': 48,
},
372: {
'event': 'p000',
'dir': 'raws/p000_1.wav',
'start': 0,
'end': -1,
},
373: {
'event': 'p000',
'dir': 'raws/p000_2.wav',
'start': 0,
'end': -1,
},
374: {
'event': 'p000',
'dir': 'raws/p000_3.wav',
'start': 0,
'end': -1,
},
375: {
'event': 'p000',
'dir': 'raws/p000_4.wav',
'start': 0,
'end': -1,
},
376: {
'event': 'p000',
'dir': 'raws/p000_5.wav',
'start': 0,
'end': -1,
},
377: {
'event': 'p000',
'dir': 'raws/p000_6.wav',
'start': 0,
'end': -1,
},
378: {
'event': 'p000',
'dir': 'raws/p000_7.wav',
'start': 0,
'end': -1,
},
379: {
'event': 'p000',
'dir': 'raws/p000_8.wav',
'start': 0,
'end': -1,
},
380: {
'event': 'p000',
'dir': 'raws/p000_9.wav',
'start': 0,
'end': -1,
},
#######################################################################
381: {
'event': 't000',
'url': '3y2aZEs1F5s',
'start': 75,
'end': 85,
},
382: {
'event': 't000',
'url': '4lNM6Ah99hw',
'start': 0,
'end': 11,
},
383: {
'event': 't000',
'url': '6OHetw29o_A',
'start': 3,
'end': 15,
},
384: {
'event': 't000',
'url': '78R6KgsSPRk',
'start': 0,
'end': 7,
},
385: {
'event': 't000',
'url': 'APYXZHZPCE4',
'start': 38,
'end': 52,
},
386: {
'event': 't000',
'url': 'SavkOa_GGLs',
'start': 12,
'end': 16,
},
387: {
'event': 't000',
'url': 'bW_PMIAIHBE',
'start': 47,
'end': 52,
},
388: {
'event': 't000',
'url': 'dI9HTTk6Mgs',
'start': 5,
'end': 11,
},
389: {
'event': 't000',
'url': 'dJudErPaMWI',
'start': 39,
'end': 48,
},
390: {
'event': 't000',
'url': 'dxcs_lpcwj0',
'start': 5,
'end': 25,
},
391: {
'event': 't000',
'url': 'dxcs_lpcwj0',
'start': 119,
'end': 139,
},
392: {
'event': 't000',
'url': 'h6voPlJG0m0',
'start': 24,
'end': 31,
},
393: {
'event': 't000',
'url': 'jDdYqpYoIGY',
'start': 29,
'end': 41,
},
394: {
'event': 't000',
'url': 'jotE032i05c',
'start': 2,
'end': 22,
},
395: {
'event': 't000',
'url': 'jotE032i05c',
'start': 60,
'end': 72,
},
396: {
'event': 't000',
'url': 'nD_HctFk3Hc',
'start': 434,
'end': 442,
},
397: {
'event': 't000',
'url': 'y2A1Pmiu7yw',
'start': 76,
'end': 86,
},
398: {
'event': 't000',
'url': 'y2A1Pmiu7yw',
'start': 587,
'end': 595,
},
399: {
'event': 't000',
'url': '8pJUJvPfIx0',
'start': 76,
'end': 94,
},
400: {
'event': 't000',
'url': 'mi-s3pLeR3U',
'start': 616,
'end': 634,
},
}
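# The dictionary above maps an integer id to one source clip: 'event' is the event
# code, 'url' a YouTube video id or 'dir' a local wav under raws/, and 'start'/'end'
# are trim boundaries in seconds ('end' of -1 appears to mean "use the full clip").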
with open('daqa_sources.json', 'w') as f:
json.dump(sources, f)
if __name__ == "__main__":
main()
print('Success!')
|
daqa-master
|
daqa-gen/daqa_sources.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
def main():
dataset = {
'events': ['a000', 'b000', 'b001', 'c000', 'c001', 'c002', 'c003',
'c004', 'd000', 'd001', 'd002', 'f000', 'f001', 'h000',
'h001', 'h002', 'h003', 'h004', 'p000', 't000'], # unique
'sources': {
'a000': ['aircraft', 'plane'],
'b000': ['band'],
'b001': ['bird'],
'c000': ['crowd'],
'c001': ['crowd'],
'c002': ['crowd'],
'c003': ['driver', 'car', 'vehicle'],
'c004': ['car', 'vehicle'],
'd000': ['door'],
'd001': ['doorbell'],
'd002': ['dog'],
'f000': ['fire truck', 'fire engine', 'emergency vehicle'],
'f001': ['fire alarm', 'alarm'],
'h000': ['human'],
'h001': ['human'],
'h002': ['human'],
'h003': ['human'],
'h004': ['human'],
'p000': ['phone'],
't000': ['storm'],
},
'actions': {
'a000': ['passing by', 'flying over'],
'b000': ['playing'],
'b001': ['singing'],
'c000': ['babbling'],
'c001': ['applauding', 'clapping'],
'c002': ['rioting', 'making noise'],
'c003': ['honking'],
'c004': ['passing by'],
'd000': ['slamming', 'closing', 'shutting'],
'd001': ['ringing'],
'd002': ['barking', 'making noise'],
'f000': ['passing by'],
'f001': ['going off'],
'h000': ['speaking', 'talking'],
'h001': ['laughing'],
'h002': ['typing on a keyboard', 'typing'],
'h003': ['whistling'],
'h004': ['operating a machine'],
'p000': ['ringing'],
't000': ['thundering'],
},
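# The 'consecutive' flags below record whether two clips of the same event may
# occur back to back in a generated sequence; generate_audio.py retries any
# sampled sequence that repeats an event marked False.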
'consecutive': {
'a000': True,
'b000': False,
'b001': False,
'c000': False,
'c001': False,
'c002': False,
'c003': False,
'c004': True,
'd000': True,
'd001': False,
'd002': False,
'f000': False,
'f001': False,
'h000': True,
'h001': True,
'h002': False,
'h003': False,
'h004': False,
'p000': False,
't000': False,
}
}
with open('daqa_outline.json', 'w') as f:
json.dump(dataset, f)
if __name__ == "__main__":
main()
print('Success!')
|
daqa-master
|
daqa-gen/daqa_outline.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import json
import os
import random
import numpy as np
import scipy
import scipy.io.wavfile
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--dataset', default='daqa.json', type=str,
help='JSON file describing the dataset.')
parser.add_argument('--events', default='events', type=str,
help='Location of individual audio events.')
parser.add_argument('--backgrounds', default='backgrounds', type=str,
help='Location of some background noise audio.')
parser.add_argument('--data_fs', default=16000, type=int,
help='Sampling frequency (Hz).')
# Settings
parser.add_argument('--min_num_events', default=5, type=int,
help='Minimum number of events per generated audio.')
parser.add_argument('--max_num_events', default=12, type=int,
help='Maximum number of events per generated audio.')
parser.add_argument('--rand_overlap', default=0.5, type=float,
help='Maximum overlap between adjacent events (seconds).')
parser.add_argument('--seed', default=0, type=int, help='Random Seed.')
parser.add_argument('--version', default='1.0', type=str, help='Version.')
parser.add_argument('--date',
default=datetime.datetime.today().strftime("%m/%d/%Y"),
help="Date.")
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)',
help='License.')
# Output
parser.add_argument('--start_idx', default=0, type=int,
help='Start numbering from start_idx.')
parser.add_argument('--num_audio', default=10, type=int,
help='Number of audio to generate.')
parser.add_argument('--filename_prefix', default='daqa', type=str,
help='Filename prefix to audio and JSON files.')
parser.add_argument('--set', default='new',
help='Set name: train / val / test.')
parser.add_argument('--num_digits', default=6, type=int,
help='Number of digits to enumerate the generated files.')
parser.add_argument('--output_audio_dir', default='../daqa/audio/',
help='Directory to output generated audio.')
parser.add_argument('--output_narrative_dir', default='../daqa/narratives/',
help='Directory to output generated narratives.')
parser.add_argument('--output_narrative_file',
default='../daqa/daqa_narratives.json',
help="Path to narratives JSON file.")
def main(args):
"""Randomly sample audio events to form sequences of events."""
random.seed(args.seed)
np.random.seed(args.seed)
# Read dataset description
with open(args.dataset, 'r') as f:
dataset = json.load(f)
# Define naming conventions and directories
prefix = '%s_%s_' % (args.filename_prefix, args.set)
audio_template = '%s%%0%dd.wav' % (prefix, args.num_digits)
audio_template = os.path.join(args.output_audio_dir, audio_template)
narrative_template = '%s%%0%dd.json' % (prefix, args.num_digits)
narrative_template = os.path.join(args.output_narrative_dir,
narrative_template)
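# e.g. (sketch, with the default arguments --filename_prefix daqa --set new --num_digits 6):
#   audio_template     -> '../daqa/audio/daqa_new_%06d.wav'
#   narrative_template -> '../daqa/narratives/daqa_new_%06d.json'
# The doubled '%%0%dd' escape leaves a zero-padded index placeholder to be filled per file.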
if not os.path.isdir(args.output_audio_dir):
os.makedirs(args.output_audio_dir)
if not os.path.isdir(args.output_narrative_dir):
os.makedirs(args.output_narrative_dir)
# Get list of events and backgrounds
lst_events = list(dataset['origins'].keys()) # without .wav
lst_events_wav = os.listdir(args.events)
lst_events_wav = [e[:-4] for e in lst_events_wav if e.endswith('.wav')]
assert len(lst_events) == len(lst_events_wav), 'Dataset mismatch.'
assert sorted(lst_events) == sorted(lst_events_wav), 'Dataset mismatch.'
lst_bckgrnds = os.listdir(args.backgrounds)
lst_bckgrnds = [e for e in lst_bckgrnds if e.endswith('.wav')]
x_consctvs = [k for k, v in dataset['consecutive'].items() if v is False]
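# x_consctvs lists the events that must not appear twice in a row
# (their 'consecutive' flag in the dataset outline is False).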
num_fails = 0
# Generate audio and narratives from events
lst_narrative_paths = []
for i in range(args.num_audio):
idx = args.start_idx + i
audio_path = audio_template % idx
narrative_path = narrative_template % idx
lst_narrative_paths.append(narrative_path)
num_events = random.randint(args.min_num_events, args.max_num_events)
# Sample num_events number of events (not unique)
sel_events = None
while sel_events is None:
sel_events = random.sample(lst_events, num_events)
# The following checks if the sequence of selected events is ok
sel_events_dx = [x.split('_')[0] for x in sel_events]
# Check if the list has any identical consecutive events
consecutives = []
for x in range(len(sel_events_dx) - 1):
if sel_events_dx[x] == sel_events_dx[x + 1]:
consecutives.append(sel_events_dx[x])
# Check if any of the events in consecutives are not allowed
if len([x for x in consecutives if x in x_consctvs]) > 0:
sel_events = None # retry
num_fails += 1
sel_bckgrnd = random.sample(lst_bckgrnds, 1)
audio, narrative = gen_audio_narrative(dataset=dataset,
args=args,
selcted_events=sel_events,
selcted_bckgrnd=sel_bckgrnd,
output_index=idx,
output_audio=audio_path,
)
scipy.io.wavfile.write(audio_path, args.data_fs, audio)
with open(narrative_path, 'w') as f:
json.dump(narrative, f)
print('Generated ' + str(args.num_audio) + ' audio sequences ('
+ str(num_fails) + ' failed attempts). Compiling narratives...')
# Combine all narratives into a single JSON file
lst_narratives = []
for narrative_path in lst_narrative_paths:
with open(narrative_path, 'r') as f:
lst_narratives.append(json.load(f))
output = {
'info': {
'set': args.set,
'version': args.version,
'date': args.date,
'license': args.license,
},
'narratives': lst_narratives
}
with open(args.output_narrative_file, 'w') as f:
json.dump(output, f)
return True
def gen_audio_narrative(dataset,
args,
selcted_events,
selcted_bckgrnd,
output_index,
output_audio):
# Read audio events
lst_audio_events = []
for e in selcted_events:
e_wav = os.path.join(args.events, e + '.wav')
event_fs, event = scipy.io.wavfile.read(e_wav)
assert event_fs == args.data_fs, \
'Audio event sampling frequency != ' + str(args.data_fs) + ' Hz.'
lst_audio_events.append(event)
# Toss an unbiased coin to concatenate or add events
if random.random() < 0.5:
# concatenate
audio = np.concatenate(lst_audio_events)
else:
# add (allows overlap between adjacent events)
audio = lst_audio_events[0]
for event in lst_audio_events[1:]:
idx_overlap = random.randint(0, int(args.rand_overlap * args.data_fs))
plhldr = np.zeros(event.shape[0] - idx_overlap, event.dtype)
audio = np.concatenate((audio, plhldr))
audio[-event.shape[0]:] += event
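# The zero placeholder grows the mix by the non-overlapping part of the event, so
# adding the event into the last event.shape[0] samples overlaps its first
# idx_overlap samples with the tail of the previous audio.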
assert len(audio.shape) == 1, 'Audio events not concatenated properly.'
# Toss an unbiased coin to add background noise
background = 'None'
if random.random() < 0.5:
selec_bckgrnd = os.path.join(args.backgrounds, selcted_bckgrnd[0])
bckgrnd_fs, bckgrnd = scipy.io.wavfile.read(selec_bckgrnd)
assert bckgrnd_fs == args.data_fs, \
'Background sampling frequency != ' + str(args.data_fs) + ' Hz.'
idx_trim = random.randint(0, bckgrnd.shape[0] - audio.shape[0])
trim_bckgrnd = bckgrnd[idx_trim:(audio.shape[0] + idx_trim)]
audio += trim_bckgrnd
background = selcted_bckgrnd[0][:-4]
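# Note: this assumes the background wav is at least as long as the mixed audio;
# a shorter background would make randint's upper bound negative and raise.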
events = []
for idx, sel_event in enumerate(selcted_events):
event_dx = sel_event.split('_')[0]
event = { # 'start_time': 'end_time':
'order': idx,
'event': event_dx,
'audio': sel_event,
'source': random.choice(dataset['sources'][event_dx]),
'action': random.choice(dataset['actions'][event_dx]),
'duration': (float(lst_audio_events[idx].shape[0]) / args.data_fs),
'loudness': dataset['origins'][sel_event]['loudness'],
}
events.append(event)
# Generate JSON
narrative = {
'set': args.set,
'audio_index': output_index,
'audio_filename': os.path.basename(output_audio),
'background': background,
'events': events,
}
return audio, narrative
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-gen/generate_audio.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import json
parser = argparse.ArgumentParser()
# Input
parser.add_argument('--outline', default='daqa_outline.json', type=str,
help='Location of outline file.')
parser.add_argument('--sources', default='daqa_sources.json', type=str,
help='Location of sources file.')
parser.add_argument('--loudness', default='daqa_loudness.json', type=str,
help='Location of loudness file.')
# Settings
parser.add_argument('--version', default='1.0', type=str,
help='Version.')
parser.add_argument('--date',
default=datetime.datetime.today().strftime("%m/%d/%Y"),
help="Date.")
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)',
help='License.')
# Output
parser.add_argument('--output', default='daqa.json', type=str,
help='Location of dataset file.')
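# Rough pipeline: daqa_outline.py and daqa_sources.py write the outline and sources
# JSONs; this script merges them with daqa_loudness.json into a single daqa.json,
# which generate_audio.py then consumes via its --dataset argument.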
def main(args):
# Read files
with open(args.outline, 'r') as f:
outline = json.load(f)
with open(args.sources, 'r') as f:
sources = json.load(f)
with open(args.loudness, 'r') as f:
loudness = json.load(f)
dataset = {
'info': {
'version': args.version,
'date': args.date,
'license': args.license,
},
'events': outline['events'],
'sources': outline['sources'],
'actions': outline['actions'],
'consecutive': outline['consecutive'],
'origins': {},
}
counter = {event: 0 for event in dataset['events']}
for i in range(1, len(sources.keys()) + 1):
counter[sources[str(i)]['event']] += 1
ins = sources[str(i)]['event'] + '_' + \
str(counter[sources[str(i)]['event']])
dataset['origins'][ins] = sources[str(i)]
dataset['origins'][ins]['filename'] = ins + '.wav'
dataset['origins'][ins]['loudness'] = loudness[ins]
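# e.g., the third clip tagged 'd002' becomes origin 'd002_3' with filename
# 'd002_3.wav', carrying its source entry plus a loudness value looked up from
# the loudness JSON.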
with open(args.output, 'w') as f:
json.dump(dataset, f) # indent=2
if __name__ == "__main__":
args = parser.parse_args()
main(args)
print('Success!')
|
daqa-master
|
daqa-gen/daqa.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, sample_absolute_duration,
sample_absolute_loudness, sample_immediate_preposition,
sample_number, sample_preposition, sanitize_question)
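# Question templates use angle-bracket placeholders filled in below (<O> ordinal,
# <S> source, <A> action, <AL>/<AD> absolute loudness/duration, <RO>/<IO>
# relative/immediate preposition); the [a,b] alternation lists are presumably
# resolved inside sanitize_question from qpas.utils.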
def what_was(dataset, narrative, _):
questions = ['What was the <O> sound you [heard,listened to]?',
'What was the <O> sound?',
'What did the <O> sound [sound,seem] like?',
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
event = lst_events[number - 1]
answer = (str(np.random.choice(dataset['sources'][event]))
+ ' '
+ str(np.random.choice(dataset['actions'][event])))
return question, answer
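# Illustrative run (assuming sanitize_question picks 'heard'): with ordinal 'second'
# the first template becomes 'What was the second sound you heard?' and the answer
# joins a sampled source and action for that event, e.g. 'dog barking'.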
def what_was_relative(dataset, narrative, _):
questions = ['What was the sound <RO> the <S> <A>?',
'What was the sound <RO> [hearing,listening to] the <S> <A>?',
'What was the sound <RO> the <S> <A> was heard?',
'What did you [hear,listen to] <RO> the <S> <A>?',
'What did you [hear,listen to] <RO> [hearing,listening to] the <S> <A>?', # noqa: E501
'What did you [hear,listen to] <RO> the <S> <A> was heard?',
'What was the sound <IO> the <S> <A>?',
'What was the sound <IO> [hearing,listening to] the <S> <A>?',
'What was the sound <IO> the <S> <A> was heard?',
'What did you [hear,listen to] <IO> the <S> <A>?',
'What did you [hear,listen to] <IO> [hearing,listening to] the <S> <A>?', # noqa: E501
'What did you [hear,listen to] <IO> the <S> <A> was heard?',
]
question = str(np.random.choice(questions)) # sample question
preposition = sample_preposition()
immediate_preposition = sample_immediate_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (what_was_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
# Only one of the following two lines will have an effect
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<IO>', immediate_preposition)
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (what_was_relative) illposed.'
event_idx = lst_events.index(event)
if 'before' in question:
if (event_idx - 1) < 0:
answer = 'nothing'
else:
e = lst_events[event_idx - 1]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
elif 'after' in question:
if (event_idx + 1) >= len(lst_events):
answer = 'nothing'
else:
e = lst_events[event_idx + 1]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
else:
assert False, 'Preposition illdefined in Question (what_was_relative).'
return question, answer
def what_was_loudness(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AL> sound?',
'What was the <AL> sound you [heard,listened to]?',
'What was the <AL> sound that you [heard,listened to]?',
'What was the <AL> sound that was heard?',
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_absolute_loudness()
question = question.replace('<AL>', loudness) # insert loudness
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
lst_loudness = get_lst_loudness(narrative)
if 'loud' in question:
est = np.argmax(lst_loudness)
elif 'quiet' in question:
est = np.argmin(lst_loudness)
else:
assert False, \
'Loudness illdefined in Question (what_was_loudness).'
# Assert a good margin in relative loudness
evt_loudness = lst_loudness[est]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != est]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (what_was_loudness) illposed.'
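# The margin check rejects narratives where any other event's loudness is within
# rel_diff (10% by default) of the selected extreme, keeping the answer unambiguous;
# compute_rel_diff presumably returns the element-wise relative difference.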
e = lst_events[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_loudness_relative(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AL> sound <RO> the <S> <A>?',
'What was the <AL> sound <RO> [hearing,listening to] the <S> <A>?',
'What was the <AL> sound <RO> the <S> <A> was heard?',
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_absolute_loudness()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (what_was_loudness_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<AL>', loudness) # insert loudness
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (what_was_loudness_relative) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
if 'before' in question:
lst_events_e = lst_events[:event_idx]
lst_events_l = lst_loudness[:event_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_l = lst_loudness[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_loudness_relative).'
assert len(lst_events_e) > 0, \
'Question (what_was_loudness_relative) illposed.'
if 'loud' in question:
est = np.argmax(lst_events_l)
elif 'quiet' in question:
est = np.argmin(lst_events_l)
else:
assert False, \
'Loudness illdefined in Question (what_was_loudness_relative).'
# Assert a good margin in relative loudness
evt_loudness = lst_events_l[est]
x_loudness = [j for i, j in enumerate(lst_events_l) if i != est]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (what_was_loudness_relative) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_loudness_relative_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AL> sound <RO> the <O> sound?',
'What was the <AL> sound <RO> [hearing,listening to] the <O> sound?',
'What was the <AL> sound <RO> the <O> sound was heard?',
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_absolute_loudness()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<AL>', loudness) # insert loudness
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
event_idx = (number - 1)
answer = None
if 'before' in question:
if (event_idx - 1) < 0:
answer = 'nothing'
else:
lst_events_e = lst_events[:event_idx]
lst_events_l = lst_loudness[:event_idx]
elif 'after' in question:
if (event_idx + 1) >= len(lst_events):
answer = 'nothing'
else:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_l = lst_loudness[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_loudness_relative_ordinal).'
if answer is None:
assert len(lst_events_e) > 0, \
'Question (what_was_loudness_relative_ordinal) illposed.'
if 'loud' in question:
est = np.argmax(lst_events_l)
elif 'quiet' in question:
est = np.argmin(lst_events_l)
else:
assert False, \
'Loudness illdefined in Question (what_was_loudness_relative_ordinal).'
# Assert a good margin in relative loudness
evt_loudness = lst_events_l[est]
x_loudness = [j for i, j in enumerate(lst_events_l) if i != est]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (what_was_loudness_relative_ordinal) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_duration(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AD> sound?',
'What was the <AD> sound you [heard,listened to]?',
'What was the <AD> sound that you [heard,listened to]?',
'What was the <AD> sound that was heard?',
]
question = str(np.random.choice(questions)) # sample question
duration = sample_absolute_duration()
question = question.replace('<AD>', duration) # insert duration
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
lst_durations = get_lst_durations(narrative)
if 'long' in question:
est = np.argmax(lst_durations)
elif 'short' in question:
est = np.argmin(lst_durations)
else:
assert False, \
'Duration illdefined in Question (what_was_duration).'
# Assert a good margin in relative duration
evt_duration = lst_durations[est]
x_durations = [j for i, j in enumerate(lst_durations) if i != est]
rel_duration_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (what_was_duration) illposed.'
e = lst_events[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_duration_relative(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AD> sound <RO> the <S> <A>?',
'What was the <AD> sound <RO> [hearing,listening to] the <S> <A>?',
'What was the <AD> sound <RO> the <S> <A> was heard?',
]
question = str(np.random.choice(questions)) # sample question
duration = sample_absolute_duration()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (what_was_duration_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<AD>', duration) # insert duration
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (what_was_duration_relative) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
if 'before' in question:
lst_events_e = lst_events[:event_idx]
lst_events_d = lst_durations[:event_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_d = lst_durations[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_duration_relative).'
assert len(lst_events_e) > 0, \
'Question (what_was_duration_relative) illposed.'
if 'long' in question:
est = np.argmax(lst_events_d)
elif 'short' in question:
est = np.argmin(lst_events_d)
else:
assert False, \
'Duration illdefined in Question (what_was_duration_relative).'
# Assert a good margin in relative duration
evt_duration = lst_events_d[est]
x_durations = [j for i, j in enumerate(lst_events_d) if i != est]
rel_duration_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (what_was_duration_relative) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
def what_was_duration_relative_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['What was the <AD> sound <RO> the <O> sound?',
'What was the <AD> sound <RO> [hearing,listening to] the <O> sound?',
'What was the <AD> sound <RO> the <O> sound was heard?',
]
question = str(np.random.choice(questions)) # sample question
duration = sample_absolute_duration()
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<AD>', duration) # insert duration
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
event_idx = (number - 1)
answer = None
if 'before' in question:
if (event_idx - 1) < 0:
answer = 'nothing'
else:
lst_events_e = lst_events[:event_idx]
lst_events_d = lst_durations[:event_idx]
elif 'after' in question:
if (event_idx + 1) >= len(lst_events):
answer = 'nothing'
else:
lst_events_e = lst_events[(event_idx + 1):]
lst_events_d = lst_durations[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (what_was_duration_relative_ordinal).'
if answer is None:
assert len(lst_events_e) > 0, \
'Question (what_was_duration_relative_ordinal) illposed.'
if 'long' in question:
est = np.argmax(lst_events_d)
elif 'short' in question:
est = np.argmin(lst_events_d)
else:
assert False, \
'Duration illdefined in Question (what_was_duration_relative_ordinal).'
# Assert a good margin in relative duration
evt_duration = lst_events_d[est]
x_durations = [j for i, j in enumerate(lst_events_d) if i != est]
rel_duration_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (what_was_duration_relative_ordinal) illposed.'
e = lst_events_e[est]
answer = (str(np.random.choice(dataset['sources'][e]))
+ ' '
+ str(np.random.choice(dataset['actions'][e])))
return question, answer
|
daqa-master
|
daqa-gen/qpas/query.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_all_sources,
get_lst_durations, get_lst_events, get_lst_loudness,
sample_duration, sample_immediate_preposition,
sample_loudness, sample_number, sample_preposition,
sanitize_question)
def was_there(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S> <A>?',
'Have you [heard,listened to] [a,an] <S> <A>?',
'Did you [hear,listen to] any <S> <A>?',
'Have you [heard,listened to] any <S> <A>?',
'Did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S> <A>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounds like,sounded like,is,was] [a,an] <S> <A>?', # noqa: E501
'Was there [a,an] <S> <A>?',
'Were there any <S>s <A>?',
]
question = str(np.random.choice(questions)) # sample question
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
answer = 'yes' if event in get_lst_events(narrative) else 'no'
return question, answer
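# Illustrative run (assuming sanitize_question picks the article 'a'): with
# <S>='phone' and <A>='ringing' a template such as 'Was there a phone ringing?'
# is produced, answered 'yes' iff event 'p000' occurs in the narrative.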
def was_there_two_and(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> and [a,an] <S2> <A2>?',
'Have you [heard,listened to] [a,an] <S1> <A1> and [a,an] <S2> <A2>?',
'Did you [hear,listen to] any <S1> <A1> and any <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> and any <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,is] [a,an] <S1> <A1> and a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] a sound that [sounded like,was] [a,an] <S1> <A1> and a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounds like,is] [a,an] <S1> <A1> and a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounded like,was] [a,an] <S1> <A1> and a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Was there [a,an] <S1> <A1> and [a,an] <S2> <A2>?',
'Were there any <S1>s <A1> and any <S2>s <A2>?',
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
action_1 = str(np.random.choice(dataset['actions'][event_1])) # sample action
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
action_2 = str(np.random.choice(dataset['actions'][event_2])) # sample action
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = 'yes' if (event_1 in lst_events and event_2 in lst_events) else 'no'
return question, answer
def was_there_two_or(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> or [a,an] <S2> <A2>?',
'Have you [heard,listened to] [a,an] <S1> <A1> or [a,an] <S2> <A2>?',
'Did you [hear,listen to] any <S1> <A1> or any <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> or any <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,is] [a,an] <S1> <A1> or a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] a sound that [sounded like,was] [a,an] <S1> <A1> or a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounds like,is] [a,an] <S1> <A1> or a sound [sounds like,is] [a,an] <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] a sound that [sounded like,was] [a,an] <S1> <A1> or a sound [sounded like,was] [a,an] <S2> <A2>?', # noqa: E501
'Was there [a,an] <S1> <A1> or [a,an] <S2> <A2>?',
'Were there any <S1>s <A1> or any <S2>s <A2>?',
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
action_1 = str(np.random.choice(dataset['actions'][event_1])) # sample action
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
action_2 = str(np.random.choice(dataset['actions'][event_2])) # sample action
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = 'yes' if (event_1 in lst_events or event_2 in lst_events) else 'no'
return question, answer
def was_there_source(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S>?',
'Have you [heard,listened to] [a,an] <S>?',
'Did you [hear,listen to] any <S>?',
'Have you [heard,listened to] any <S>?',
'Was there a sound [produced,made] by [a,an] <S>?',
'Were there any sounds [produced,made] by [a,an] <S>?',
]
question = str(np.random.choice(questions)) # sample question
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event])) # sample source
question = question.replace('<S>', source) # insert source
question = sanitize_question(question) # correct grammar
answer = 'yes' if source in get_lst_all_sources(dataset, narrative) else 'no'
return question, answer
def was_there_source_two_and(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> and [a,an] <S2>?',
'Have you [heard,listened to] [a,an] <S1> and [a,an] <S2>?',
'Did you [hear,listen to] any <S1> and any <S2>?',
'Have you [heard,listened to] any <S1> and any <S2>?',
'Was there a sound [produced,made] by [a,an] <S1> and a sound [produced,made] by [a,an] <S2>?', # noqa: E501
'Were there any sounds [produced,made] by [a,an] <S1> and any sounds [produced,made] by [a,an] <S2>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<S2>', source_2) # insert source
question = sanitize_question(question) # correct grammar
lst_sources = get_lst_all_sources(dataset, narrative)
answer = 'yes' if (source_1 in lst_sources and source_2 in lst_sources) else 'no'
return question, answer
def was_there_source_two_or(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> or [a,an] <S2>?',
'Have you [heard,listened to] [a,an] <S1> or [a,an] <S2>?',
'Did you [hear,listen to] any <S1> or any <S2>?',
'Have you [heard,listened to] any <S1> or any <S2>?',
'Was there a sound [produced,made] by [a,an] <S1> or a sound [produced,made] by [a,an] <S2>?', # noqa: E501
'Were there any sounds [produced,made] by [a,an] <S1> or any sounds [produced,made] by [a,an] <S2>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1])) # sample source
lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2])) # sample source
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<S2>', source_2) # insert source
question = sanitize_question(question) # correct grammar
lst_sources = get_lst_all_sources(dataset, narrative)
answer = 'yes' if (source_1 in lst_sources or source_2 in lst_sources) else 'no'
return question, answer
def was_there_relative(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> <RO> the <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] [a,an] <S1> <A1> <RO> the <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] any <S1> <A1> <RO> the <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> <RO> the <S2> <A2>?',
'Was there [a,an] <S1> <A1> <RO> the <S2> <A2>?',
'Were there any <S1>s <A1> <RO> the <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1> <RO> the <S2> <A2>?', # noqa: E501
'<RO> the <S2> <A2>, did you [hear,listen to] [a,an] <S1> <A1>?', # noqa: E501
'<RO> the <S2> <A2>, did you [hear,listen to] any <S1> <A1>?',
'<RO> the <S2> <A2>, was there [a,an] <S1> <A1>?',
'<RO> the <S2> <A2>, were there any <S1>s <A1>?',
'<RO> the <S2> <A2>, did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(unique_lst_events) > 0, \
'Question (was_there_relative) illposed.'
event_2 = str(np.random.choice(unique_lst_events))
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event_2) == 1, \
'Question (was_there_relative) illposed.'
event_2_idx = lst_events.index(event_2)
if 'before' in preposition:
lst_events = lst_events[:event_2_idx]
elif 'after' in preposition:
lst_events = lst_events[(event_2_idx + 1):]
else:
assert False, 'Preposition illdefined in Question (was_there_relative).'
answer = 'yes' if event_1 in lst_events else 'no'
return question, answer
def was_there_immediate_relative(dataset, narrative, _):
questions = ['Did you [hear,listen to] [a,an] <S1> <A1> <IO> the <S2> <A2>?', # noqa: E501
'Have you [heard,listened to] [a,an] <S1> <A1> <IO> the <S2> <A2>?', # noqa: E501
'Did you [hear,listen to] any <S1> <A1> <IO> the <S2> <A2>?',
'Have you [heard,listened to] any <S1> <A1> <IO> the <S2> <A2>?',
'Was there [a,an] <S1> <A1> <IO> the <S2> <A2>?',
'Were there any <S1>s <A1> <IO> the <S2> <A2>?',
'Did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1> <IO> the <S2> <A2>?', # noqa: E501
'<IO> the <S2> <A2>, did you [hear,listen to] [a,an] <S1> <A1> ?', # noqa: E501
'<IO> the <S2> <A2>, did you [hear,listen to] any <S1> <A1>?',
'<IO> the <S2> <A2>, was there [a,an] <S1> <A1>?',
'<IO> the <S2> <A2>, were there any <S1>s <A1>?',
'<IO> the <S2> <A2>, did you [hear,listen to] a sound that [sounds like,sounded like,is,was] [a,an] <S1> <A1>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
preposition = sample_immediate_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(unique_lst_events) > 0, \
'Question (was_there_immediate_relative) illposed.'
event_2 = str(np.random.choice(unique_lst_events))
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<IO>', preposition) # insert preposition
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event_2) == 1, \
'Question (was_there_immediate_relative) illposed.'
event_2_idx = lst_events.index(event_2)
if 'before' in preposition:
if (event_2_idx - 1) < 0:
target_event = []
else:
target_event = lst_events[event_2_idx - 1]
elif 'after' in preposition:
if (event_2_idx + 1) >= len(lst_events):
target_event = []
else:
target_event = lst_events[event_2_idx + 1]
else:
assert False, \
'Preposition illdefined in Question (was_there_immediate_relative).'
answer = 'yes' if event_1 == target_event else 'no'
return question, answer
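# Asks whether the <O>-th sound (1-based ordinal) has at least one other
# occurrence of the same event type elsewhere in the narrative.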
def was_there_similar_ordinal(dataset, narrative, _):
questions = ['Were there any similar sounds to the <O> sound?',
'Were there any sounds that were similar to the <O> sound?',
'Was there at least a sound similar to the <O> sound?',
'Was there at least a sound that was similar to the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound similar to the <O> sound?',
'Was there at least [one,a single] sound that was similar to the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
event = lst_events[number - 1]
answer = 'yes' if lst_events.count(event) > 1 else 'no' # 1 for reference
return question, answer
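# Loudness-similarity questions: the answer is 'yes' iff at least one other
# event's loudness lies within rel_diff (relative difference) of the
# reference event. Narratives where any event falls in the ambiguous band
# between rel_diff and 2 * rel_diff are rejected via assert so the question
# stays well posed.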
def was_there_similar_loudness(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there any sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] the same loudness as <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] the same loudness as <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_similar_loudness) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_similar_loudness) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
evt_loudness = lst_loudness[event_idx]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != event_idx]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_loudness) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_loudness(dataset, narrative, rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_at_least_two_similar_loudness) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_at_least_two_similar_loudness) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
evt_loudness = lst_loudness[event_idx]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != event_idx]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_loudness) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 2 else 'no'
return question, answer
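# Ordinal variants of the loudness-similarity questions: the reference is the
# <O>-th sound in the narrative rather than a uniquely occurring
# source/action pair, so no uniqueness assert is needed.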
def was_there_similar_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there any sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
evt_loudness = lst_loudness[number - 1]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != (number - 1)]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_loudness_ordinal) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_loudness_ordinal(dataset,
narrative,
rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
evt_loudness = lst_loudness[number - 1]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != (number - 1)]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_loudness_ordinal) illposed.'
answer = 'yes' if np.sum(rel_loudness_diff <= rel_diff) >= 2 else 'no'
return question, answer
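# Duration-similarity questions mirror the loudness ones above, using
# get_lst_durations() and the same rel_diff margin convention.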
def was_there_similar_duration(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there any sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] the same duration as <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] the same duration as <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_similar_duration) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_similar_duration) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
evt_duration = lst_durations[event_idx]
x_durations = [j for i, j in enumerate(lst_durations) if i != event_idx]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_duration) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_duration(dataset, narrative, rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (was_there_at_least_two_similar_duration) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (was_there_at_least_two_similar_duration) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
evt_duration = lst_durations[event_idx]
x_durations = [j for i, j in enumerate(lst_durations) if i != event_idx]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_duration) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 2 else 'no'
return question, answer
def was_there_similar_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Were there any sounds [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there any sounds that were [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there any sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there any sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there at least a sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least a sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there at least [one,a single] sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
evt_duration = lst_durations[number - 1]
x_durations = [j for i, j in enumerate(lst_durations) if i != (number - 1)]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_similar_duration_ordinal) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 1 else 'no'
return question, answer
def was_there_at_least_two_similar_duration_ordinal(dataset,
narrative,
rel_diff=0.1):
questions = ['Were there at least two sounds [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Were there at least two sounds that were [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there more than a sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than a sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'Was there more than [one,a single] sound that was [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
evt_duration = lst_durations[number - 1]
x_durations = [j for i, j in enumerate(lst_durations) if i != (number - 1)]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (was_there_at_least_two_similar_duration_ordinal) illposed.'
answer = 'yes' if np.sum(rel_durations_diff <= rel_diff) >= 2 else 'no'
return question, answer
|
daqa-master
|
daqa-gen/qpas/exist.py
|
daqa-master
|
daqa-gen/qpas/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import numpy as np
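# Template conventions used by the question generators:
# - '[x,y,...]' marks alternatives; options() picks one at random, and the
#   special '[a,an]' choice is resolved by a_or_an() from the first letter
#   of the following word.
# - '<S>', '<A>', '<O>', '<L>', '<D>', '<RL>', '<RD>', '<RO>', '<IO>' are
#   placeholders (source, action, ordinal, loudness, duration, relative
#   loudness/duration, relative/immediate temporal preposition) that callers
#   replace before invoking sanitize_question().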
def a_or_an(q):
a_an_letter = re.findall(r'\[a,an\] \w', q)
for e in a_an_letter:
a_an, letter = e.split(' ')
if letter in ['a', 'e', 'i', 'o', 'u']:
q = q.replace('[a,an]', 'an', 1) # 1 to denote first occurrence
else:
q = q.replace('[a,an]', 'a', 1)
return q
def options(q):
assert ('[a' not in q) or ('an]' not in q), '[a,an] choice cannot be random.'
opt = re.findall(r'\[(.*?)\]', q)
for o in opt:
q = q.replace('[' + o + ']', np.random.choice(o.split(',')))
return q
def spaces(q):
q = q.replace('  ', ' ')  # collapse double spaces (applied twice for longer runs)
q = q.replace('  ', ' ')
return q
def sanitize_question(q):
q = a_or_an(q)
q = options(q)
q = spaces(q)
q = q.lower()
q = q.capitalize() # capitalizes only first letter
assert '<' not in q, 'Could not sanitize template: ' + q
assert '>' not in q, 'Could not sanitize template: ' + q
assert '[' not in q, 'Could not sanitize template: ' + q
assert ']' not in q, 'Could not sanitize template: ' + q
return q
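# Illustrative trace (hypothetical source/action, not taken from the dataset):
#   q = 'Was there [a,an] <S> <A>?'.replace('<S>', 'engine').replace('<A>', 'running')
#   sanitize_question(q)  ->  'Was there an engine running?'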
def sample_conjunction():
return str(np.random.choice(['and', 'or']))
def sample_preposition():
return str(np.random.choice(['before', 'after']))
def sample_immediate_preposition():
return '[just,immediately] ' + sample_preposition()
def numbers_to_ordinals(num):
ordinals = {
1: 'first',
2: 'second',
3: 'third',
4: 'fourth',
5: 'fifth',
6: 'sixth',
7: 'seventh',
8: 'eighth',
9: 'ninth',
10: 'tenth',
11: 'eleventh',
12: 'twelfth',
13: 'thirteenth',
14: 'fourteenth',
15: 'fifteenth',
}
return ordinals[num]
def sample_number(n):
number = int(np.random.randint(1, n + 1, 1)) # human indexing
return number, numbers_to_ordinals(number)
def sample_second_number(n, x_n):
lst_x_n = list(range(1, n + 1)) # human indexing
lst_x_n.remove(x_n)
number = int(np.random.choice(lst_x_n))
return number, numbers_to_ordinals(number)
def sample_loudness():
return str(np.random.choice(['quiet', 'loud']))
def sample_rel_loudness():
return str(np.random.choice(['quieter', 'louder']))
def sample_absolute_loudness():
return str(np.random.choice(['quietest', 'loudest']))
def sample_duration():
return str(np.random.choice(['short', 'long']))
def sample_rel_duration():
return str(np.random.choice(['shorter', 'longer']))
def sample_absolute_duration():
return str(np.random.choice(['shortest', 'longest']))
def get_lst_events(narrative):
le = len(narrative['events'])
return [narrative['events'][e]['event'] for e in range(le)]
def get_lst_sources(narrative):
le = len(narrative['events'])
return [narrative['events'][e]['source'] for e in range(le)]
def get_lst_all_sources(dataset, narrative):
ls = []
for e in range(len(narrative['events'])):
ls += dataset['sources'][narrative['events'][e]['event']]
return ls
def get_lst_actions(narrative):
le = len(narrative['events'])
return [narrative['events'][e]['action'] for e in range(le)]
def get_lst_durations(narrative):
le = len(narrative['events'])
return np.array([narrative['events'][e]['duration'] for e in range(le)])
def get_lst_loudness(narrative):
le = len(narrative['events'])
return np.array([narrative['events'][e]['loudness'] for e in range(le)])
def compute_rel_diff(actual, reference):
return np.abs(actual - reference) / reference
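# Example: compute_rel_diff(np.array([1.0, 1.2]), np.array(1.0)) returns
# approximately array([0., 0.2]), i.e. the absolute difference normalised by
# the reference value.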
def numbers_to_words(n):
numbers = {
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'fifteen',
}
return numbers[n]
|
daqa-master
|
daqa-gen/qpas/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, sample_duration, sample_loudness,
sample_number, sample_second_number,
sample_rel_duration, sample_rel_loudness,
sanitize_question)
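# Question generators that compare two sounds, identified either by a unique
# source/action pair or by their ordinal position in the narrative. The
# loudness/duration comparisons reject narratives whose gap is too small
# (relative to rel_diff) to yield an unambiguous answer.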
def compare_ordinal(dataset, narrative, _):
questions = ['Was the <O1> [sound event,sound] [the same as,similar to] the <O2> [sound event,sound]?', # noqa: E501
'Was the <O1> [sound event,sound] and <O2> [sound event,sound] [the same,similar]?', # noqa: E501
'Were the <O1> and <O2> [sound events,sounds] [the same,similar]?',
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
answer = 'yes' if lst_events[number_1 - 1] == lst_events[number_2 - 1] \
else 'no'
return question, answer
def compare_ordinal_event(dataset, narrative, _):
questions = ['Was the <O> [sound event,sound] [a,an] <S> <A>?', # noqa: E501
'Did the <O> [sound event,sound] [sound,seem] like [a,an] <S> <A>?', # noqa: E501
'[Listening to,Hearing] the <O> [sound event,sound], was it [a,an] <S> <A>?', # noqa: E501
'[Listening to,Hearing] the <O> [sound event,sound], did it [sound,seem] like [a,an] <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<O>', ordinal) # insert ordinal
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
answer = 'yes' if lst_events[number - 1] == event else 'no'
return question, answer
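# For the loudness comparisons below, the yes/no answer is keyed off the
# sampled word in the sanitized question text ('quiet...' vs 'loud...'), and
# an assert enforces a minimum relative loudness gap of rel_diff between the
# two events so the comparison is well posed.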
def compare_loudness(dataset, narrative, rel_diff):
questions = ['Was the <S1> <A1> <RL> than the <S2> <A2>?',
'Was the sound of the <S1> <A1> <RL> than the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S2> <A2> and the sound of the <S1> <A1>, was the latter <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S2> <A2> and the <S1> <A1>, was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, 'Question (compare_loudness) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
rel_loudness = sample_rel_loudness()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_loudness) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_loudness) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_loudness) illposed.'
assert event_1 != event_2, 'Question (compare_loudness) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event_1)]
e_2_loudness = lst_loudness[lst_events.index(event_2)]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, 'Loudness illdefined in Question (compare_loudness).'
return question, answer
def compare_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] <RL> than the <O2> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> [sound event,sound] and the <O1> [sound event,sound], was the latter <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> and <O1> [sound events,sounds], was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
rel_loudness = sample_rel_loudness()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_loudness_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[number_1 - 1]
e_2_loudness = lst_loudness[number_2 - 1]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness_ordinal) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, 'Loudness illdefined in Question (compare_loudness_ordinal).'
return question, answer
def compare_loudness_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> <RL> than the <O> [sound event,sound]?',
'Was the sound of the <S> <A> <RL> than the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_loudness_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_loudness = sample_rel_loudness()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_loudness_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_loudness_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event)]
e_2_loudness = lst_loudness[number - 1]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness_event_ordinal) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, \
'Loudness illdefined in Question (compare_loudness_event_ordinal).'
return question, answer
def compare_loudness_ordinal_event(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O> [sound event,sound] <RL> than the <S> <A>?',
'Was the <O> [sound event,sound] <RL> than the sound of the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the former <RL>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the latter <RL>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_loudness_ordinal_event) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_loudness = sample_rel_loudness()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_loudness_ordinal_event) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_loudness_ordinal_event) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RL>', rel_loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[number - 1]
e_2_loudness = lst_loudness[lst_events.index(event)]
# Assert a good margin in relative loudness
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
assert np.sum(rel_loudness_diff < rel_diff) <= 0, \
'Question (compare_loudness_ordinal_event) illposed.'
if 'quiet' in question:
answer = 'yes' if e_1_loudness < e_2_loudness else 'no'
elif 'loud' in question:
answer = 'yes' if e_1_loudness > e_2_loudness else 'no'
else:
assert False, \
'Loudness illdefined in Question (compare_loudness_ordinal_event).'
return question, answer
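# The *_same_loudness generators ask whether two sounds are roughly equally
# loud: 'yes' iff their relative loudness difference is <= rel_diff, with the
# ambiguous band (rel_diff, 2 * rel_diff) rejected via assert.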
def compare_same_loudness(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S1> <A1> [roughly,approximately] as <L> as the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] as <L> as the sound of the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] the same loudness as the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, did they [roughly,approximately] have the same loudness?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_loudness) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
loudness = sample_loudness()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_same_loudness) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_same_loudness) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_same_loudness) illposed.'
assert event_1 != event_2, 'Question (compare_same_loudness) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event_1)]
e_2_loudness = lst_loudness[lst_events.index(event_2)]
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_loudness) illposed.'
answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
return question, answer
def compare_same_loudness_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] [roughly,approximately] as <L> as the <O2> [sound event,sound]?', # noqa: E501
'Was the <O1> and <O2> [sound events,sounds] [roughly,approximately] as <L>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], did they have [roughly,approximately] the same loudness?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
loudness = sample_loudness()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_same_loudness_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[number_1 - 1]
e_2_loudness = lst_loudness[number_2 - 1]
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_loudness_ordinal) illposed.'
answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
return question, answer
def compare_same_loudness_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> [roughly,approximately] as <L> as the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same loudness?', # noqa: E501
'Was the <O> [sound event,sound] [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, were they [roughly,approximately] as loud?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, did they [roughly,approximately] have the same loudness?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, did they [roughly,approximately] have the same loudness?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_loudness_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
loudness = sample_loudness()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_same_loudness_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_same_loudness_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
e_1_loudness = lst_loudness[lst_events.index(event)]
e_2_loudness = lst_loudness[number - 1]
rel_loudness_diff = compute_rel_diff(np.array(e_1_loudness),
np.array(e_2_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_loudness_event_ordinal) illposed.'
answer = 'yes' if rel_loudness_diff <= rel_diff else 'no'
return question, answer
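# Duration comparisons mirror the loudness comparisons above, keyed off
# 'short...' vs 'long...' in the sanitized question and using
# get_lst_durations() for the per-event values.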
def compare_duration(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S1> <A1> <RD> than the <S2> <A2>?',
'Was the sound of the <S1> <A1> <RD> than the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S2> <A2> and the sound of the <S1> <A1>, was the latter <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S2> <A2> and the <S1> <A1>, was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_duration) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
rel_duration = sample_rel_duration()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_duration) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_duration) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_duration) illposed.'
assert event_1 != event_2, 'Question (compare_duration) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event_1)]
e_2_duration = lst_duration[lst_events.index(event_2)]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, 'Duration illdefined in Question (compare_duration).'
return question, answer
def compare_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] <RD> than the <O2> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> [sound event,sound] and the <O1> [sound event,sound], was the latter <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O2> and <O1> [sound events,sounds], was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
rel_duration = sample_rel_duration()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_duration_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[number_1 - 1]
e_2_duration = lst_duration[number_2 - 1]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration_ordinal) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, 'Duration illdefined in Question (compare_duration_ordinal).'
return question, answer
def compare_duration_event_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S> <A> <RD> than the <O> [sound event,sound]?',
'Was the sound of the <S> <A> <RD> than the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_duration_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_duration = sample_rel_duration()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_duration_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_duration_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event)]
e_2_duration = lst_duration[number - 1]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration_event_ordinal) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, \
'Duration illdefined in Question (compare_duration_event_ordinal).'
return question, answer
def compare_duration_ordinal_event(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O> [sound event,sound] <RD> than the <S> <A>?',
'Was the <O> [sound event,sound] <RD> than the sound of the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, was the former <RD>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], was the latter <RD>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_duration_ordinal_event) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
rel_duration = sample_rel_duration()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_duration_ordinal_event) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_duration_ordinal_event) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RD>', rel_duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[number - 1]
e_2_duration = lst_duration[lst_events.index(event)]
# Assert a good margin in relative duration
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
assert np.sum(rel_duration_diff < rel_diff) <= 0, \
'Question (compare_duration_ordinal_event) illposed.'
if 'short' in question:
answer = 'yes' if e_1_duration < e_2_duration else 'no'
elif 'long' in question:
answer = 'yes' if e_1_duration > e_2_duration else 'no'
else:
assert False, \
'Duration illdefined in Question (compare_duration_ordinal_event).'
return question, answer
def compare_same_duration(dataset, narrative, rel_diff=0.1):
questions = ['Was the <S1> <A1> [roughly,approximately] as <D> as the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] as <D> as the sound of the <S2> <A2>?', # noqa: E501
'Was the sound of the <S1> <A1> [roughly,approximately] the same duration as the sound of the <S2> <A2>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S1> <A1> and the sound of the <S2> <A2>, did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the sounds of the <S1> <A1> and the <S2> <A2>, did they [roughly,approximately] have the same duration?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_duration) illposed.'
event_1 = str(np.random.choice(unique_lst_events)) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
duration = sample_duration()
x_unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(x_unique_lst_events) > 0, \
'Question (compare_same_duration) illposed.'
event_2 = str(np.random.choice(x_unique_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert lst_events.count(event_1) == 1, \
'Question (compare_same_duration) illposed.'
assert lst_events.count(event_2) == 1, \
'Question (compare_same_duration) illposed.'
assert event_1 != event_2, 'Question (compare_same_duration) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question)
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event_1)]
e_2_duration = lst_duration[lst_events.index(event_2)]
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_duration_diff > rel_diff,
rel_duration_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_duration) illposed.'
answer = 'yes' if rel_duration_diff <= rel_diff else 'no'
return question, answer
def compare_same_duration_ordinal(dataset, narrative, rel_diff=0.1):
questions = ['Was the <O1> [sound event,sound] [roughly,approximately] as <D> as the <O2> [sound event,sound]?', # noqa: E501
'Was the <O1> and <O2> [sound events,sounds] [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> [sound event,sound] and the <O2> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O1> and <O2> [sound events,sounds], did they [roughly,approximately] have the same duration?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
duration = sample_duration()
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
assert number_1 != number_2, 'Question (compare_same_duration_ordinal) illposed.'
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[number_1 - 1]
e_2_duration = lst_duration[number_2 - 1]
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_duration_diff > rel_diff,
rel_duration_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_duration_ordinal) illposed.'
answer = 'yes' if rel_duration_diff <= rel_diff else 'no'
    return question, answer


def compare_same_duration_event_ordinal(dataset, narrative, rel_diff=0.1):
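    """Yes/no: do a uniquely occurring event and the <O>-th sound have
    roughly the same duration (relative difference at most rel_diff)?"""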
questions = ['Was the <S> <A> [roughly,approximately] as <D> as the <O> [sound event,sound]?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the sound of the <S> <A> and the <O> [sound event,sound], did they [roughly,approximately] have the same duration?', # noqa: E501
'Was the <O> [sound event,sound] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, were they [roughly,approximately] as <D>?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the <S> <A>, did they [roughly,approximately] have the same duration?', # noqa: E501
'[Comparing,Listening to,Hearing] the <O> [sound event,sound] and the sound of the <S> <A>, did they [roughly,approximately] have the same duration?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (compare_same_duration_event_ordinal) illposed.'
event = str(np.random.choice(unique_lst_events)) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
duration = sample_duration()
number, ordinal = sample_second_number(len(lst_events), lst_events.index(event) + 1)
assert lst_events.count(event) == 1, \
'Question (compare_same_duration_event_ordinal) illposed.'
assert lst_events.index(event) != (number - 1), \
'Question (compare_same_duration_event_ordinal) illposed.'
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_duration = get_lst_durations(narrative)
e_1_duration = lst_duration[lst_events.index(event)]
e_2_duration = lst_duration[number - 1]
rel_duration_diff = compute_rel_diff(np.array(e_1_duration),
np.array(e_2_duration))
# Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_duration_diff > rel_diff,
rel_duration_diff < (2 * rel_diff))) <= 0, \
'Question (compare_same_duration_event_ordinal) illposed.'
answer = 'yes' if rel_duration_diff <= rel_diff else 'no'
return question, answer
|
daqa-master
|
daqa-gen/qpas/compare.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from qpas.utils import (compute_rel_diff, get_lst_durations, get_lst_events,
get_lst_loudness, numbers_to_words, sample_duration,
sample_loudness, sample_number, sample_second_number,
                        sample_preposition, sanitize_question)


def how_many(dataset, narrative, _):
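    """Ask how many sound events the narrative contains; the answer is the
    total count spelled out in words."""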
questions = ['How many [sound events,sounds] were there?',
'How many [sound events,sounds] [did,could] you [hear,listen to]?',
'How many [sound events,sounds] have you [heard,listened to]?',
'What is the number of [sound events,sounds]?',
'What is the number of [sound events,sounds] [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] have you [heard,listened to]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = numbers_to_words(len(lst_events))
    return question, answer


def how_many_event(dataset, narrative, _):
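    """Ask how many times a sampled event (source/action pair) occurs in
    the narrative."""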
questions = ['How many times was [a,an] <S> <A>?',
'How many times did you [hear,listen to] [a,an] <S> <A>?',
'How many times have you [heard,listened to] [a,an] <S> <A>?',
'What is the number of times [a,an] <S> <A>?',
'What is the number of times did you [hear,listen to] [a,an] <S> <A>?', # noqa: E501
'What is the number of times you [heard,listened to] [a,an] <S> <A>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
event = str(np.random.choice(lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
answer = numbers_to_words(lst_events.count(event))
    return question, answer


def how_many_ordinal(dataset, narrative, _):
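    """Ask how many other sounds are of the same type as the <O>-th sound;
    the reference sound itself is excluded from the count."""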
questions = ['How many times did you [hear,listen to] a sound that [sounded,seemed] like the <O> [sound event,sound]?', # noqa: E501
'What is the number of times did you [hear,listen to] a sound that [sounded,seemed] like the <O> [sound event,sound]?', # noqa: E501
                 '[Hearing,Listening to] the <O> [sound event,sound], how many sounds were [the same,similar]?', # noqa: E501
                 '[Hearing,Listening to] the <O> [sound event,sound], what is the number of sounds that were [the same,similar]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
event = lst_events[number - 1]
answer = numbers_to_words(lst_events.count(event) - 1) # -1 for base event
    return question, answer


def how_many_event_two(dataset, narrative, _):
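    """Ask how many times either of two distinct event types occurs; the
    events are drawn from the full vocabulary, so the answer may be zero."""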
questions = ['How many times was [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?',
'How many times did you [hear,listen to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'How many times have you [heard,listened to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'What is the number of times [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'What is the number of times did you [hear,listen to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
'What is the number of times you [heard,listened to] [a,an] <S1> <A1> [or,and] [a,an] <S2> <A2>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
x_lst_events = [e for e in dataset['events'] if e != event_1]
event_2 = str(np.random.choice(x_lst_events)) # sample event
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
assert event_1 != event_2, 'Question (how_many_event_two) illposed.'
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
lst_events = get_lst_events(narrative)
answer = numbers_to_words(lst_events.count(event_1)
+ lst_events.count(event_2))
    return question, answer


def how_many_event_two_ordinal(dataset, narrative, _):
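    """Ask how many other sounds are of the same type as the <O1>-th or
    <O2>-th sound; each reference is excluded from its own count."""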
questions = ['How many times did you [hear,listen to] a sound that [sounded,seemed] like the <O1> [sound event,sound] [or,and] the <O2> [sound event,sound]?', # noqa: E501
'What is the number of times did you [hear,listen to] a sound that [sounded,seemed] like the <O1> [sound event,sound] [or,and] the <O2> [sound event,sound]?', # noqa: E501
'[Hearing,Listening to] the <O1> [sound event,sound] and the <O2> [sound event,sound], how many sounds were [the same,similar]?', # noqa: E501
'[Hearing,Listening to] the <O1> [sound event,sound] and the <O2> [sound event,sound], what is the number of sounds that were [the same,similar]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number_1, ordinal_1 = sample_number(len(lst_events))
number_2, ordinal_2 = sample_second_number(len(lst_events), number_1)
question = question.replace('<O1>', ordinal_1) # insert ordinal
question = question.replace('<O2>', ordinal_2) # insert ordinal
question = sanitize_question(question) # correct grammar
event_1 = lst_events[number_1 - 1]
event_2 = lst_events[number_2 - 1]
answer = numbers_to_words((lst_events.count(event_1) - 1) # -1 for base event
+ (lst_events.count(event_2) - 1))
    return question, answer


def how_many_sounds_relative(dataset, narrative, _):
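    """Ask how many sounds occur before or after a uniquely occurring
    event, depending on the sampled preposition."""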
questions = ['How many [sound events,sounds] <RO> the <S> <A> were there?',
'How many [sound events,sounds] <RO> the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] <RO> the <S> <A> have you [heard,listened to]?', # noqa: E501
'What is the number of [sound events,sounds] <RO> the <S> <A>?',
'What is the number of [sound events,sounds] <RO> the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] <RO> the <S> <A> have you [heard,listened to]?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] [did,could] you hear <RO>?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] have you heard <RO>?', # noqa: E501
'There is [a,an] <S> <A>; what is the number of [sound events,sounds] [did,could] you hear <RO>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (how_many_sounds_relative) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (how_many_sounds_relative) illposed.'
event_idx = lst_events.index(event)
if 'before' in question:
lst_events_e = lst_events[:event_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_idx + 1):]
else:
assert False, \
'Preposition illdefined in Question (how_many_sounds_relative).'
answer = numbers_to_words(len(lst_events_e))
    return question, answer


def how_many_sounds_relative_ordinal(dataset, narrative, _):
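    """Ask how many sounds occur after the <O>-th sound; the ordinal is
    constrained so that at least two sounds follow it."""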
questions = ['How many [sound events,sounds] after the <O> [sound event,sound] were there?', # noqa: E501
'How many [sound events,sounds] after the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] after the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
'What is the number of [sound events,sounds] after the <O> [sound event,sound]?', # noqa: E501
'What is the number of [sound events,sounds] after the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] after the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
assert number < (len(lst_events) - 1), \
'Question (how_many_sounds_relative_ordinal) illposed.'
lst_events_e = lst_events[number:]
answer = numbers_to_words(len(lst_events_e))
    return question, answer


def how_many_event_relative(dataset, narrative, _):
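    """Ask how many occurrences of one event type fall before or after a
    second event that occurs exactly once in the narrative."""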
questions = ['How many <S1>s <A1> <RO> the <S2> <A2> were there?',
'How many <S1>s <A1> <RO> the <S2> <A2> [did,could] you [hear,listen to]?', # noqa: E501
'How many <S1>s <A1> <RO> the <S2> <A2> have you [heard,listened to]?', # noqa: E501
'What is the number of <S1>s <A1> <RO> the <S2> <A2>?',
'What is the number of <S1>s <A1> <RO> the <S2> <A2> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of <S1>s <A1> <RO> the <S2> <A2> have you [heard,listened to]?', # noqa: E501
'There is [a,an] <S2> <A2>; how many <S1>s <A1> [did,could] you hear <RO>?', # noqa: E501
'There is [a,an] <S2> <A2>; how many <S1>s <A1> have you heard <RO>?', # noqa: E501
'There is [a,an] <S2> <A2>; what is the number of <S1>s <A1> [did,could] you hear <RO>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event_1 = str(np.random.choice(dataset['events'])) # sample event
source_1 = str(np.random.choice(dataset['sources'][event_1]))
action_1 = str(np.random.choice(dataset['actions'][event_1]))
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
unique_lst_events = [e for e in unique_lst_events if e != event_1]
assert len(unique_lst_events) > 0, \
'Question (how_many_event_relative) illposed.'
event_2 = str(np.random.choice(unique_lst_events))
source_2 = str(np.random.choice(dataset['sources'][event_2]))
action_2 = str(np.random.choice(dataset['actions'][event_2]))
question = question.replace('<S1>', source_1) # insert source
question = question.replace('<A1>', action_1) # insert action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<S2>', source_2) # insert source
question = question.replace('<A2>', action_2) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event_2) == 1, \
'Question (how_many_event_relative) illposed.'
event_2_idx = lst_events.index(event_2)
if 'before' in question:
lst_events_e = lst_events[:event_2_idx]
elif 'after' in question:
lst_events_e = lst_events[(event_2_idx + 1):]
else:
assert False, \
'Relative preposition illdefined in Question (how_many_event_relative).'
answer = numbers_to_words(lst_events_e.count(event_1))
    return question, answer


def how_many_event_relative_ordinal(dataset, narrative, _):
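    """Ask how many occurrences of a sampled event type fall before or
    after the <O>-th sound, depending on the sampled preposition."""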
questions = ['How many <S>s <A> <RO> the <O> [sound event,sound] were there?',
'How many <S>s <A> <RO> the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'How many <S>s <A> <RO> the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
'What is the number of <S>s <A> <RO> the <O> [sound event,sound]?',
'What is the number of <S>s <A> <RO> the <O> [sound event,sound] [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of <S>s <A> <RO> the <O> [sound event,sound] have you [heard,listened to]?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
event = str(np.random.choice(dataset['events'])) # sample event
source = str(np.random.choice(dataset['sources'][event]))
action = str(np.random.choice(dataset['actions'][event]))
preposition = sample_preposition()
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = question.replace('<RO>', preposition) # insert preposition
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
if 'before' in question:
assert number > 1, 'Question (how_many_event_relative_ordinal) illposed.'
lst_events_e = lst_events[:(number - 1)]
elif 'after' in question:
assert number < (len(lst_events) - 1), \
'Question (how_many_event_relative_ordinal) illposed.'
lst_events_e = lst_events[number:]
else:
assert False, \
'Relative preposition illdefined in Question (how_many_event_relative_ordinal).' # noqa: E501
answer = numbers_to_words(lst_events_e.count(event))
    return question, answer


def how_many_sounds_loudness_event(dataset, narrative, rel_diff=0.1):
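    """Ask how many other sounds are roughly as loud as a uniquely
    occurring event (relative loudness difference at most rel_diff)."""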
questions = ['How many [sound events,sounds] [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <L> as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <S> <A> have you heard?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] that are [roughly,approximately] as <L>?', # noqa: E501
'There is [a,an] <S> <A>; what is the number of [sound events,sounds] that are [roughly,approximately] as <L>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (how_many_sounds_loudness_event) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (how_many_sounds_loudness_event) illposed.'
lst_loudness = get_lst_loudness(narrative)
event_idx = lst_events.index(event)
evt_loudness = lst_loudness[event_idx]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != event_idx]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_loudness_event) illposed.'
answer = numbers_to_words(np.sum(rel_loudness_diff <= rel_diff))
    return question, answer


def how_many_sounds_loudness_ordinal(dataset, narrative, rel_diff=0.1):
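    """Ask how many other sounds are roughly as loud as the <O>-th sound
    (relative loudness difference at most rel_diff)."""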
questions = ['How many [sound events,sounds] [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <L> as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same loudness as the <O> sound have you heard?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
loudness = sample_loudness() # sample loudness
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<L>', loudness) # insert loudness
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_loudness = get_lst_loudness(narrative)
evt_loudness = lst_loudness[number - 1]
x_loudness = [j for i, j in enumerate(lst_loudness) if i != (number - 1)]
rel_loudness_diff = compute_rel_diff(np.array(x_loudness),
np.array(evt_loudness))
# Assert a good margin in relative loudness
assert np.sum(np.logical_and(rel_loudness_diff > rel_diff,
rel_loudness_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_loudness_ordinal) illposed.'
answer = numbers_to_words(np.sum(rel_loudness_diff <= rel_diff))
    return question, answer


def how_many_sounds_duration_event(dataset, narrative, rel_diff=0.1):
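    """Ask how many other sounds last roughly as long as a uniquely
    occurring event (relative duration difference at most rel_diff)."""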
questions = ['How many [sound events,sounds] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <D> as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <S> <A> have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A>?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <S> <A> have you heard?', # noqa: E501
'There is [a,an] <S> <A>; how many [sound events,sounds] that are [roughly,approximately] as <D>?', # noqa: E501
'There is [a,an] <S> <A>; what is the number of [sound events,sounds] that are [roughly,approximately] as <D>?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
unique_lst_events = [e for e in lst_events if lst_events.count(e) == 1]
assert len(unique_lst_events) > 0, \
'Question (how_many_sounds_duration_event) illposed.'
event = str(np.random.choice(unique_lst_events))
source = str(np.random.choice(dataset['sources'][event])) # sample source
action = str(np.random.choice(dataset['actions'][event])) # sample action
question = question.replace('<D>', duration) # insert duration
question = question.replace('<S>', source) # insert source
question = question.replace('<A>', action) # insert action
question = sanitize_question(question) # correct grammar
assert lst_events.count(event) == 1, \
'Question (how_many_sounds_duration_event) illposed.'
lst_durations = get_lst_durations(narrative)
event_idx = lst_events.index(event)
evt_duration = lst_durations[event_idx]
x_durations = [j for i, j in enumerate(lst_durations) if i != event_idx]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
    # Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_duration_event) illposed.'
answer = numbers_to_words(np.sum(rel_durations_diff <= rel_diff))
    return question, answer


def how_many_sounds_duration_ordinal(dataset, narrative, rel_diff=0.1):
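    """Ask how many other sounds last roughly as long as the <O>-th sound
    (relative duration difference at most rel_diff)."""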
questions = ['How many [sound events,sounds] [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound have you heard?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'How many [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that are [roughly,approximately] as <D> as the <O> sound have you heard?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound [did,could] you [hear,listen to]?', # noqa: E501
'What is the number of [sound events,sounds] that have [roughly,approximately] the same duration as the <O> sound have you heard?', # noqa: E501
]
question = str(np.random.choice(questions)) # sample question
duration = sample_duration() # sample duration
lst_events = get_lst_events(narrative)
number, ordinal = sample_number(len(lst_events))
question = question.replace('<D>', duration) # insert duration
question = question.replace('<O>', ordinal) # insert ordinal
question = sanitize_question(question) # correct grammar
lst_durations = get_lst_durations(narrative)
evt_duration = lst_durations[number - 1]
x_durations = [j for i, j in enumerate(lst_durations) if i != (number - 1)]
rel_durations_diff = compute_rel_diff(np.array(x_durations),
np.array(evt_duration))
    # Assert a good margin in relative duration
assert np.sum(np.logical_and(rel_durations_diff > rel_diff,
rel_durations_diff < (2 * rel_diff))) <= 0, \
'Question (how_many_sounds_duration_ordinal) illposed.'
answer = numbers_to_words(np.sum(rel_durations_diff <= rel_diff))
return question, answer
|
daqa-master
|
daqa-gen/qpas/count.py
|