# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
from typing import Dict, List, NamedTuple, Tuple
import unittest
from co3d.dataset import data_types as types
from co3d.dataset.data_types import FrameAnnotation
class TestDatasetTypes(unittest.TestCase):
def setUp(self):
self.entry = FrameAnnotation(
frame_number=23,
sequence_name="1",
frame_timestamp=1.2,
image=types.ImageAnnotation(path="/tmp/1.jpg", size=(224, 224)),
mask=types.MaskAnnotation(path="/tmp/1.png", mass=42.0),
viewpoint=types.ViewpointAnnotation(
R=(
(1, 0, 0),
(1, 0, 0),
(1, 0, 0),
),
T=(0, 0, 0),
principal_point=(100, 100),
focal_length=(200, 200),
),
)
def test_asdict_rec(self):
first = [dataclasses.asdict(self.entry)]
second = types._asdict_rec([self.entry])
self.assertEqual(first, second)
def test_parsing(self):
"""Test that we handle collections enclosing dataclasses."""
class NT(NamedTuple):
annot: FrameAnnotation
dct = dataclasses.asdict(self.entry)
parsed = types._dataclass_from_dict(dct, FrameAnnotation)
self.assertEqual(parsed, self.entry)
# namedtuple
parsed = types._dataclass_from_dict(NT(dct), NT)
self.assertEqual(parsed.annot, self.entry)
# tuple
parsed = types._dataclass_from_dict((dct,), Tuple[FrameAnnotation])
self.assertEqual(parsed, (self.entry,))
# list
parsed = types._dataclass_from_dict(
[
dct,
],
List[FrameAnnotation],
)
self.assertEqual(
parsed,
[
self.entry,
],
)
# dict
parsed = types._dataclass_from_dict({"k": dct}, Dict[str, FrameAnnotation])
self.assertEqual(parsed, {"k": self.entry})

# ===== end of file: tests/test_types.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import numpy as np
import tempfile
import torch
from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras
from pytorch3d.implicitron.dataset.json_index_dataset import FrameData
from pytorch3d.implicitron.evaluation.evaluate_new_view_synthesis import eval_batch
from pytorch3d.implicitron.models.base_model import ImplicitronRender
from co3d.challenge.io import (
load_mask,
store_mask,
load_depth,
store_depth,
load_image,
store_image,
load_1bit_png_mask,
store_1bit_png_mask,
store_rgbda_frame,
load_rgbda_frame,
)
from co3d.challenge.utils import get_result_directory_file_names, evaluate_file_folders
from co3d.challenge.metric_utils import eval_one
from co3d.challenge.data_types import RGBDAFrame
class TestIO(unittest.TestCase):
def test_save_load(self):
H = 100
W = 200
with tempfile.TemporaryDirectory() as tmpd:
for data_type in ["image", "mask", "depth", "depth_mask"]:
with self.subTest(data_type):
for _ in range(10):
C = {"depth_mask": 1, "mask": 1, "depth": 1, "image": 3}[data_type]
data = np.random.uniform(size=(C, H, W))
if data_type in ("mask", "depth_mask"):
data = (data > 0.5).astype(np.float32)
if C == 1:
data = data[0]
load_fun, store_fun = {
"mask": (load_mask, store_mask),
"depth": (load_depth, store_depth),
"image": (load_image, store_image),
"depth_mask": (load_1bit_png_mask, store_1bit_png_mask),
}[data_type]
fl = os.path.join(tmpd, f"{data_type}.png")
store_fun(data, fl)
data_ = load_fun(fl)
self.assertTrue(np.allclose(data, data_, atol=1 / 255))
class TestMetricUtils(unittest.TestCase):
def test_against_eval_batch(self):
H = 100
W = 200
for _ in range(20):
implicitron_render = _random_implicitron_render(2, H, W, "cpu")
for has_depth_mask in [True, False]:
frame_data = _random_frame_data(2, H, W, "cpu")
if not has_depth_mask:
frame_data.depth_mask = None
eval_batch_result = eval_batch(
frame_data,
implicitron_render,
)
pred_rgbda = RGBDAFrame(
image=implicitron_render.image_render[0].numpy(),
mask=implicitron_render.mask_render[0].numpy(),
depth=implicitron_render.depth_render[0].numpy(),
)
gt_rgbda = RGBDAFrame(
image=frame_data.image_rgb[0].numpy(),
mask=frame_data.fg_probability[0].numpy(),
depth=frame_data.depth_map[0].numpy(),
depth_mask=frame_data.depth_mask[0].numpy() if has_depth_mask else None,
)
eval_one_result = eval_one(
pred=pred_rgbda,
target=gt_rgbda,
)
# print("eval_batch; eval_one")
for k in ["iou", "psnr_fg", "psnr", "depth_abs_fg"]:
self.assertTrue(
np.allclose(eval_batch_result[k], eval_one_result[k], atol=1e-5)
)
# print(f"{k:15s}: {eval_batch_result[k]:1.3e} - {eval_one_result[k]:1.3e}")
class TestEvalScript(unittest.TestCase):
def test_fake_data(self):
N = 30
H = 120
W = 200
with tempfile.TemporaryDirectory() as tmp_pred, tempfile.TemporaryDirectory() as tmp_gt:
_generate_random_submission_data(tmp_pred, N, H, W)
_generate_random_submission_data(tmp_gt, N, H, W)
avg_result, per_example_result = evaluate_file_folders(tmp_pred, tmp_gt)
metrics = list(avg_result.keys())
for m in metrics:
self.assertTrue(
np.allclose(
np.mean([r[m] for r in per_example_result]),
avg_result[m],
)
)
self.assertTrue(len(per_example_result) == N)
def test_wrong_fake_data(self):
N = 30
H = 120
W = 200
# different number of eval/test examples
for N_pred in [N - 2, N + 2]:
with tempfile.TemporaryDirectory() as tmp_pred, tempfile.TemporaryDirectory() as tmp_gt:
_generate_random_submission_data(tmp_pred, N_pred, H, W)
_generate_random_submission_data(tmp_gt, N, H, W)
msg = (
"Unexpected submitted evaluation examples"
if N_pred > N
else "There are missing evaluation examples"
)
with self.assertRaisesRegex(ValueError, msg):
evaluate_file_folders(tmp_pred, tmp_gt)
# some eval examples missing depth/image
with tempfile.TemporaryDirectory() as tmp_pred, tempfile.TemporaryDirectory() as tmp_gt:
_generate_random_submission_data(tmp_pred, N_pred, H, W)
_generate_random_submission_data(tmp_gt, N, H, W)
pred_file_names = get_result_directory_file_names(tmp_pred)
first_ex = pred_file_names[list(pred_file_names.keys())[0]]
for file_type in ["depth", "image"]:
os.remove(first_ex + f"_{file_type}.png")
with self.assertRaisesRegex(
ValueError,
"Some evaluation examples are incomplete",
):
evaluate_file_folders(tmp_pred, tmp_gt)
def _generate_random_submission_data(folder, N, H, W):
for example_num in range(N):
root_path = os.path.join(folder, f"example_{example_num}")
store_rgbda_frame(_random_rgbda_frame(H, W), root_path)
def _random_implicitron_render(
N: int,
H: int,
W: int,
device: torch.device,
):
mask = _random_input_tensor(N, 1, H, W, True, device)
return ImplicitronRender(
depth_render=_random_input_tensor(N, 1, H, W, False, device),
image_render=_random_input_tensor(N, 3, H, W, False, device) * mask,
mask_render=mask,
)
def _random_rgbda_frame(H: int, W: int):
return RGBDAFrame(
image=np.random.uniform(size=(3, H, W)).astype(np.float32),
mask=(np.random.uniform(size=(1, H, W)) > 0.5).astype(np.float32),
depth=np.random.uniform(size=(1, H, W)).astype(np.float32) + 0.1,
)
def _random_frame_data(
N: int,
H: int,
W: int,
device: torch.device,
):
R, T = look_at_view_transform(azim=torch.rand(N) * 360)
cameras = PerspectiveCameras(R=R, T=T, device=device)
depth_map_common = (
torch.stack(
torch.meshgrid(
torch.linspace(0.0, 1.0, H),
torch.linspace(0.0, 1.0, W),
)
).mean(dim=0)
+ 0.1
)
depth_map = _random_input_tensor(N, 1, H, W, False, device) + depth_map_common[None]
random_args = {
"frame_number": torch.arange(N),
"frame_timestamp": torch.linspace(0.0, 1.0, N),
"sequence_category": ["random"] * N,
"camera": cameras,
"fg_probability": _random_input_tensor(N, 1, H, W, True, device),
"depth_map": depth_map,
"mask_crop": torch.ones(N, 1, H, W, device=device),
"depth_mask": _random_input_tensor(N, 1, H, W, True, device),
"sequence_name": ["sequence"] * N,
"image_rgb": _random_input_tensor(N, 3, H, W, False, device),
"frame_type": ["test_unseen", *(["test_known"] * (N - 1))],
}
return FrameData(**random_args)
def _random_input_tensor(
N: int,
C: int,
H: int,
W: int,
is_binary: bool,
device: torch.device,
) -> torch.Tensor:
T = torch.rand(N, C, H, W, device=device)
if is_binary:
T = (T > 0.5).float()
return T
if __name__ == "__main__":
unittest.main()

# ===== end of file: tests/test_challenge_evaluate.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
from joblib import Parallel, delayed
from collections import defaultdict
from tabulate import tabulate
from typing import List
from collections import Counter
from co3d.dataset.data_types import (
load_dataclass_jgzip,
FrameAnnotation,
SequenceAnnotation,
)
DATASET_ROOT = os.getenv("CO3DV2_DATASET_ROOT")
def _count_category(category):
fa_file = os.path.join(DATASET_ROOT, category, "frame_annotations.jgz")
sa_file = os.path.join(DATASET_ROOT, category, "sequence_annotations.jgz")
frame_annos = load_dataclass_jgzip(fa_file, List[FrameAnnotation])
# sequence_annos = load_dataclass_jgzip(sa_file, List[SequenceAnnotation])
seq_to_frame_annos = defaultdict(list)
for fa in frame_annos:
seq_to_frame_annos[fa.sequence_name].append(fa)
seq_to_frame_annos = dict(seq_to_frame_annos)
seq_set_cnt = Counter()
for _, frame_anno_list in seq_to_frame_annos.items():
seq_set, _ = frame_anno_list[0].meta["frame_type"].split("_")
seq_set_cnt.update([seq_set])
seq_set_cnt.update(["all"])
return dict(seq_set_cnt)
def main():
# get the category list
with open(os.path.join(DATASET_ROOT, "category_to_subset_name_list.json"), "r") as f:
category_to_subset_name_list = json.load(f)
categories = sorted(list(category_to_subset_name_list.keys()))
cat_to_n_per_set = {}
counts_per_category = Parallel(n_jobs=20)(
delayed(_count_category)(c) for c in categories
)
cat_to_n_per_set = dict(zip(categories, counts_per_category))
seq_sets_ = list(cat_to_n_per_set[categories[0]].keys())
tab = []
for category in cat_to_n_per_set:
n_per_set = [cat_to_n_per_set[category].get(set_, 0) for set_ in seq_sets_]
tab.append([category, *n_per_set])
    totals = [sum(t[i] for t in tab) for i in range(1, len(seq_sets_) + 1)]
tab.append(["TOTAL", *totals])
print(tabulate(tab, headers=["category", *seq_sets_]))
if __name__=="__main__":
main()

# ===== end of file: examples/print_co3d_stats.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import torch
import math
import sys
import json
import random
from tqdm import tqdm
from omegaconf import DictConfig
from typing import Tuple
from co3d.utils import dbir_utils
from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
from pytorch3d.renderer.camera_utils import join_cameras_as_batch
from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
from pytorch3d.implicitron.dataset.json_index_dataset_map_provider_v2 import (
JsonIndexDatasetMapProviderV2
)
from pytorch3d.implicitron.tools.config import expand_args_fields
from pytorch3d.implicitron.models.visualization.render_flyaround import render_flyaround
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.vis.plotly_vis import plot_scene
from pytorch3d.implicitron.tools.vis_utils import (
get_visdom_connection,
make_depth_image,
)
from pytorch3d.implicitron.tools.point_cloud_utils import (
get_rgbd_point_cloud,
)
DATASET_ROOT = os.getenv("CO3DV2_DATASET_ROOT")
logger = logging.getLogger(__file__)
def main(
output_dir: str = os.path.join(os.path.dirname(__file__), "show_co3d_dataset_files"),
n_show_sequences_per_category: int = 2,
visdom_env: str = "show_co3d_dataset",
visualize_point_clouds: bool = False,
visualize_3d_scene: bool = True,
n_frames_show: int = 20,
):
"""
Visualizes object point clouds from the CO3D dataset.
Note that the code iterates over all CO3D categories and (by default) exports
    2 videos per category subset. Hence, the whole loop will run for
a long time (3-4 hours).
"""
# make the script reproducible
random.seed(30)
# log info messages
logging.basicConfig(level=logging.INFO)
# make the output dir
os.makedirs(output_dir, exist_ok=True)
# get the category list
if DATASET_ROOT is None:
raise ValueError(
"Please set the CO3DV2_DATASET_ROOT environment variable to a valid"
" CO3Dv2 dataset root folder."
)
with open(os.path.join(DATASET_ROOT, "category_to_subset_name_list.json"), "r") as f:
category_to_subset_name_list = json.load(f)
# get the visdom connection
viz = get_visdom_connection()
# iterate over the co3d categories
categories = sorted(list(category_to_subset_name_list.keys()))
for category in tqdm(categories):
subset_name_list = category_to_subset_name_list[category]
for subset_name in subset_name_list:
# obtain the dataset
expand_args_fields(JsonIndexDatasetMapProviderV2)
dataset_map = JsonIndexDatasetMapProviderV2(
category=category,
subset_name=subset_name,
test_on_train=False,
only_test_set=False,
load_eval_batches=True,
dataset_JsonIndexDataset_args=DictConfig(
{"remove_empty_masks": False, "load_point_clouds": True}
),
).get_dataset_map()
train_dataset = dataset_map["train"]
            # get all sequence names in the training dataset
            sequence_names = list(train_dataset.seq_annots.keys())
            # select a few sequences to visualize
show_sequence_names = random.sample(
sequence_names,
k=min(n_show_sequences_per_category, len(sequence_names)),
)
for sequence_name in show_sequence_names:
# load up a bunch of frames
show_dataset_idx = [
x[2] for x in list(train_dataset.sequence_frames_in_order(sequence_name))
]
random.shuffle(show_dataset_idx)
show_dataset_idx = show_dataset_idx[:n_frames_show]
data_to_show = [train_dataset[i] for i in show_dataset_idx]
data_to_show_collated = data_to_show[0].collate(data_to_show)
# show individual frames
all_ims = []
for k in ["image_rgb", "depth_map", "depth_mask", "fg_probability"]:
# all_ims_now = torch.stack([d[k] for d in data_to_show])
all_ims_now = getattr(data_to_show_collated, k)
if k=="depth_map":
all_ims_now = make_depth_image(
all_ims_now, torch.ones_like(all_ims_now)
)
if k in ["depth_mask", "fg_probability", "depth_map"]:
all_ims_now = all_ims_now.repeat(1, 3, 1, 1)
all_ims.append(all_ims_now.clamp(0.0, 1.0))
all_ims = torch.cat(all_ims, dim=2)
title = f"random_frames"
viz.images(
all_ims, nrow=all_ims.shape[-1], env=visdom_env,
win=title, opts={"title": title},
)
if visualize_3d_scene:
# visualize a 3d plotly plot of the scene
camera_show = data_to_show_collated.camera
pointcloud_show = get_rgbd_point_cloud(
data_to_show_collated.camera,
data_to_show_collated.image_rgb,
data_to_show_collated.depth_map,
(data_to_show_collated.fg_probability > 0.5).float(),
mask_points=True,
)
viz.plotlyplot(
plot_scene(
{
sequence_name: {
"camera":camera_show,
"point_cloud": pointcloud_show
}
}
),
env=visdom_env,
win="3d_scene",
)
if not visualize_point_clouds:
continue
for load_dataset_pointcloud in [True, False]:
model = PointcloudRenderingModel(
train_dataset,
sequence_name,
device="cuda:0",
load_dataset_pointcloud=load_dataset_pointcloud,
)
video_path = os.path.join(
output_dir,
category,
f"{subset_name}_l{load_dataset_pointcloud}",
)
os.makedirs(os.path.dirname(video_path), exist_ok=True)
logger.info(f"Rendering rotating video {video_path}")
render_flyaround(
train_dataset,
sequence_name,
model,
video_path,
n_flyaround_poses=40,
fps=20,
trajectory_type="circular_lsq_fit",
max_angle=2 * math.pi,
trajectory_scale=1.5,
scene_center=(0.0, 0.0, 0.0),
up=(0.0, -1.0, 0.0),
traj_offset=1.0,
n_source_views=1,
visdom_show_preds=True,
visdom_environment=visdom_env,
visualize_preds_keys=(
"images_render",
"masks_render",
"depths_render",
),
)
class PointcloudRenderingModel(torch.nn.Module):
def __init__(
self,
train_dataset: JsonIndexDataset,
sequence_name: str,
        render_size: Tuple[int, int] = (400, 400),
device = None,
load_dataset_pointcloud: bool = False,
):
super().__init__()
self._render_size = render_size
self._pointcloud = dbir_utils.get_sequence_pointcloud(
train_dataset,
sequence_name,
load_dataset_pointcloud=load_dataset_pointcloud,
).to(device)
def forward(
self,
camera: CamerasBase,
**kwargs,
):
render = dbir_utils.render_point_cloud(
camera[[0]],
self._render_size,
self._pointcloud,
point_radius=0.01,
)
return {
"images_render": render.image_render,
"masks_render": render.mask_render,
"depths_render": render.depth_render,
}
if __name__=="__main__":
main()

# ===== end of file: examples/show_co3d_dataset.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import torch
import warnings
from tqdm import tqdm
from omegaconf import DictConfig
from pytorch3d.implicitron.models.generic_model import ImplicitronRender
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
from pytorch3d.implicitron.dataset.json_index_dataset_map_provider_v2 import (
JsonIndexDatasetMapProviderV2
)
from pytorch3d.implicitron.tools.config import expand_args_fields
from co3d.utils import dbir_utils
from co3d.challenge.co3d_submission import CO3DSubmission
from co3d.challenge.data_types import CO3DTask, CO3DSequenceSet
from co3d.dataset.utils import redact_eval_frame_data, _check_valid_eval_frame_data
DATASET_ROOT = os.getenv("CO3DV2_DATASET_ROOT")
DATASET_ROOT_HIDDEN = os.getenv("CO3DV2_HIDDEN_DATASET_ROOT")
logger = logging.getLogger(__name__)
def get_dataset_map(
dataset_root: str,
category: str,
subset_name: str,
) -> DatasetMap:
"""
Obtain the dataset map that contains the train/val/test dataset objects.
"""
expand_args_fields(JsonIndexDatasetMapProviderV2)
dataset_map_provider = JsonIndexDatasetMapProviderV2(
category=category,
subset_name=subset_name,
dataset_root=dataset_root,
test_on_train=False,
only_test_set=False,
load_eval_batches=True,
dataset_JsonIndexDataset_args=DictConfig({"remove_empty_masks": False}),
)
return dataset_map_provider.get_dataset_map()
@torch.no_grad()
def update_dbir_submission_with_category_and_subset_predictions(
submission: CO3DSubmission,
dataset_root: str,
category: str,
subset_name: str,
num_workers: int = 12,
cheat_with_gt_data: bool = True,
load_dataset_pointcloud: bool = False,
point_radius: float = 0.01,
):
"""
Updates the CO3DSubmission object `submission` with predictions of a DBIR
    model extracted for a given category and dataset subset.
Args:
submission: CO3DSubmission object.
dataset_root: Path to the root dataset folder containing CO3Dv2.
category: A CO3Dv2 category to evaluate.
subset_name: The name of the evaluation subset of the category.
num_workers: Number of processes to use for evaluation.
        cheat_with_gt_data: If `True`, bypasses the DBIR stage and simply uses
            the ground truth test data. This, of course, only works for the
            development set, which is not redacted.
load_dataset_pointcloud: If `True`, uses the ground truth dataset
pointclouds instead of unprojecting known views.
point_radius: The radius of the rendered points.
"""
logger.info(
"Runing depth-based image rendering (DBIR) new view synthesis "
f"on category '{category}' subset '{subset_name}'"
)
# Get the evaluation device.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Determine the sequence set and the task we are solving
sequence_set = submission.sequence_set
task = submission.task
# Obtain the CO3Dv2 dataset map
dataset_map = get_dataset_map(dataset_root, category, subset_name)
if task==CO3DTask.MANY_VIEW and not cheat_with_gt_data:
# Obtain the point cloud of the corresponding evaluation sequence
# by unprojecting depth maps of the known training views in the sequence:
train_dataset = dataset_map["train"]
sequence_name = train_dataset[0].sequence_name
sequence_pointcloud = dbir_utils.get_sequence_pointcloud(
train_dataset,
sequence_name,
load_dataset_pointcloud=load_dataset_pointcloud,
)
# Move the pointcloud to the right device
sequence_pointcloud = sequence_pointcloud.to(device)
# The test dataloader simply iterates over test_dataset.eval_batches
# this is done by setting test_dataset.eval_batches as the batch sampler
test_dataset = dataset_map["test"]
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=test_dataset.eval_batches,
num_workers=num_workers,
collate_fn=FrameData.collate,
)
# loop over eval examples
logger.info(
f"Rendering {len(test_dataloader)} test views for {category}/{subset_name}"
)
if sequence_set==CO3DSequenceSet.TEST:
# the test set contains images with redacted foreground masks which cause
# the test dataloader to spam a warning message,
# we suppress this warning with the following line
warnings.filterwarnings("ignore", message="Empty masks_for_bbox.*")
for eval_index, eval_frame_data in enumerate(tqdm(test_dataloader)):
# the first element of eval_frame_data is the actual evaluation image,
        # the 2nd to last elements are the known source images used for building
# the reconstruction (source images are present only for the few-view task)
# move the eval data to the requested device
eval_frame_data = eval_frame_data.to(device)
# sanity check that the eval frame data has correctly redacted entries
_check_valid_eval_frame_data(eval_frame_data, task, sequence_set)
if cheat_with_gt_data:
            # Cheat by taking the ground truth data. This should result in perfect metrics.
mask_render = (eval_frame_data.fg_probability[:1] > 0.5).float()
render_crop = ImplicitronRender(
depth_render = eval_frame_data.depth_map[:1],
image_render = eval_frame_data.image_rgb[:1] * mask_render,
mask_render = mask_render,
)
else:
if task==CO3DTask.MANY_VIEW:
# we use the sequence pointcloud extracted above
scene_pointcloud = sequence_pointcloud
elif task==CO3DTask.FEW_VIEW:
# we build the pointcloud by unprojecting the depth maps of the known views
# which are elements (1:end) of the eval batch
scene_pointcloud = dbir_utils.get_eval_frame_data_pointcloud(
eval_frame_data,
)
else:
raise ValueError(task)
# Redact the frame data so we are sure we cannot use the data
# from the actual unobserved evaluation sample
eval_frame_data = redact_eval_frame_data(eval_frame_data)
# Obtain the image render. In case dataset_test.box_crop==True,
# we need to paste the render back to the original image bounds.
render_crop = dbir_utils.render_point_cloud(
eval_frame_data.camera[[0]],
eval_frame_data.image_rgb.shape[-2:],
scene_pointcloud,
point_radius=point_radius,
)
# cut the valid part of the render and paste into the original image canvas
render_full_image = dbir_utils.paste_render_to_original_image(
eval_frame_data, render_crop
)
# get the image, mask, depth as numpy arrays for the challenge submission
image, mask, depth = [
getattr(render_full_image, f"{data_type}_render").cpu().numpy()[0]
for data_type in ["image", "mask", "depth"]
]
# add the results to the submission object
submission.add_result(
category=category,
subset_name=subset_name,
sequence_name=eval_frame_data.sequence_name[0],
frame_number=int(eval_frame_data.frame_number[0]),
image=image,
mask=mask,
depth=depth,
)
# reset all warnings
warnings.simplefilter("always")
def make_dbir_submission(
dataset_root = DATASET_ROOT,
task = CO3DTask.MANY_VIEW,
sequence_set = CO3DSequenceSet.DEV,
clear_submission_files: bool = False,
num_eval_workers: int = 4,
cheat_with_gt_data: bool = False,
fill_results_from_cache: bool = False,
skip_evaluation: bool = False,
submit_to_eval_ai: bool = False,
):
"""
    Make a depth-based image rendering (DBIR) submission for the CO3D Challenge.
Args:
dataset_root: Path to the root dataset folder.
task: The co3d task - either CO3DTask.MANY_VIEW or CO3DTask.FEW_VIEW.
sequence_set: The sequence set to evaluate on:
            CO3DSequenceSet.DEV for the development set
            CO3DSequenceSet.TEST for the test set
clear_submission_files: Delete all previous intermediate submission files before
commencing the current submission run.
num_eval_workers: Number of processes that conduct evaluation.
        cheat_with_gt_data: If `True`, bypasses the DBIR stage and simply uses
            the ground truth test data. This, of course, only works for the
            development set, which is not redacted.
fill_results_from_cache: If `True`, skips running the DBIR model and rather
loads the results exported from a previous run.
skip_evaluation: If `True`, will not locally evaluate the predictions.
submit_to_eval_ai: If `True`, will automatically submit the exported result
archive to EvalAI using the CLI interface (needs to be installed with
`pip install evalai`). This requires setting the EVAL_AI_PERSONAL_TOKEN
environment variable to your personal EVAL_AI token.
"""
# the folder storing all predictions and results of the submission
submission_output_folder = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
f"dbir_submission_output_{task.value}_{sequence_set.value}",
)
if cheat_with_gt_data:
# make sure that the cheated results have a cheater stamp in their name
submission_output_folder += "_cheating"
# create the submission object
submission = CO3DSubmission(
task=task,
sequence_set=sequence_set,
output_folder=submission_output_folder,
dataset_root=DATASET_ROOT,
)
if task==CO3DTask.FEW_VIEW and submission.has_only_single_sequence_subset():
# if only a single-sequence dataset is downloaded, only the many-view task
# is available
logger.warning(
f"Cannot evaluate the few-view task in {sequence_set.value} when only the"
" singlesequence subset of CO3D is present."
)
return
if fill_results_from_cache:
# only take existing results
submission.fill_results_from_cache()
else:
# Clear all files generated by potential previous submissions.
# Hint: disable this in case you want to resume an evaluation.
if clear_submission_files:
submission.clear_files()
# Get all category names and subset names for the selected task/sequence_set
eval_batches_map = submission.get_eval_batches_map()
# Iterate over the categories and the corresponding subset lists.
for eval_i, (category, subset_name) in enumerate(eval_batches_map.keys()):
logger.info(
f"Evaluating category {category}; subset {subset_name}"
+ f" ({eval_i+1} / {len(eval_batches_map)})"
)
# Generate new views for all evaluation examples in category/subset_name.
update_dbir_submission_with_category_and_subset_predictions(
submission=submission,
dataset_root=dataset_root,
category=category,
subset_name=subset_name,
cheat_with_gt_data=cheat_with_gt_data,
)
    # Locally evaluate the submission in case we don't evaluate on the hidden test set.
if (not skip_evaluation and sequence_set != CO3DSequenceSet.TEST):
submission.evaluate(num_workers=num_eval_workers)
    # Export the submission predictions for submission to the evaluation server.
# This also validates completeness of the produced predictions.
submission.export_results(validate_results=True)
if submit_to_eval_ai:
# submit the results to the EvalAI server.
submission.submit_to_eval_ai()
    # sanity check - reevaluate the archive file and compare results
# submission_reeval = CO3DSubmission(
# task=task,
# sequence_set=sequence_set,
# output_folder=os.path.join(submission_output_folder, "_reeval"),
# dataset_root=DATASET_ROOT,
# on_server=True,
# server_data_folder=DATASET_ROOT_HIDDEN,
# )
# submission_reeval.evaluate_archive_file(
# submission.submission_archive, num_workers=num_eval_workers
# )
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# iterate over all tasks and sequence sets
for sequence_set in [CO3DSequenceSet.DEV, CO3DSequenceSet.TEST]:
for task in [CO3DTask.MANY_VIEW, CO3DTask.FEW_VIEW]:
make_dbir_submission(
task=task,
sequence_set=sequence_set,
cheat_with_gt_data=False,
fill_results_from_cache=False,
skip_evaluation=False,
submit_to_eval_ai=True,
)

# ===== end of file: examples/example_co3d_challenge_submission.py (repo: co3d-main) =====

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from dataset.download_dataset_impl import build_arg_parser, download_dataset
DEFAULT_LINK_LIST_FILE = os.path.join(os.path.dirname(__file__), "links.json")
DEFAULT_SHA256S_FILE = os.path.join(os.path.dirname(__file__), "co3d_sha256.json")
if __name__ == "__main__":
parser = build_arg_parser("CO3D", DEFAULT_LINK_LIST_FILE, DEFAULT_SHA256S_FILE)
parser.add_argument(
"--single_sequence_subset",
action="store_true",
default=False,
help="Download the single-sequence subset of the dataset.",
)
args = parser.parse_args()
download_dataset(
str(args.link_list_file),
str(args.download_folder),
n_download_workers=int(args.n_download_workers),
n_extract_workers=int(args.n_extract_workers),
download_categories=args.download_categories,
checksum_check=bool(args.checksum_check),
single_sequence_subset=bool(args.single_sequence_subset),
clear_archives_after_unpacking=bool(args.clear_archives_after_unpacking),
sha256s_file=str(args.sha256_file),
skip_downloaded_archives=not bool(args.redownload_existing_archives),
)

# ===== end of file: co3d/download_dataset.py (repo: co3d-main) =====

# ===== end of file: co3d/__init__.py (repo: co3d-main, empty file) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# ===== end of file: co3d/dataset/__init__.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import dataclasses
import gzip
import json
from dataclasses import dataclass, Field, MISSING
from typing import Any, cast, Dict, IO, Optional, Tuple, Type, TypeVar, Union
import numpy as np
if sys.version_info >= (3, 8, 0):
from typing import get_args, get_origin
elif sys.version_info >= (3, 7, 0):
def get_origin(cls): # pragma: no cover
return getattr(cls, "__origin__", None)
def get_args(cls): # pragma: no cover
return getattr(cls, "__args__", None)
else:
raise ImportError("This module requires Python 3.7+")
_X = TypeVar("_X")
TF3 = Tuple[float, float, float]
@dataclass
class ImageAnnotation:
# path to jpg file, relative w.r.t. dataset_root
path: str
# H x W
size: Tuple[int, int] # TODO: rename size_hw?
@dataclass
class DepthAnnotation:
# path to png file, relative w.r.t. dataset_root, storing `depth / scale_adjustment`
path: str
# a factor to convert png values to actual depth: `depth = png * scale_adjustment`
scale_adjustment: float
# path to png file, relative w.r.t. dataset_root, storing binary `depth` mask
mask_path: Optional[str]
@dataclass
class MaskAnnotation:
# path to png file storing (Prob(fg | pixel) * 255)
path: str
# (soft) number of pixels in the mask; sum(Prob(fg | pixel))
mass: Optional[float] = None
@dataclass
class ViewpointAnnotation:
# In right-multiply (PyTorch3D) format. X_cam = X_world @ R + T
R: Tuple[TF3, TF3, TF3]
T: TF3
focal_length: Tuple[float, float]
principal_point: Tuple[float, float]
intrinsics_format: str = "ndc_norm_image_bounds"
# Defines the co-ordinate system where focal_length and principal_point live.
# Possible values: ndc_isotropic | ndc_norm_image_bounds (default)
# ndc_norm_image_bounds: legacy PyTorch3D NDC format, where image boundaries
# correspond to [-1, 1] x [-1, 1], and the scale along x and y may differ
# ndc_isotropic: PyTorch3D 0.5+ NDC convention where the shorter side has
# the range [-1, 1], and the longer one has the range [-s, s]; s >= 1,
    #     where s is the aspect ratio. The scale is the same along x and y.
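# Hedged usage sketch (not part of the original annotations): illustrates the
# right-multiply camera convention documented above, plus a plausible per-axis
# rescaling of `focal_length` between the two `intrinsics_format` conventions.
# The image size used below is hypothetical.
def _example_viewpoint_conventions():
    # X_cam = X_world @ R + T  (PyTorch3D right-multiply convention)
    R = np.eye(3)
    T = np.array([0.0, 0.0, 2.0])
    x_world = np.array([1.0, 0.0, 0.0])
    x_cam = x_world @ R + T
    assert np.allclose(x_cam, [1.0, 0.0, 2.0])
    # Assumed relation between the two conventions: `ndc_norm_image_bounds`
    # normalises each axis by half of that axis' size in pixels, whereas
    # `ndc_isotropic` normalises both axes by half of the shorter side.
    H, W = 600, 800  # hypothetical image size
    fx_legacy, fy_legacy = 2.0, 2.0
    s = min(H, W)
    fx_iso, fy_iso = fx_legacy * W / s, fy_legacy * H / s
    return (fx_iso, fy_iso), x_cam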
@dataclass
class FrameAnnotation:
"""A dataclass used to load annotations from json."""
# can be used to join with `SequenceAnnotation`
sequence_name: str
# 0-based, continuous frame number within sequence
frame_number: int
# timestamp in seconds from the video start
frame_timestamp: float
image: ImageAnnotation
depth: Optional[DepthAnnotation] = None
mask: Optional[MaskAnnotation] = None
viewpoint: Optional[ViewpointAnnotation] = None
meta: Optional[Dict[str, Any]] = None
@dataclass
class PointCloudAnnotation:
# path to ply file with points only, relative w.r.t. dataset_root
path: str
# the bigger the better
quality_score: float
n_points: Optional[int]
@dataclass
class VideoAnnotation:
# path to the original video file, relative w.r.t. dataset_root
path: str
# length of the video in seconds
length: float
@dataclass
class SequenceAnnotation:
sequence_name: str
category: str
video: Optional[VideoAnnotation] = None
point_cloud: Optional[PointCloudAnnotation] = None
# the bigger the better
viewpoint_quality_score: Optional[float] = None
def dump_dataclass(obj: Any, f: IO, binary: bool = False) -> None:
"""
Args:
f: Either a path to a file, or a file opened for writing.
obj: A @dataclass or collection hierarchy including dataclasses.
binary: Set to True if `f` is a file handle, else False.
"""
if binary:
f.write(json.dumps(_asdict_rec(obj)).encode("utf8"))
else:
json.dump(_asdict_rec(obj), f)
def load_dataclass(f: IO, cls: Type[_X], binary: bool = False) -> _X:
"""
    Loads a @dataclass or collection hierarchy including dataclasses
    from a json file recursively.
    Call it like load_dataclass(f, typing.List[FrameAnnotation]).
    Raises KeyError if the json has keys not mapping to the dataclass fields.
    Args:
        f: Either a path to a file, or a file opened for reading.
cls: The class of the loaded dataclass.
binary: Set to True if `f` is a file handle, else False.
"""
if binary:
asdict = json.loads(f.read().decode("utf8"))
else:
asdict = json.load(f)
if isinstance(asdict, list):
# in the list case, run a faster "vectorized" version
cls = get_args(cls)[0]
res = list(_dataclass_list_from_dict_list(asdict, cls))
else:
res = _dataclass_from_dict(asdict, cls)
return res
def _dataclass_list_from_dict_list(dlist, typeannot):
"""
Vectorised version of `_dataclass_from_dict`.
The output should be equivalent to
`[_dataclass_from_dict(d, typeannot) for d in dlist]`.
Args:
dlist: list of objects to convert.
typeannot: type of each of those objects.
Returns:
iterator or list over converted objects of the same length as `dlist`.
Raises:
ValueError: it assumes the objects have None's in consistent places across
objects, otherwise it would ignore some values. This generally holds for
auto-generated annotations, but otherwise use `_dataclass_from_dict`.
"""
cls = get_origin(typeannot) or typeannot
if typeannot is Any:
return dlist
if all(obj is None for obj in dlist): # 1st recursion base: all None nodes
return dlist
if any(obj is None for obj in dlist):
# filter out Nones and recurse on the resulting list
idx_notnone = [(i, obj) for i, obj in enumerate(dlist) if obj is not None]
idx, notnone = zip(*idx_notnone)
converted = _dataclass_list_from_dict_list(notnone, typeannot)
res = [None] * len(dlist)
for i, obj in zip(idx, converted):
res[i] = obj
return res
is_optional, contained_type = _resolve_optional(typeannot)
if is_optional:
return _dataclass_list_from_dict_list(dlist, contained_type)
# otherwise, we dispatch by the type of the provided annotation to convert to
if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple
# For namedtuple, call the function recursively on the lists of corresponding keys
        # NamedTuple._field_types was removed in Python 3.9; __annotations__ carries the same info
        types = cls.__annotations__.values()
dlist_T = zip(*dlist)
res_T = [
_dataclass_list_from_dict_list(key_list, tp)
for key_list, tp in zip(dlist_T, types)
]
return [cls(*converted_as_tuple) for converted_as_tuple in zip(*res_T)]
elif issubclass(cls, (list, tuple)):
# For list/tuple, call the function recursively on the lists of corresponding positions
types = get_args(typeannot)
if len(types) == 1: # probably List; replicate for all items
types = types * len(dlist[0])
dlist_T = zip(*dlist)
res_T = (
_dataclass_list_from_dict_list(pos_list, tp)
for pos_list, tp in zip(dlist_T, types)
)
if issubclass(cls, tuple):
return list(zip(*res_T))
else:
return [cls(converted_as_tuple) for converted_as_tuple in zip(*res_T)]
elif issubclass(cls, dict):
        # For the dictionary, call the function recursively on concatenated keys and values
key_t, val_t = get_args(typeannot)
all_keys_res = _dataclass_list_from_dict_list(
[k for obj in dlist for k in obj.keys()], key_t
)
all_vals_res = _dataclass_list_from_dict_list(
[k for obj in dlist for k in obj.values()], val_t
)
indices = np.cumsum([len(obj) for obj in dlist])
assert indices[-1] == len(all_keys_res)
keys = np.split(list(all_keys_res), indices[:-1])
# vals = np.split(all_vals_res, indices[:-1])
all_vals_res_iter = iter(all_vals_res)
return [cls(zip(k, all_vals_res_iter)) for k in keys]
elif not dataclasses.is_dataclass(typeannot):
return dlist
# dataclass node: 2nd recursion base; call the function recursively on the lists
# of the corresponding fields
assert dataclasses.is_dataclass(cls)
fieldtypes = {
f.name: (_unwrap_type(f.type), _get_dataclass_field_default(f))
for f in dataclasses.fields(typeannot)
}
# NOTE the default object is shared here
key_lists = (
_dataclass_list_from_dict_list([obj.get(k, default) for obj in dlist], type_)
for k, (type_, default) in fieldtypes.items()
)
transposed = zip(*key_lists)
return [cls(*vals_as_tuple) for vals_as_tuple in transposed]
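# Hedged usage sketch (not part of the original module): the vectorised converter
# above should agree with the per-element `_dataclass_from_dict` below on a list
# of plain dicts. The paths are hypothetical.
def _example_vectorised_equivalence():
    dicts = [
        {"path": f"images/frame{i:06d}.jpg", "size": (800, 600)} for i in range(3)
    ]
    fast = _dataclass_list_from_dict_list(dicts, ImageAnnotation)
    slow = [_dataclass_from_dict(d, ImageAnnotation) for d in dicts]
    assert list(fast) == slow
    return slow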
def _dataclass_from_dict(d, typeannot):
if d is None or typeannot is Any:
return d
is_optional, contained_type = _resolve_optional(typeannot)
if is_optional:
# an Optional not set to None, just use the contents of the Optional.
return _dataclass_from_dict(d, contained_type)
cls = get_origin(typeannot) or typeannot
if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple
        # NamedTuple._field_types was removed in Python 3.9; __annotations__ carries the same info
        types = cls.__annotations__.values()
return cls(*[_dataclass_from_dict(v, tp) for v, tp in zip(d, types)])
elif issubclass(cls, (list, tuple)):
types = get_args(typeannot)
if len(types) == 1: # probably List; replicate for all items
types = types * len(d)
return cls(_dataclass_from_dict(v, tp) for v, tp in zip(d, types))
elif issubclass(cls, dict):
key_t, val_t = get_args(typeannot)
return cls(
(_dataclass_from_dict(k, key_t), _dataclass_from_dict(v, val_t))
for k, v in d.items()
)
elif not dataclasses.is_dataclass(typeannot):
return d
assert dataclasses.is_dataclass(cls)
fieldtypes = {f.name: _unwrap_type(f.type) for f in dataclasses.fields(typeannot)}
return cls(**{k: _dataclass_from_dict(v, fieldtypes[k]) for k, v in d.items()})
def _unwrap_type(tp):
# strips Optional wrapper, if any
if get_origin(tp) is Union:
args = get_args(tp)
if len(args) == 2 and any(a is type(None) for a in args): # noqa: E721
# this is typing.Optional
return args[0] if args[1] is type(None) else args[1] # noqa: E721
return tp
def _get_dataclass_field_default(field: Field) -> Any:
if field.default_factory is not MISSING:
# pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE,
# dataclasses._DefaultFactory[typing.Any]]` is not a function.
return field.default_factory()
elif field.default is not MISSING:
return field.default
else:
return None
def _asdict_rec(obj):
return dataclasses._asdict_inner(obj, dict)
def dump_dataclass_jgzip(outfile: str, obj: Any) -> None:
"""
Dumps obj to a gzipped json outfile.
Args:
        obj: A @dataclass or collection hierarchy including dataclasses.
outfile: The path to the output file.
"""
with gzip.GzipFile(outfile, "wb") as f:
dump_dataclass(obj, cast(IO, f), binary=True)
def load_dataclass_jgzip(outfile, cls):
"""
Loads a dataclass from a gzipped json outfile.
Args:
outfile: The path to the loaded file.
cls: The type annotation of the loaded dataclass.
Returns:
loaded_dataclass: The loaded dataclass.
"""
with gzip.GzipFile(outfile, "rb") as f:
return load_dataclass(cast(IO, f), cls, binary=True)
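# Hedged usage sketch (not part of the original module): round-trips a list of
# FrameAnnotation objects through a gzipped-json file. The output path is
# hypothetical.
def _example_jgzip_roundtrip(outfile: str = "/tmp/frame_annotations_example.jgz"):
    from typing import List  # List is not among the module-level typing imports
    annotations = [
        FrameAnnotation(
            sequence_name="example_sequence",
            frame_number=i,
            frame_timestamp=float(i),
            image=ImageAnnotation(path=f"images/frame{i:06d}.jpg", size=(800, 600)),
        )
        for i in range(3)
    ]
    dump_dataclass_jgzip(outfile, annotations)
    loaded = load_dataclass_jgzip(outfile, List[FrameAnnotation])
    assert loaded == annotations
    return loaded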
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if get_origin(type_) is Union:
args = get_args(type_)
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
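# Hedged illustration (not part of the original module) of the two small typing
# helpers above on standard `typing` annotations.
def _example_typing_helpers():
    assert _resolve_optional(Optional[int]) == (True, int)
    assert _resolve_optional(int) == (False, int)
    assert _unwrap_type(Optional[str]) is str
    assert _unwrap_type(Dict[str, int]) == Dict[str, int]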

# ===== end of file: co3d/dataset/data_types.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import copy
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from co3d.challenge.data_types import CO3DTask, CO3DSequenceSet
def redact_eval_frame_data(fd: FrameData) -> FrameData:
"""
Redact all information about the test element (1st image)
of the evaluation frame data `fd`.
    This is done by zeroing the first element of each relevant tensor in `fd`,
    followed by removing the sequence point cloud fields.
"""
fd_redacted = copy.deepcopy(fd)
for redact_field_name in [
"fg_probability",
"image_rgb",
"depth_map",
"mask_crop",
]:
# zero-out all elements in the redacted tensor
        field_val = getattr(fd_redacted, redact_field_name)
field_val[:1] *= 0
# also remove the point cloud info
fd_redacted.sequence_point_cloud_idx = None
fd_redacted.sequence_point_cloud = None
return fd_redacted
def _check_valid_eval_frame_data(
fd: FrameData,
task: CO3DTask,
sequence_set: CO3DSequenceSet,
):
"""
Check that the evaluation batch `fd` is redacted correctly.
"""
is_redacted = torch.stack(
[
getattr(fd, k).abs().sum((1,2,3)) <= 0
for k in ["image_rgb", "depth_map", "fg_probability"]
]
)
if sequence_set==CO3DSequenceSet.TEST:
# first image has to be redacted
assert is_redacted[:, 0].all()
# all depth maps have to be redacted
assert is_redacted[1, :].all()
# no known views should be redacted
assert not is_redacted[:, 1:].all(dim=0).any()
elif sequence_set==CO3DSequenceSet.DEV:
# nothing should be redacted
assert not is_redacted.all(dim=0).any()
else:
raise ValueError(sequence_set)

# ===== end of file: co3d/dataset/utils.py (repo: co3d-main) =====

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import requests
import functools
import json
import warnings
from argparse import ArgumentParser
from typing import List, Optional
from multiprocessing import Pool
from tqdm import tqdm
from .check_checksum import check_co3d_sha256
def download_dataset(
link_list_file: str,
download_folder: str,
n_download_workers: int = 4,
n_extract_workers: int = 4,
download_categories: Optional[List[str]] = None,
checksum_check: bool = False,
single_sequence_subset: bool = False,
clear_archives_after_unpacking: bool = False,
skip_downloaded_archives: bool = True,
sha256s_file: Optional[str] = None,
):
"""
Downloads and unpacks the dataset in CO3D format.
Note: The script will make a folder `<download_folder>/_in_progress`, which
    stores files whose download is in progress. The folder can be safely deleted
    once the download is finished.
Args:
link_list_file: A text file with the list of zip file download links.
download_folder: A local target folder for downloading the
            dataset files.
n_download_workers: The number of parallel workers
for downloading the dataset files.
n_extract_workers: The number of parallel workers
for extracting the dataset files.
download_categories: A list of categories to download.
If `None`, downloads all.
checksum_check: Enable validation of the downloaded file's checksum before
extraction.
single_sequence_subset: Whether the downloaded dataset is the single-sequence
subset of the full dataset.
clear_archives_after_unpacking: Delete the unnecessary downloaded archive files
after unpacking.
        skip_downloaded_archives: Skip re-downloading already downloaded archives.
        sha256s_file: A path to the JSON file with the expected SHA256 checksums
            of the dataset files.
"""
if checksum_check and not sha256s_file:
raise ValueError(
"checksum_check is requested but ground-truth SHA256 file not provided!"
)
if not os.path.isfile(link_list_file):
raise ValueError(
"Please specify `link_list_file` with a valid path to a json"
" with zip file download links."
" For CO3Dv2, the file is stored in the co3d github:"
" https://github.com/facebookresearch/co3d/blob/main/co3d/links.json"
)
if not os.path.isdir(download_folder):
raise ValueError(
"Please specify `download_folder` with a valid path to a target folder"
+ " for downloading the dataset."
+ f" {download_folder} does not exist."
)
# read the link file
with open(link_list_file, "r") as f:
links = json.load(f)
# get the full dataset links or the single-sequence subset links
links = links["singlesequence"] if single_sequence_subset else links["full"]
# split to data links and the links containing json metadata
metadata_links = []
data_links = []
for category_name, urls in links.items():
for url in urls:
link_name = os.path.split(url)[-1]
if single_sequence_subset:
link_name = link_name.replace("_singlesequence", "")
if category_name.upper() == "METADATA":
metadata_links.append((link_name, url))
else:
data_links.append((category_name, link_name, url))
if download_categories is not None:
co3d_categories = set(l[0] for l in data_links)
not_in_co3d = [c for c in download_categories if c not in co3d_categories]
if len(not_in_co3d) > 0:
raise ValueError(
f"download_categories {str(not_in_co3d)} are not valid"
+ "dataset categories."
)
data_links = [(c, ln, l) for c, ln, l in data_links if c in download_categories]
with Pool(processes=n_download_workers) as download_pool:
print(f"Downloading {len(metadata_links)} dataset metadata files ...")
for _ in tqdm(
download_pool.imap(
functools.partial(_download_metadata_file, download_folder),
metadata_links,
),
total=len(metadata_links),
):
pass
print(f"Downloading {len(data_links)} dataset files ...")
download_ok = {}
for link_name, ok in tqdm(
download_pool.imap(
functools.partial(
_download_category_file,
download_folder,
checksum_check,
single_sequence_subset,
sha256s_file,
skip_downloaded_archives,
),
data_links,
),
total=len(data_links),
):
download_ok[link_name] = ok
if not all(download_ok.values()):
not_ok_links = [n for n, ok in download_ok.items() if not ok]
not_ok_links_str = "\n".join(not_ok_links)
raise AssertionError(
"The SHA256 checksums did not match for some of the downloaded files:\n"
+ not_ok_links_str + "\n"
+ "This is most likely due to a network failure."
+ " Please restart the download script."
)
metadata_links = [ml for ml in metadata_links if ml[1].endswith(".zip")]
print(f"Extracting {len(data_links)} dataset files and {len(metadata_links)} metadata files...")
with Pool(processes=n_extract_workers) as extract_pool:
for _ in tqdm(
extract_pool.imap(
functools.partial(
_unpack_category_file,
download_folder,
clear_archives_after_unpacking,
),
metadata_links + data_links,
),
total=len(metadata_links) + len(data_links),
):
pass
print("Done")
def build_arg_parser(
dataset_name: str,
default_link_list_file: str,
default_sha256_file: str,
) -> ArgumentParser:
parser = ArgumentParser(description=f"Download the {dataset_name} dataset.")
parser.add_argument(
"--download_folder",
type=str,
required=True,
help="A local target folder for downloading the the dataset files.",
)
parser.add_argument(
"--n_download_workers",
type=int,
default=4,
help="The number of parallel workers for downloading the dataset files.",
)
parser.add_argument(
"--n_extract_workers",
type=int,
default=4,
help="The number of parallel workers for extracting the dataset files.",
)
parser.add_argument(
"--download_categories",
type=lambda x: [x_.strip() for x_ in x.split(",")],
default=None,
help=f"A comma-separated list of {dataset_name} categories to download."
+ " Example: 'orange,car' will download only oranges and cars",
)
parser.add_argument(
"--link_list_file",
type=str,
default=default_link_list_file,
help=(
f"The file with html links to the {dataset_name} dataset files."
+ " In most cases the default local file `links.json` should be used."
),
)
parser.add_argument(
"--sha256_file",
type=str,
default=default_sha256_file,
help=(
f"The file with SHA256 hashes of {dataset_name} dataset files."
+ " In most cases the default local file `co3d_sha256.json` should be used."
),
)
parser.add_argument(
"--checksum_check",
action="store_true",
default=True,
help="Check the SHA256 checksum of each downloaded file before extraction.",
)
parser.add_argument(
"--no_checksum_check",
action="store_false",
dest="checksum_check",
default=False,
help="Does not check the SHA256 checksum of each downloaded file before extraction.",
)
parser.set_defaults(checksum_check=True)
parser.add_argument(
"--clear_archives_after_unpacking",
action="store_true",
default=False,
help="Delete the unnecessary downloaded archive files after unpacking.",
)
parser.add_argument(
"--redownload_existing_archives",
action="store_true",
default=False,
help="Redownload the already-downloaded archives.",
)
return parser
def _unpack_category_file(
download_folder: str,
clear_archive: bool,
link: str,
):
*_, link_name, url = link
local_fl = os.path.join(download_folder, link_name)
print(f"Unpacking dataset file {local_fl} ({link_name}) to {download_folder}.")
shutil.unpack_archive(local_fl, download_folder)
if clear_archive:
os.remove(local_fl)
def _download_category_file(
download_folder: str,
checksum_check: bool,
single_sequence_subset: bool,
sha256s_file: Optional[str],
skip_downloaded_files: bool,
link: str,
):
category, link_name, url = link
local_fl_final = os.path.join(download_folder, link_name)
if skip_downloaded_files and os.path.isfile(local_fl_final):
print(f"Skipping {local_fl_final}, already downloaded!")
return link_name, True
in_progress_folder = os.path.join(download_folder, "_in_progress")
os.makedirs(in_progress_folder, exist_ok=True)
local_fl = os.path.join(in_progress_folder, link_name)
print(f"Downloading dataset file {link_name} ({url}) to {local_fl}.")
_download_with_progress_bar(url, local_fl, link_name)
if checksum_check:
print(f"Checking SHA256 for {local_fl}.")
try:
check_co3d_sha256(
local_fl,
sha256s_file=sha256s_file,
single_sequence_subset=single_sequence_subset,
)
except AssertionError:
warnings.warn(
f"Checksums for {local_fl} did not match!"
+ " This is likely due to a network failure,"
+ " please restart the download script."
)
return link_name, False
os.rename(local_fl, local_fl_final)
return link_name, True
def _download_metadata_file(download_folder: str, link: str):
local_fl = os.path.join(download_folder, link[0])
    # remove the _singlesequence suffix in case we are downloading the single-sequence subset
local_fl = local_fl.replace("_singlesequence", "")
print(f"Downloading dataset metadata file {link[1]} ({link[0]}) to {local_fl}.")
_download_with_progress_bar(link[1], local_fl, link[0])
def _download_with_progress_bar(url: str, fname: str, filename: str):
# taken from https://stackoverflow.com/a/62113293/986477
resp = requests.get(url, stream=True)
print(url)
total = int(resp.headers.get("content-length", 0))
with open(fname, "wb") as file, tqdm(
desc=fname,
total=total,
unit="iB",
unit_scale=True,
unit_divisor=1024,
) as bar:
for datai, data in enumerate(resp.iter_content(chunk_size=1024)):
size = file.write(data)
bar.update(size)
if datai % max((max(total // 1024, 1) // 20), 1) == 0:
print(f"{filename}: Downloaded {100.0*(float(bar.n)/max(total, 1)):3.1f}%.")
print(bar)

# ===== end of file: co3d/dataset/download_dataset_impl.py (repo: co3d-main) =====

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import hashlib
import json
from typing import Optional
from multiprocessing import Pool
from tqdm import tqdm
DEFAULT_SHA256S_FILE = os.path.join(__file__.rsplit(os.sep, 2)[0], "co3d_sha256.json")
BLOCKSIZE = 65536
def main(
download_folder: str,
sha256s_file: str,
dump: bool = False,
n_sha256_workers: int = 4,
single_sequence_subset: bool = False,
):
if not os.path.isfile(sha256s_file):
raise ValueError(f"The SHA256 file does not exist ({sha256s_file}).")
expected_sha256s = get_expected_sha256s(
sha256s_file=sha256s_file,
single_sequence_subset=single_sequence_subset,
)
zipfiles = sorted(glob.glob(os.path.join(download_folder, "*.zip")))
print(f"Extracting SHA256 hashes for {len(zipfiles)} files in {download_folder}.")
extracted_sha256s_list = []
with Pool(processes=n_sha256_workers) as sha_pool:
for extracted_hash in tqdm(
sha_pool.imap(_sha256_file_and_print, zipfiles),
total=len(zipfiles),
):
extracted_sha256s_list.append(extracted_hash)
pass
extracted_sha256s = dict(
zip([os.path.split(z)[-1] for z in zipfiles], extracted_sha256s_list)
)
if dump:
print(extracted_sha256s)
with open(sha256s_file, "w") as f:
json.dump(extracted_sha256s, f, indent=2)
missing_keys, invalid_keys = [], []
for k in expected_sha256s.keys():
if k not in extracted_sha256s:
print(f"{k} missing!")
missing_keys.append(k)
elif expected_sha256s[k] != extracted_sha256s[k]:
print(
f"'{k}' does not match!"
+ f" ({expected_sha256s[k]} != {extracted_sha256s[k]})"
)
invalid_keys.append(k)
if len(invalid_keys) + len(missing_keys) > 0:
raise ValueError(
f"Checksum checker failed!"
+ f" Non-matching checksums: {str(invalid_keys)};"
+ f" missing files: {str(missing_keys)}."
)
def get_expected_sha256s(
sha256s_file: str,
single_sequence_subset: bool = False,
):
with open(sha256s_file, "r") as f:
expected_sha256s = json.load(f)
if single_sequence_subset:
return expected_sha256s["singlesequence"]
else:
return expected_sha256s["full"]
def check_co3d_sha256(
path: str,
sha256s_file: str,
expected_sha256s: Optional[dict] = None,
single_sequence_subset: bool = False,
do_assertion: bool = True,
):
zipname = os.path.split(path)[-1]
if expected_sha256s is None:
expected_sha256s = get_expected_sha256s(
sha256s_file=sha256s_file,
single_sequence_subset=single_sequence_subset,
)
extracted_hash = sha256_file(path)
if do_assertion:
assert (
extracted_hash == expected_sha256s[zipname]
), f"{zipname}: ({extracted_hash} != {expected_sha256s[zipname]})"
else:
return extracted_hash == expected_sha256s[zipname]
def sha256_file(path: str):
sha256_hash = hashlib.sha256()
with open(path, "rb") as f:
file_buffer = f.read(BLOCKSIZE)
while len(file_buffer) > 0:
sha256_hash.update(file_buffer)
file_buffer = f.read(BLOCKSIZE)
digest_ = sha256_hash.hexdigest()
# print(f"{digest_} {path}")
return digest_
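# Hedged usage sketch (not part of the original module): computes the SHA256 of a
# single downloaded archive and validates it against the expected hashes in the
# default `co3d_sha256.json`. The archive path is hypothetical.
def _example_check_one_archive(path: str = "/tmp/co3d_download/car_000.zip"):
    digest = sha256_file(path)
    print(f"{path}: {digest}")
    matches = check_co3d_sha256(
        path,
        sha256s_file=DEFAULT_SHA256S_FILE,
        do_assertion=False,
    )
    print(f"checksum match: {matches}")
    return matches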
def _sha256_file_and_print(path: str):
digest_ = sha256_file(path)
print(f"{path}: {digest_}")
return digest_
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check SHA256 hashes of the CO3D dataset."
)
parser.add_argument(
"--download_folder",
type=str,
help="A local target folder for downloading the the dataset files.",
)
parser.add_argument(
"--sha256s_file",
type=str,
help="A local target folder for downloading the the dataset files.",
default=DEFAULT_SHA256S_FILE,
)
parser.add_argument(
"--num_workers",
type=int,
default=4,
help="The number of sha256 extraction workers.",
)
parser.add_argument(
"--dump_sha256s",
action="store_true",
help="Store sha256s hashes.",
)
parser.add_argument(
"--single_sequence_subset",
action="store_true",
default=False,
help="Check the single-sequence subset of the dataset.",
)
args = parser.parse_args()
main(
str(args.download_folder),
dump=bool(args.dump_sha256s),
n_sha256_workers=int(args.num_workers),
single_sequence_subset=bool(args.single_sequence_subset),
sha256s_file=str(args.sha256s_file),
)

# ===== end of file: co3d/dataset/check_checksum.py (repo: co3d-main) =====

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluation of Implicitron models on CO3Dv2 challenge.
"""
import logging
import os
import torch
import json
import warnings
from typing import Optional, Union, Dict, Tuple
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf
import numpy as np
import pytorch3d
from pytorch3d.implicitron.models.generic_model import ImplicitronRender, GenericModel
from pytorch3d.implicitron.tools.config import get_default_args
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
from pytorch3d.implicitron.dataset.json_index_dataset_map_provider_v2 import (
JsonIndexDatasetMapProviderV2
)
from pytorch3d.implicitron.tools.config import expand_args_fields
from pytorch3d.implicitron.tools.model_io import (
parse_epoch_from_model_path,
find_last_checkpoint,
)
from pytorch3d.implicitron.models.renderer.base import (
# BaseRenderer,
EvaluationMode,
# ImplicitFunctionWrapper,
# RendererOutput,
# RenderSamplingMode,
)
from co3d.utils import dbir_utils
from co3d.challenge.co3d_submission import CO3DSubmission
from co3d.challenge.data_types import CO3DTask, CO3DSequenceSet
from co3d.challenge.utils import (
get_co3d_task_from_subset_name,
get_co3d_sequence_set_from_subset_name,
)
from co3d.dataset.utils import redact_eval_frame_data, _check_valid_eval_frame_data
from co3d.challenge.metric_utils import EVAL_METRIC_NAMES
DATASET_ROOT = os.getenv("CO3DV2_DATASET_ROOT")
DATASET_ROOT_HIDDEN = os.getenv("CO3DV2_HIDDEN_DATASET_ROOT")
# HACK: implicitron_trainer is not part of a package; forcing it in the path
_pytorch3d_root = os.path.dirname(os.path.dirname(pytorch3d.__file__))
implicitron_trainer_dir = os.path.join(_pytorch3d_root, "projects", "implicitron_trainer")
# sys.path.insert(0, implicitron_trainer_dir)
from projects.implicitron_trainer.experiment import Experiment
logger = logging.getLogger(__name__)
def evaluate_implicitron_exp_dir_map(
category_subset_implicitron_exp_dirs: Union[Dict[Tuple[str, str], str], str],
task: CO3DTask,
sequence_set: CO3DSequenceSet,
submission_output_folder: str,
num_eval_workers: int = 4,
submit_to_eval_ai: bool = False,
skip_evaluation: bool = False,
fill_results_from_cache: bool = False,
implicitron_exp_dir_submission_output_subfolder: Optional[str] = None,
):
"""
    Evaluates and submits to EvalAI either:
1) all Implicitron class-specific models, or
2) a single model trained for all categories.
Args:
category_subset_implicitron_exp_dirs: Two options:
1) a dict {(category_name, subset_name): implicitron_exp_dir_path} containing
a mapping from each CO3Dv2 category and subset to the path of the
corresponding implicitron model exp dir.
2) a string containing the path to a single model used for reconstructing
all categories.
task: The co3d task - either CO3DTask.MANY_VIEW or CO3DTask.FEW_VIEW.
sequence_set: The sequence set to evaluate on:
            CO3DSequenceSet.DEV for the development set
            CO3DSequenceSet.TEST for the test set
submission_output_folder: Directory containing the submission output files.
num_eval_workers: Number of processes that conduct evaluation.
submit_to_eval_ai: If `True`, will automatically submit the exported result
archive to EvalAI using the CLI interface (needs to be installed with
`pip install evalai`). This requires setting the EVAL_AI_PERSONAL_TOKEN
environment variable to your personal EVAL_AI token.
skip_evaluation: Skip the local evaluation.
implicitron_exp_dir_submission_output_subfolder:
If set to a string, loads precomputed results from
```
category_subset_implicitron_exp_dirs[(category, subset)]
/implicitron_exp_dir_submission_output_subfolder
```
for each (category, subset).
Such precomputed results are typically output by:
```
evaluate_implicitron_exp_dir(
category_subset_implicitron_exp_dirs[(category, subset)],
...
            )
            ```
    """
submission = CO3DSubmission(
task=task,
sequence_set=sequence_set,
output_folder=submission_output_folder,
dataset_root=DATASET_ROOT,
)
if fill_results_from_cache:
submission.fill_results_from_cache()
else:
if not isinstance(category_subset_implicitron_exp_dirs, str):
            # check that we have all models in case we were given one model per
# category/subset_name
for category, subset_name in submission.get_eval_batches_map():
if (category, subset_name) not in category_subset_implicitron_exp_dirs:
raise ValueError(
f"Missing implicitron exp dir for {category}/{subset_name}."
)
for category, subset_name in submission.get_eval_batches_map():
if isinstance(category_subset_implicitron_exp_dirs, str):
# a single model that does it all
current_implicitron_exp_dir = category_subset_implicitron_exp_dirs
else:
# subset-specific models
current_implicitron_exp_dir = category_subset_implicitron_exp_dirs[
(category, subset_name)
]
if implicitron_exp_dir_submission_output_subfolder is not None:
submission.link_results_from_existing_output_folder(
os.path.join(
current_implicitron_exp_dir,
implicitron_exp_dir_submission_output_subfolder,
)
)
else:
update_implicitron_submission_with_category_and_subset_predictions(
submission=submission,
implicitron_exp_dir=current_implicitron_exp_dir,
dataset_root=DATASET_ROOT,
category=category,
subset_name=subset_name,
n_known_frames_for_test=9 if task==CO3DTask.MANY_VIEW else 0,
)
    # Locally evaluate the submission in case we don't evaluate on the hidden test set.
if sequence_set != CO3DSequenceSet.TEST and not skip_evaluation:
submission.evaluate(num_workers=num_eval_workers)
if submit_to_eval_ai:
        # Export the submission predictions for submission to the evaluation server.
# This also validates completeness of the produced predictions.
submission.export_results(validate_results=True)
# submit the results to the EvalAI server.
submission.submit_to_eval_ai()
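# Illustrative usage sketch (not part of the original module): evaluating a set of
# per-category Implicitron models with `evaluate_implicitron_exp_dir_map`. All
# experiment-directory paths below are hypothetical placeholders; the dict must
# contain an entry for every (category, subset_name) pair of the chosen task and
# sequence set (alternatively, a single exp-dir string can be passed instead).
def _example_evaluate_per_category_models():
    category_subset_exp_dirs = {
        ("apple", "manyview_dev_0"): "/checkpoints/implicitron/apple_manyview_dev_0",
        ("hydrant", "manyview_dev_0"): "/checkpoints/implicitron/hydrant_manyview_dev_0",
    }
    evaluate_implicitron_exp_dir_map(
        category_subset_exp_dirs,
        task=CO3DTask.MANY_VIEW,
        sequence_set=CO3DSequenceSet.DEV,
        submission_output_folder="/tmp/implicitron_submission_manyview_dev",
        num_eval_workers=4,
        submit_to_eval_ai=False,
    )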
def evaluate_implicitron_exp_dir(
implicitron_exp_dir: str,
task: Optional[CO3DTask] = None,
sequence_set: Optional[CO3DSequenceSet] = None,
subset_name: Optional[str] = None,
category: Optional[str] = None,
result_dump_file: Optional[str] = None,
clear_submission_cache_before_evaluation: bool = False,
clear_submission_cache_after_evaluation: bool = False,
submission_output_folder: Optional[str] = None,
num_eval_workers: int = 4,
):
"""
Run evaluation for an experiment directory of Implicitron.
    Unless overridden by the user, this function automatically parses the
category / subset / task / sequence_set / dataset_root
from the implicitron experiment config stored in implicitron_exp_dir.
Args:
implicitron_exp_dir: The directory of an Implicitron experiment.
task: The co3d task - either CO3DTask.MANY_VIEW or CO3DTask.FEW_VIEW.
sequence_set: The sequence set to evaluate on:
            CO3DSequenceSet.DEV for the development set
            CO3DSequenceSet.TEST for the test set
subset_name: The name of the CO3Dv2 subset.
E.g. "manyview_dev_0", "fewview_dev", ...
category: The name of the CO3Dv2 category to evaluate.
result_dump_file: Path to the json file with evaluation results.
clear_submission_cache_before_evaluation: Delete all previous intermediate
submission files before commencing the current evaluation run.
clear_submission_cache_after_evaluation: Delete all intermediate
submission files after the evaluation run.
submission_output_folder: The path to the folder with intermediate
submission files.
num_eval_workers: Number of processes that conduct evaluation.
"""
if result_dump_file is None:
result_dump_file = os.path.join(
implicitron_exp_dir, "results_challenge_eval.json"
)
cfg = load_implicitron_config_from_exp_dir(implicitron_exp_dir)
# assert few config settings
assert (
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type
=="JsonIndexDatasetMapProviderV2"
)
# read the category / subset / task / sequence_set / dataset_root from
# the implicitron config
dataset_provider_args = (
cfg
.data_source_ImplicitronDataSource_args
.dataset_map_provider_JsonIndexDatasetMapProviderV2_args
)
if subset_name is None:
subset_name = dataset_provider_args.subset_name
if category is None:
category = dataset_provider_args.category
if task is None:
task = get_co3d_task_from_subset_name(subset_name)
if sequence_set is None:
sequence_set = get_co3d_sequence_set_from_subset_name(subset_name)
dataset_root = (
DATASET_ROOT
if DATASET_ROOT is not None
else dataset_provider_args.dataset_root
)
logger.info(
f"Evaluating Implicitron model on category {category}; subset {subset_name}"
)
# the folder storing all predictions and results of the submission
if submission_output_folder is None:
submission_output_folder = get_default_implicitron_exp_dir_submission_output_folder(
implicitron_exp_dir,
task,
sequence_set,
)
# create the submission object
submission = CO3DSubmission(
task=task,
sequence_set=sequence_set,
output_folder=submission_output_folder,
dataset_root=DATASET_ROOT,
)
if task==CO3DTask.FEW_VIEW and submission.has_only_single_sequence_subset():
# if only a single-sequence dataset is downloaded, only the many-view task
# is available
raise ValueError(
f"Cannot evaluate the few-view task in {sequence_set.value} when only the"
" singlesequence subset of CO3D is present."
)
if clear_submission_cache_before_evaluation:
submission.clear_files()
# Generate new views for all evaluation examples in category/subset_name.
update_implicitron_submission_with_category_and_subset_predictions(
submission=submission,
implicitron_exp_dir=implicitron_exp_dir,
dataset_root=dataset_root,
category=category,
subset_name=subset_name,
n_known_frames_for_test=9 if task==CO3DTask.MANY_VIEW else 0,
)
    # Locally evaluate the submission in case we don't evaluate on the hidden test set.
if sequence_set == CO3DSequenceSet.TEST:
logger.warning("Cannot evaluate on the hidden test set. Skipping evaluation.")
category_subset_results = {m: 0.0 for m in EVAL_METRIC_NAMES}
else:
results = submission.evaluate(num_workers=num_eval_workers)
category_subset_results = results[(category, subset_name)][0]
# add the eval epoch as well
category_subset_results["eval_epoch"] = parse_epoch_from_model_path(
find_last_checkpoint(implicitron_exp_dir)
)
logger.info("Implicitron model results:")
logger.info(f"category={category} / subset_name={subset_name}")
print_category_subset_results(category_subset_results)
if clear_submission_cache_after_evaluation:
submission.clear_files()
logger.info(f"Dumping challenge eval results to {result_dump_file}.")
with open(result_dump_file, "w") as f:
json.dump(category_subset_results, f)
return category_subset_results
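# Illustrative usage sketch (not part of the original module): evaluating a single
# Implicitron experiment directory. Category, subset, task and sequence set are
# parsed automatically from the stored experiment config; the path is a
# hypothetical placeholder.
def _example_evaluate_single_exp_dir():
    results = evaluate_implicitron_exp_dir(
        "/checkpoints/implicitron/teddybear_manyview_dev_0",
        num_eval_workers=4,
    )
    print_category_subset_results(results)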
@torch.no_grad()
def update_implicitron_submission_with_category_and_subset_predictions(
submission: CO3DSubmission,
implicitron_exp_dir: str,
dataset_root: str,
category: str,
subset_name: str,
num_workers: int = 12,
n_known_frames_for_test: int = 0,
):
"""
    Updates the CO3DSubmission object `submission` with predictions of an
    Implicitron model extracted for a given category and dataset subset.
Args:
submission: CO3DSubmission object.
implicitron_exp_dir: Implicitron experiment directory to load the model from.
dataset_root: Path to the root dataset folder containing CO3Dv2.
category: A CO3Dv2 category to evaluate.
subset_name: The name of the evaluation subset of the category.
num_workers: Number of processes to use for evaluation.
n_known_frames_for_test: The number of known frames to append to the test batches.
"""
logger.info(
"Runing depth-based image rendering (DBIR) new view synthesis "
f"on category '{category}' subset '{subset_name}'"
)
# Get the evaluation device.
device = torch.device("cuda") if torch.cuda.is_available() else device("cpu")
# load the implicitron model
model = load_model_from_implicitron_exp_dir(implicitron_exp_dir)
# Determine the sequence set and the task we are solving
sequence_set = submission.sequence_set
task = submission.task
# Obtain the CO3Dv2 dataset map
dataset_map = get_dataset_map(
dataset_root,
category,
subset_name,
n_known_frames_for_test=n_known_frames_for_test,
)
# The test dataloader simply iterates over test_dataset.eval_batches
# this is done by setting test_dataset.eval_batches as the batch sampler
test_dataset = dataset_map["test"]
eval_batches = test_dataset.get_eval_batches()
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=eval_batches,
num_workers=num_workers,
collate_fn=FrameData.collate,
)
# loop over eval examples
logger.info(
f"Rendering {len(test_dataloader)} test views for {category}/{subset_name}"
)
if sequence_set==CO3DSequenceSet.TEST:
# the test set contains images with redacted foreground masks which cause
# the test dataloader to spam a warning message,
# we suppress this warning with the following line
warnings.filterwarnings("ignore", message="Empty masks_for_bbox.*")
for eval_index, eval_frame_data in enumerate(tqdm(test_dataloader)):
# the first element of eval_frame_data is the actual evaluation image,
        # the 2nd-to-last elements are the known source images used for building
# the reconstruction (source images are present only for the few-view task)
# move the eval data to the requested device
eval_frame_data = eval_frame_data.to(device)
# sanity check that the eval frame data has correctly redacted entries
_check_valid_eval_frame_data(eval_frame_data, task, sequence_set)
# Redact the frame data so we are sure we cannot use the data
# from the actual unobserved evaluation sample
eval_frame_data = redact_eval_frame_data(eval_frame_data)
# Obtain the image render. In case dataset_test.box_crop==True,
# we need to paste the render back to the original image bounds.
model_preds = model(
**eval_frame_data,
eval_mode=EvaluationMode.EVALUATION,
)
render_crop = model_preds["implicitron_render"]
# cut the valid part of the render and paste into the original image canvas
render_full_image = dbir_utils.paste_render_to_original_image(
eval_frame_data, render_crop
)
# get the image, mask, depth as numpy arrays for the challenge submission
image, mask, depth = [
getattr(render_full_image, f"{data_type}_render").cpu().numpy()[0]
for data_type in ["image", "mask", "depth"]
]
# clip the rendered image to [0, 1] range
image = image.clip(0.0, 1.0)
# add the results to the submission object
submission.add_result(
category=category,
subset_name=subset_name,
sequence_name=eval_frame_data.sequence_name[0],
frame_number=int(eval_frame_data.frame_number[0]),
image=image,
mask=mask,
depth=depth,
)
# reset all warnings
warnings.simplefilter("always")
def get_default_implicitron_exp_dir_submission_output_folder(
implicitron_exp_dir: str,
task: CO3DTask,
sequence_set: CO3DSequenceSet,
):
return os.path.join(
implicitron_exp_dir,
f"implicitron_submission_output_{task.value}_{sequence_set.value}",
)
def parse_co3d_challenge_settings_from_implicitron_exp_dir(
implicitron_exp_dir: str
) -> Tuple[CO3DSequenceSet, CO3DTask, str, str]:
"""
Reads the config of an implicitron experiment stored in `implicitron_exp_dir` and
returns the configuration of the corresponding challenge entry.
Args:
implicitron_exp_dir: The directory of an Implicitron experiment.
Returns:
sequence_set: CO3D sequence set of the experiment.
task: The CO3D task of the experiment.
category: The category of the experiment.
subset_name: The name of the CO3D subset.
"""
cfg = load_implicitron_config_from_exp_dir(implicitron_exp_dir)
dataset_provider_args = (
cfg
.data_source_ImplicitronDataSource_args
.dataset_map_provider_JsonIndexDatasetMapProviderV2_args
)
subset_name = dataset_provider_args.subset_name
category = dataset_provider_args.category
task = get_co3d_task_from_subset_name(subset_name)
sequence_set = get_co3d_sequence_set_from_subset_name(subset_name)
return sequence_set, task, category, subset_name
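# Illustrative usage sketch (not part of the original module): reading the challenge
# settings stored in an Implicitron experiment directory and deriving the default
# submission output folder for it. The experiment path is a hypothetical placeholder.
def _example_read_challenge_settings(
    implicitron_exp_dir: str = "/checkpoints/implicitron/apple_fewview",
):
    sequence_set, task, category, subset_name = (
        parse_co3d_challenge_settings_from_implicitron_exp_dir(implicitron_exp_dir)
    )
    output_folder = get_default_implicitron_exp_dir_submission_output_folder(
        implicitron_exp_dir, task, sequence_set
    )
    print(f"{category}/{subset_name} ({task.value}, {sequence_set.value}) -> {output_folder}")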
def load_implicitron_config_from_exp_dir(implicitron_exp_dir: str):
cfg_filename = os.path.join(implicitron_exp_dir, "expconfig.yaml")
cfg_load = OmegaConf.load(cfg_filename)
cfg_default = get_default_args(Experiment)
cfg = OmegaConf.merge(cfg_default, cfg_load)
cfg.exp_dir = implicitron_exp_dir
return cfg
def load_model_from_implicitron_exp_dir(exp_dir: str) -> GenericModel:
cfg = load_implicitron_config_from_exp_dir(exp_dir)
experiment = Experiment(**cfg)
experiment.model_factory.force_resume = True
model = experiment.model_factory(accelerator=None, exp_dir=exp_dir)
model.cuda()
model.eval()
return model
def get_dataset_map(
dataset_root: str,
category: str,
subset_name: str,
n_known_frames_for_test: int = 0,
) -> DatasetMap:
"""
Obtain the dataset map that contains the train/val/test dataset objects.
"""
expand_args_fields(JsonIndexDatasetMapProviderV2)
dataset_map_provider = JsonIndexDatasetMapProviderV2(
category=category,
subset_name=subset_name,
dataset_root=dataset_root,
test_on_train=False,
only_test_set=False,
load_eval_batches=True,
dataset_JsonIndexDataset_args=DictConfig({"remove_empty_masks": False}),
n_known_frames_for_test=n_known_frames_for_test,
)
return dataset_map_provider.get_dataset_map()
def print_category_subset_results(category_subset_results: Dict[str, float]):
for k, v in category_subset_results.items():
print(f"{k:20s}: {v:1.3f}")
|
co3d-main
|
co3d/utils/evaluate_implicitron_model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import torch
from typing import Tuple
from pytorch3d.renderer.cameras import CamerasBase
from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.structures import Pointclouds
from pytorch3d.implicitron.dataset.json_index_dataset import _get_clamp_bbox
from pytorch3d.implicitron.models.base_model import ImplicitronRender
from pytorch3d.implicitron.dataset.visualize import get_implicitron_sequence_pointcloud
from pytorch3d.implicitron.tools.point_cloud_utils import (
render_point_cloud_pytorch3d,
get_rgbd_point_cloud,
)
def render_point_cloud(
camera: CamerasBase,
render_size: Tuple[int, int],
pointcloud: Pointclouds,
point_radius: float = 0.03,
) -> ImplicitronRender:
"""
Render the point cloud `pointcloud` to the camera `camera` using the
PyTorch3D point cloud renderer.
Args:
camera: Rendering camera.
render_size: 2-tuple of integers denoting the render size (HxW)
pointcloud: The point cloud to render.
point_radius: Radius of the rendered points.
"""
# render the sequence point cloud to each evaluation view
data_rendered, render_mask, depth_rendered = render_point_cloud_pytorch3d(
camera,
pointcloud,
render_size=render_size,
point_radius=point_radius,
topk=10,
eps=1e-2,
bin_size=0,
)
# cast to the implicitron render
return ImplicitronRender(
depth_render=depth_rendered,
image_render=data_rendered,
mask_render=render_mask,
)
def paste_render_to_original_image(
frame_data: FrameData,
render: ImplicitronRender,
) -> ImplicitronRender:
"""
Paste a rendering result `render` into the original image coordinate frame.
Args:
frame_data: The `FrameData` object as returned by the `JsonIndexDataset`.
render: A render to be pasted into the original image coordinates.
"""
# size of the render
render_size = render.image_render.shape[2:]
# estimate render scale w.r.t. the frame_data images
render_scale_factors = [
sr / s for sr, s in zip(render_size, frame_data.image_rgb.shape[2:])
]
assert abs(render_scale_factors[0]-render_scale_factors[1]) <= 1e-2, (
"non-isotropic render rescale"
)
# original image size
orig_size = frame_data.image_size_hw[0].tolist()
# bounding box of the crop in the original image
if frame_data.crop_bbox_xywh is not None:
bbox_xywh = frame_data.crop_bbox_xywh[0]
else:
bbox_xywh = torch.LongTensor([0, 0, orig_size[1], orig_size[0]])
# get the valid part of the render
render_bounds_wh = [None, None]
for axis in [0, 1]:
# resize the mask crop to the size of the render
if render_size != frame_data.mask_crop.shape[2:]:
mask_crop_render_size = torch.nn.functional.interpolate(
frame_data.mask_crop, size=render_size, mode="nearest"
)
else:
mask_crop_render_size = frame_data.mask_crop
        # get the bounds of the mask_crop along dimension = 1-axis
valid_dim_pix = mask_crop_render_size[0, 0].sum(dim=axis).reshape(-1).nonzero()
assert valid_dim_pix.min()==0
render_bounds_wh[axis] = valid_dim_pix.max().item() + 1
render_out = {}
for render_type, render_val in dataclasses.asdict(render).items():
if render_val is None:
continue
# get the valid part of the render
render_valid_ = render_val[..., :render_bounds_wh[1], :render_bounds_wh[0]]
# resize the valid part to the original size
render_resize_ = torch.nn.functional.interpolate(
render_valid_,
size=tuple(reversed(bbox_xywh[2:].tolist())),
mode="bilinear" if render_type=="image_render" else "nearest",
align_corners=False if render_type=="image_render" else None,
)
# paste the original-sized crop to the original image
render_pasted_ = render_resize_.new_zeros(1, render_resize_.shape[1], *orig_size)
render_pasted_[
...,
bbox_xywh[1]:(bbox_xywh[1]+render_resize_.shape[2]),
bbox_xywh[0]:(bbox_xywh[0]+render_resize_.shape[3]),
] = render_resize_
render_out[render_type] = render_pasted_
# if True:
# # debug visualize
# from visdom import Visdom
# viz = Visdom()
# visdom_env = "debug_paste_render_to_original_image"
# viz.image(
# render.image_render[0],
# env=visdom_env,
# win="original",
# )
# viz.image(
# render_out["image_render"][0],
# env=visdom_env,
# win="pasted",
# )
# import pdb; pdb.set_trace()
# pass
return ImplicitronRender(**render_out)
def get_sequence_pointcloud(
dataset: JsonIndexDataset,
sequence_name: str,
num_workers: int = 12,
max_loaded_frames: int = 50,
max_n_points: int = int(1e5),
seed: int = 42,
load_dataset_pointcloud: bool = False,
) -> Pointclouds:
"""
Given a `dataset` object and the name of a sequence in it (`sequence_name`),
generate a 3D pointcloud containing the main foreground object of the scene.
Args:
        dataset: A dataset containing sequence annotations.
sequence_name: The name of the sequence to reconstruct.
num_workers: Number of cores to use for loading the sequence data.
max_n_points: Maximum number of points to keep in the point cloud.
seed: Random seed for reproducibility.
load_dataset_pointcloud: If `True` uses the CO3D ground truth dataset
point cloud, otherwise generates the point cloud by unprojecting
the depth maps of known frames.
"""
with torch.random.fork_rng(): # fork rng for reproducibility
torch.manual_seed(seed)
sequence_pointcloud, _ = get_implicitron_sequence_pointcloud(
dataset,
sequence_name,
mask_points=True,
max_frames=max_loaded_frames,
num_workers=num_workers,
load_dataset_point_cloud=load_dataset_pointcloud,
)
sequence_pointcloud = _subsample_pointcloud(sequence_pointcloud, max_n_points)
return sequence_pointcloud
def get_eval_frame_data_pointcloud(
eval_frame_data: FrameData,
max_n_points: int = int(3e4),
):
"""
Generate a pointcloud by unprojecting the known depth maps of a `FrameData` object
`eval_frame_data`.
Args:
eval_frame_data: `FrameData` to unproject.
max_n_points: Maximum number of points to keep in the point cloud.
"""
batch_size = eval_frame_data.image_rgb.shape[0]
pointcloud = get_rgbd_point_cloud(
eval_frame_data.camera[list(range(1, batch_size))],
eval_frame_data.image_rgb[1:],
eval_frame_data.depth_map[1:],
(eval_frame_data.fg_probability[1:] > 0.5).float(),
mask_points=True,
)
return _subsample_pointcloud(pointcloud, max_n_points)
def _subsample_pointcloud(p: Pointclouds, n: int):
n_points = p.num_points_per_cloud().item()
if n_points > n:
# subsample the point cloud in case it is bigger than max_n_points
subsample_idx = torch.randperm(
n_points,
device=p.points_padded().device,
)[:n]
p = Pointclouds(
points=p.points_padded()[:, subsample_idx],
features=p.features_padded()[:, subsample_idx],
)
return p
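# Illustrative sketch (not part of the original module): a minimal depth-based
# image re-rendering (DBIR) pass over a few-view evaluation batch. The sketch
# assumes `frame_data` is a FrameData batch whose first element is the target
# view and whose remaining elements are the known source views; it unprojects
# the source views to a point cloud, renders the cloud into the target camera,
# and pastes the render back into the original image bounds.
def _example_dbir_new_view(frame_data: FrameData) -> ImplicitronRender:
    # point cloud unprojected from the known (non-target) frames
    pointcloud = get_eval_frame_data_pointcloud(frame_data)
    # render the point cloud into the target camera (index 0 of the batch)
    render_crop = render_point_cloud(
        frame_data.camera[[0]],
        tuple(frame_data.image_rgb.shape[2:]),
        pointcloud,
        point_radius=0.03,
    )
    # paste the cropped render back to the original image coordinate frame
    return paste_render_to_original_image(frame_data, render_crop)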
|
co3d-main
|
co3d/utils/dbir_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import logging
import errno
import pickle
import glob
import hashlib
import time
from tabulate import tabulate
from typing import Optional, Tuple, List
from dataclasses import dataclass
import numpy as np
import csv
from co3d.challenge.metric_utils import EVAL_METRIC_NAMES, EVAL_METRIC_MISSING_VALUE
from .blank_predictions_results import BLANK_PREDICTION_RESULTS
from .utils import evaluate_file_folders, get_result_directory_file_names
from .data_types import RGBDAFrame, CO3DTask, CO3DSequenceSet
from .io import (
load_all_eval_batches,
store_rgbda_frame,
export_result_file_dict_to_hdf5,
make_hdf5_file_links,
link_file_to_db_file,
link_rgbda_frame_files,
)
CO3D_CHALLENGE_ID = 1819
CO3D_PHASE_ID = {
(CO3DTask.MANY_VIEW, CO3DSequenceSet.DEV): 3541,
(CO3DTask.MANY_VIEW, CO3DSequenceSet.TEST): 3542,
(CO3DTask.FEW_VIEW, CO3DSequenceSet.DEV): 3543,
(CO3DTask.FEW_VIEW, CO3DSequenceSet.TEST): 3544,
}
EVAL_AI_PERSONAL_TOKEN = os.getenv("EVAL_AI_PERSONAL_TOKEN")
MAX_EXPORT_ARCHIVE_SIZE_GB = 2.0
logger = logging.getLogger(__file__)
@dataclass
class CO3DSubmissionRender:
"""
Contains information about a single predicted image.
category: The name of the category of the prediction.
subset_name: The dataset subset of the prediction.
    sequence_name: The name of the sequence the prediction belongs to.
    frame_number: The number of the corresponding ground truth frame.
rgbda_frame: The actual render.
"""
category: str
subset_name: str
sequence_name: str
frame_number: int
rgbda_frame: Optional[RGBDAFrame] = None
def get_image_path(self, root_dir: str):
return os.path.join(
CO3DSubmission.get_submission_cache_image_dir(
root_dir,
self.category,
self.subset_name,
),
self.get_image_name(),
)
def get_hash(self):
return (self.category, self.subset_name, self.sequence_name, self.frame_number)
def get_image_name(self):
return get_submission_image_name(
self.category, self.sequence_name, self.frame_number
)
class CO3DSubmission:
"""
    Maintains all data needed for a successful submission to the CO3D Challenge
evaluation server. The class can also locally evaluate predictions if
a local copy of the CO3Dv2 dataset is present.
See https://eval.ai/web/challenges/challenge-page/1819/overview for more details
about the challenge.
In order to create a CO3Dv2 submission, evaluate and submit the results, please follow
these steps:
1) Start by importing the `CO3DSubmission` class and instantiate a submission run.
For example, the following code:
```python
from co3d.challenge.co3d_submission import CO3DSubmission
output_folder = "./co3d_submission_files"
task = CO3DTask.MANY_VIEW
sequence_set = CO3DSequenceSet.TEST
submission = CO3DSubmission(
        task=task,
sequence_set=sequence_set,
output_folder=output_folder,
dataset_root=dataset_root,
)
```
will instantiate a CO3D submission object `submission` that stores (and optionally
evaluates) results of the `manyview` task on the `test` set. All results will be
stored in the `output_folder`. Note that a user has to also specify the local root
folder of the CO3D dataset in `dataset_root`.
2) Obtain the dictionary of evaluation examples `eval_batches_map` from `submission`.
```python
eval_batches_map = submission.get_eval_batches_map()
```
here, `eval_batches_map` is a dictionary of the following form:
```
{(category: str, subset_name: str): eval_batches} # eval_batches_map
```
where `eval_batches` look as follows:
```python
[
[
(sequence_name_0: str, frame_number_0: int),
(sequence_name_0: str, frame_number_1: int),
...
(sequence_name_0: str, frame_number_M_0: int),
],
...
[
(sequence_name_N: str, frame_number_0: int),
(sequence_name_N: str, frame_number_1: int),
...
(sequence_name_N: str, frame_number_M_N: int),
]
] # eval_batches
```
Containing a list of `N` evaluation examples, each consisting of a tuple of
`M_i` frames with numbers `frame_number_j` from a given sequence name `sequence_name_i`.
Note that the mapping between `frame_number` and `sequence_name` to the CO3D data
    is stored in the respective `frame_annotations.jgz` and `sequence_annotations.jgz`
files in `<dataset_root>/<sequence_category>`.
    For the <b>Many-view task</b> (`CO3DTask.MANY_VIEW`), each evaluation batch has a single
    (`M_i=1`) frame, which is the target evaluation frame.
    For the <b>Few-view task</b> (`CO3DTask.FEW_VIEW`), each batch has several frames (`M_i>1`),
    where the first frame is the target frame which should be predicted given the knowledge
    of the source frames that correspond to the 2nd-to-last elements of each batch.
3) Next we iterate over eval_batches, predict new views, and store our predictions
with the `submission` object.
```python
# iterate over evaluation subsets and categories
for (category, subset_name), eval_batches in eval_batches_map.items():
# iterate over all evaluation examples of a given category and subset
for eval_batch in eval_batches:
# parse the evaluation sequence name and target frame number from eval_batch
sequence_name, frame_number = eval_batch[0][:2]
# `predict_new_view` is a user-defined function which generates
# the test view (corresponding to the first element of the eval batch)
image, depth, mask = predict_new_view(eval_batch, ...)
# add the render to the submission
submission.add_result(
category=category,
subset_name=subset_name,
sequence_name=sequence_name,
frame_number=frame_number,
image=image,
mask=mask,
depth=depth,
)
```
4) Export the submission object to a hdf5 file that can be uploaded to the EvalAI server:
```
submission.export_results()
```
5) Submit the submission to the EvalAI server:
```
submission.submit_to_eval_ai()
```
"""
def __init__(
self,
task: CO3DTask,
sequence_set: CO3DSequenceSet,
output_folder: str,
dataset_root: Optional[str] = None,
eval_ai_personal_token: Optional[str] = EVAL_AI_PERSONAL_TOKEN,
export_format: str = "hdf5",
# ---- the following are only for internal use, do not modify ----
on_server: bool = False,
server_data_folder: Optional[str] = None,
max_processing_time: int = -1,
):
"""
Initialize the CO3DSubmission object.
        task: The CO3D task (track):
            `CO3DTask.MANY_VIEW` for the "Many-view" task.
            `CO3DTask.FEW_VIEW` for the "Few-view" task.
        sequence_set: The challenge sequence set.
            `CO3DSequenceSet.DEV` for the development set.
            `CO3DSequenceSet.TEST` for the test set.
output_folder: The folder containing all outputs needed for the challenge submission.
dataset_root: The path to the root folder of a local copy of the CO3Dv2 dataset.
eval_ai_personal_token: A personal eval_ai token. Required for the cli
submission with `self.submit_to_eval_ai`.
export_format: The format of the exported archive. Currently only "hdf5" is supported.
server_data_folder: (Internal-use-only)
on_server: (Internal-use-only)
max_processing_time: (Internal-use-only)
"""
self.task = task
self.sequence_set = sequence_set
self.output_folder = output_folder
self.dataset_root = dataset_root
self.server_data_folder = server_data_folder
self.on_server = on_server
self.export_format = export_format
self.eval_ai_personal_token = eval_ai_personal_token
self.max_processing_time = max_processing_time
submission_archive_ext = self.export_format
self.submission_archive = os.path.join(
output_folder, f"submission_{task.value}_{sequence_set.value}.{submission_archive_ext}"
)
self.evaluate_exceptions_file = os.path.join(output_folder, "eval_exceptions.pkl")
self.submission_cache = os.path.join(output_folder, "submission_cache")
os.makedirs(self.submission_cache, exist_ok=True)
self._result_list: List[CO3DSubmissionRender] = []
self._eval_batches_map = None
@staticmethod
def get_submission_cache_image_dir(
output_folder: str,
category: str,
subset_name: str,
):
"""
        Get the cache folder containing all predictions of a given category and subset.
Args:
output_folder: The root submission folder.
category: CO3D category name (e.g. "apple", "orange")
subset_name: CO3D subset name (e.g. "manyview_dev_0", "manyview_test_0")
"""
return os.path.join(output_folder, category, subset_name)
def has_only_single_sequence_subset(self):
"""
Returns:
has_only_single_sequence: Returns `True` if the present version of the CO3Dv2
dataset contains only single-sequence data. Otherwise returns `False`.
"""
if self.dataset_root is None:
raise ValueError("dataset_root has to be specified.")
eval_batches_map = load_all_eval_batches(self.dataset_root)
if any(
"fewview_" in subset_name for category, subset_name in eval_batches_map.keys()
):
return False
else:
return True
def add_result(
self,
category: str,
subset_name: str,
sequence_name: str,
frame_number: int,
image: np.ndarray,
mask: np.ndarray,
depth: np.ndarray,
) -> None:
"""
Adds a single user-predicted image to the current submission.
Args:
category: The CO3D category of the image (e.g. "apple", "car").
subset_name: The name of the subset which the image comes from
(e.g. "manyview_dev_0", "manyview_test_0").
sequence_name: The name of the sequence which the image comes from.
frame_number: The number of the corresponding ground truth frame.
image: 3xHxW numpy.ndarray containing the RGB image.
The color range is [0-1] and `image` should be of the same size
as the corresponding ground truth image.
        mask: 1xHxW numpy.ndarray containing the binary foreground mask of the
rendered object.
The values should be in {0, 1} and `mask` should be of the same size
as the corresponding ground truth image.
        depth: 1xHxW numpy.ndarray containing the rendered depth map of the predicted
image.
The depth map should be of the same size as the corresponding
ground truth image.
"""
res = self._add_result_metadata(
category,
subset_name,
sequence_name,
frame_number,
)
res_file = res.get_image_path(self.submission_cache)
os.makedirs(os.path.dirname(res_file), exist_ok=True)
logger.debug(f"Storing submission files {res_file}.")
store_rgbda_frame(
RGBDAFrame(image=image, mask=mask, depth=depth),
res_file,
)
def _link_existing_render(
self,
render_submission_cache: str,
render: CO3DSubmissionRender,
) -> None:
"""
Link a single stored existing render to the current submission.
Args:
render_submission_cache: The path to the submission cache of the render.
render: The linked render.
"""
res = self._add_result_metadata(
render.category,
render.subset_name,
render.sequence_name,
render.frame_number,
)
rgbda_file_link_src = res.get_image_path(self.submission_cache)
rgbda_file_existing = render.get_image_path(render_submission_cache)
os.makedirs(os.path.dirname(rgbda_file_link_src), exist_ok=True)
logger.debug(
f"Linking submission file {rgbda_file_link_src} to {rgbda_file_existing}."
)
link_rgbda_frame_files(rgbda_file_existing, rgbda_file_link_src)
def _add_result_metadata(
self,
category: str,
subset_name: str,
sequence_name: str,
frame_number: int,
) -> CO3DSubmissionRender:
res = CO3DSubmissionRender(
category=category,
subset_name=subset_name,
sequence_name=sequence_name,
frame_number=frame_number,
rgbda_frame=None,
)
self._result_list.append(res)
# if res.get_hash() in [r.get_hash() for r in self._result_list]:
# logger.warning(
# f"{str(res.get_hash())} already in the result list! Skipping."
# )
# else:
# self._result_list.append(res)
return res
def _get_result_frame_index(self):
return {(res.sequence_name, res.frame_number): res for res in self._result_list}
def get_eval_batches_map(self, only_target_frame: bool = False):
"""
Returns a dictionary of evaluation examples of the following form:
```
{(category: str, subset_name: str): eval_batches} # eval_batches_map
```
where `eval_batches` look as follows:
```
[
[
(sequence_name_0: str, frame_number_0: int),
(sequence_name_0: str, frame_number_1: int),
...
(sequence_name_0: str, frame_number_M: int),
],
...
[
(sequence_name_N: str, frame_number_0: int),
(sequence_name_N: str, frame_number_1: int),
...
(sequence_name_N: str, frame_number_M: int),
]
] # eval_batches
```
        Here, `eval_batches` contains a list of `N` evaluation examples,
        each consisting of a tuple of frames with numbers `frame_number_j`
        from a given sequence named `sequence_name_i`.
Note that the mapping between `frame_number` and `sequence_name` to the CO3D
        data is stored in the respective `frame_annotations.jgz` and `sequence_annotations.jgz`
files in `<dataset_root>/<category>`.
Args:
only_target_frame: Returns only the first (target evaluation) frame
for each eval batch.
Returns:
eval_batches_map: A dictionary of evaluation examples for each category.
"""
if self._eval_batches_map is None:
self._eval_batches_map = load_all_eval_batches(
self.dataset_root,
self.task,
self.sequence_set,
remove_frame_paths=False,
only_target_frame=False,
)
if only_target_frame:
# take only the first (target evaluation) frame for each eval batch
eval_batches_map = {}
for (category, subset_name), eval_batches in self._eval_batches_map.items():
eval_batches_map[(category, subset_name)] = [
b[0] for b in eval_batches
]
else:
eval_batches_map = self._eval_batches_map
return eval_batches_map
def clear_files(self):
"""
Remove all generated submission files.
"""
if os.path.isdir(self.output_folder):
shutil.rmtree(self.output_folder)
if os.path.isdir(self.submission_cache):
shutil.rmtree(self.submission_cache)
if os.path.isfile(self.submission_archive):
os.remove(self.submission_archive)
def validate_export_results(self):
"""
Validate the submission by checking whether all required prediction files
are present.
"""
if self.dataset_root is None or not os.path.isdir(self.dataset_root):
raise ValueError(
"For validating the results, dataset_root has to be defined"
+ " and has to point to a valid root folder of the CO3D dataset."
)
eval_batches_map = self.get_eval_batches_map(only_target_frame=True)
result_frame_index = self._get_result_frame_index()
valid = True
for (category, subset_name), eval_batches in eval_batches_map.items():
eval_batches_2tuple = [tuple(b[:2]) for b in eval_batches]
missing_preds = [
b for b in eval_batches_2tuple if b not in result_frame_index
]
if len(missing_preds) > 0:
valid = False
logger.info(
f"{category}/{subset_name} is missing predictions."
)
logger.debug(str(missing_preds))
additional_results = [
idx for idx, res in result_frame_index.items() if (
idx not in eval_batches_2tuple
and res.category==category and res.subset_name==subset_name
)
]
if len(additional_results) > 0:
valid = False
logger.info(
f"{category}/{subset_name} has additional results."
)
logger.debug(str(additional_results))
return valid
def submit_to_eval_ai(
self,
challenge_id: int = CO3D_CHALLENGE_ID,
):
"""
Submit the exported results to the EvalAI server.
"""
logger.info(f"Submitting {self.submission_archive} to EvalAI.")
if not os.path.isfile(self.submission_archive):
raise ValueError(
f"Submission archive {self.submission_archive} does not exist."
" Please run submission.export_results() first."
)
try:
import evalai
except ModuleNotFoundError:
raise ValueError(
"Cannot find EvalAI cli package."
" Please install it with pip: `pip install evalai`"
)
if self.eval_ai_personal_token is None or len(self.eval_ai_personal_token)==0:
raise ValueError(
"For EvalAI submission, the personal token"
+" self.eval_ai_personal_token has to be set!"
+" Please obtain it from you EvalAI profile page https://eval.ai/web/profile"
+" by clicking on 'Get your Auth Token' button."
)
# run the evalai imports
from click.testing import CliRunner
from evalai.challenges import challenge
from evalai.add_token import set_token
runner = CliRunner()
# set the eval ai auth token
result = runner.invoke(set_token, [self.eval_ai_personal_token])
if result.exit_code!=0:
raise ValueError("Could not set the eval_ai personal token.")
# get the challenge phase ID
phase_id = CO3D_PHASE_ID[(self.task, self.sequence_set)]
# run the submission script
os.system(
f"evalai challenge {challenge_id} phase {phase_id}"
+ f" submit --file {self.submission_archive} --large"
)
# the following, unfortunately, does not accept keyboard input
# result = runner.invoke(
# challenge, [
# str(challenge_id),
# "phase", str(phase_id),
# "submit",
# "--file", self.submission_archive,
# "--large",
# ],
# input="/n",
# )
# if result.output != 0:
# raise ValueError(
# "Submission failed:"
# + result.output
# )
def export_results(self, validate_results: bool = True):
"""
Export the generated evaluation images for a submission to the EvalAI server.
Args:
validate_results: If `True`, checks whether the added results are valid
before submission. This requires setting `self.dataset_root` to a directory
containing a local copy of the CO3D dataset.
"""
if validate_results:
# optionally check that all results are correct
valid_results = self.validate_export_results()
if not valid_results:
logger.warning(
"The submission results are invalid."
" The evaluation will be incomplete."
)
# zip the directory
logger.info(f"Archiving {self.submission_cache} to {self.submission_archive}.")
if self.export_format=="zip":
raise ValueError(
f"Please export the data using the 'hdf5' format."
f"'zip' is no longer supported."
)
# First we need to remove all links to the ground truth directories
# that were potentially created during a call to self.evaluate().
self._clear_gt_links()
shutil.make_archive(
base_name=self.submission_archive.replace(".zip", ""),
format="zip",
root_dir=self.submission_cache,
base_dir=".",
)
elif self.export_format=="hdf5":
self._export_results_to_hdf5()
else:
raise ValueError(f"Unknown export format {self.export_format}.")
exported_file_size = os.path.getsize(self.submission_archive) / 1e9
if exported_file_size > MAX_EXPORT_ARCHIVE_SIZE_GB:
logger.warning(
f"The exported result file {self.submission_archive} is bigger"
f" than {exported_file_size} GB! Please ensure that your submission file"
f" is smaller to prevent submission upload failures."
)
        # finally, print instructions on how to submit the exported result
logger.warning(
f"Exported result file ({exported_file_size:.2f} GB):"
f"\n\n ===> {self.submission_archive} <==="
f"\n\nYou can now submit the file to the EvalAI server:"
f" In order to do so, run submission.submit_to_eval_ai() to directly"
f" submit the results file using EvalAI-cli (command line interface)."
f" For the latter, make sure to `pip install evalai` and to set"
f" the EVAL_AI_PERSONAL_TOKEN env. variable to your EvalAI Auth token."
f"\n\nAlternativelly, you can submit the file using the submission webpage:"
f" https://eval.ai/web/challenges/challenge-page/{CO3D_CHALLENGE_ID}/submission"
f" ('{self.task.value}-{self.sequence_set.value}' track)\n"
f"Please note a submission using the 'Upload file' option will fail"
f" due the large size of the file. Use the 'File URL' option instead."
)
def _clear_gt_links(self):
gt_folders = glob.glob(os.path.join(self.submission_cache, "*", "GT_*"))
for gt_folder in gt_folders:
logger.debug(f"Clearing GT link directory {gt_folder}.")
shutil.rmtree(gt_folder)
def _export_results_to_hdf5(self):
# get all fls in the submission cache
all_fls = sorted(glob.glob(os.path.join(self.submission_cache, "*", "*", "*.png")))
result_dict = {
os.path.join(*(os.path.normpath(f).split(os.path.sep)[-3:])): f
for f in all_fls
if not os.path.split(os.path.dirname(f))[-1].startswith("GT_")
}
export_result_file_dict_to_hdf5(self.submission_archive, result_dict)
def link_results_from_existing_output_folder(self, output_folder: str) -> None:
"""
Link all results stored in a different output folder to the current
submission object.
Args:
output_folder: The output folder containing all results that will be
linked to the current submission object.
"""
other = CO3DSubmission(
task=self.task,
sequence_set=self.sequence_set,
output_folder=output_folder,
)
other.fill_results_from_cache()
for other_res in other._result_list:
self._link_existing_render(
os.path.join(output_folder, "submission_cache"),
other_res,
)
def fill_results_from_cache(self):
"""
Analyze the results already stored in self.submission_cache and register them
with the submission object.
"""
if not os.path.isdir(self.submission_cache):
logger.info(f"{self.submission_cache} folder does not exist.")
return
categories = os.listdir(self.submission_cache)
for category in categories:
cat_dir = os.path.join(self.submission_cache, category)
if not os.path.isdir(cat_dir):
continue
subset_names = os.listdir(cat_dir)
for subset_name in subset_names:
if subset_name.startswith("GT_"):
continue
submission_dir = os.path.join(cat_dir, subset_name)
submission_files = get_result_directory_file_names(submission_dir)
logger.info(
f"Adding {len(submission_files)} cached results"
f" from {category}/{subset_name}"
)
for submission_file in submission_files:
category_, sequence_name, frame_number = (
_submision_file_to_category_sequence_name_frame_number(
submission_file
)
)
assert category_==category
self._add_result_metadata(
category,
subset_name,
sequence_name,
frame_number,
)
def _fill_cache_from_hdf5(self, archive_path: str):
make_hdf5_file_links(archive_path, self.submission_cache)
def _is_timed_out(self):
if self.max_processing_time > 0:
return (time.time() - self._eval_start_time) > self.max_processing_time
else:
return False
def _get_remaining_submission_time(self):
if self.max_processing_time > 0:
return self.max_processing_time - (time.time() - self._eval_start_time)
else:
return float("Inf")
def evaluate_archive_file(
self,
archive_path: str,
num_workers: int = 0,
print_per_example_results: bool = False,
):
"""
Extract a file with exported results `archive_path` and evaluate.
Args:
            archive_path: A path to the archive file containing exported results.
Such archive file can be exported using `self.export_results`.
"""
os.makedirs(self.submission_cache, exist_ok=True)
logger.info(f"Extracting {archive_path} into {self.submission_cache}.")
if self.export_format=="zip":
shutil.unpack_archive(archive_path, self.submission_cache, "zip")
elif self.export_format=="hdf5":
self._fill_cache_from_hdf5(archive_path)
else:
raise ValueError(f"Unknown export format {self.export_format}")
logger.info(f"Filling results from cache {self.submission_cache}.")
self.fill_results_from_cache()
return self.evaluate(
num_workers=num_workers,
print_per_example_results=print_per_example_results,
)
def evaluate(
self,
num_workers: int = 0,
print_per_example_results: bool = False,
):
"""
        Locally evaluate the submission. Please note that this is possible only
on the unredacted development set.
"""
if not self.on_server:
if not os.path.isdir(self.dataset_root):
raise ValueError("For evaluation dataset_root has to be specified.")
if self.sequence_set == CO3DSequenceSet.TEST:
raise ValueError("Cannot evaluate on the hidden test set!")
else:
# server-side evaluation, do not use
if (
self.server_data_folder is not None
and os.path.isfile(self.server_data_folder)
and self.server_data_folder.endswith(".hdf5")
):
# this is ok, we allow hdf5 files here
logger.info(f"Server folder {self.server_data_folder} is a HDF5 file!")
# with open(self.server_data_folder,'rb') as f:
# md5hash = hashlib.md5(f.read()).hexdigest()
# logger.info(f"HDF5 file hash = {md5hash}")
elif (
self.server_data_folder is not None
and self.server_data_folder.endswith(".dbm")
):
logger.info(f"Server folder {self.server_data_folder} is a DBM file!")
for pfix in [".dat", ".dir"]:
if not os.path.isfile(self.server_data_folder + pfix):
raise ValueError(
f"The DBM {pfix} file for {self.server_data_folder} is missing!"
)
# ok again dbm is good
pass
elif (
self.server_data_folder is None
or not os.path.isdir(self.server_data_folder)
):
raise ValueError(
"For evaluation on the server server_data_folder has to be specified."
)
self._eval_start_time = time.time()
eval_batches_map = self.get_eval_batches_map(only_target_frame=True)
# buffers for results and exceptions
eval_exceptions = {}
eval_results = {}
for subset_i, ((category, subset_name), eval_batches) in enumerate(
eval_batches_map.items()
):
subset_eval_start = time.time()
logger.info(
f"Evaluating {category}/{subset_name} ({subset_i}/{len(eval_batches_map)})."
)
if self.max_processing_time > 0:
logger.info(
f"Remaining submission time: {self._get_remaining_submission_time():1.2f}."
)
pred_category_subset_dir = CO3DSubmission.get_submission_cache_image_dir(
self.submission_cache,
category,
subset_name,
)
# The case with no predicted results, or timed-out eval
if (
(not os.path.isdir(pred_category_subset_dir))
or (len(os.listdir(pred_category_subset_dir))==0)
or self._is_timed_out()
):
if self._is_timed_out():
logger.warning(f"Evaluation timed-out for {category}/{subset_name}!")
else:
logger.info(f"No evaluation predictions for {category}/{subset_name}")
eval_results[(category, subset_name)] = (None, None)
eval_exceptions[(category, subset_name)] = None
continue
# Make a temporary GT folder with symlinks to GT data based on eval batches
gt_category_subset_dir = CO3DSubmission.get_submission_cache_image_dir(
self.submission_cache,
category,
"GT_" + subset_name,
)
for b in eval_batches:
if self.on_server:
_link_eval_batch_data_from_server_db_to_gt_tempdir(
self.server_data_folder,
gt_category_subset_dir,
category,
b,
)
else:
_link_eval_batch_data_from_dataset_root_to_gt_tempdir(
self.dataset_root,
gt_category_subset_dir,
category,
b,
)
# Evaluate and catch any exceptions.
try:
eval_results[(category, subset_name)] = evaluate_file_folders(
pred_category_subset_dir,
gt_category_subset_dir,
num_workers=num_workers,
remaining_time=self._get_remaining_submission_time(),
print_per_example_results=print_per_example_results,
)
except Exception as exc:
logger.warning(f"Evaluation of {category}/{subset_name} failed!", exc_info=True)
eval_results[(category, subset_name)] = (None, None)
eval_exceptions[(category, subset_name)] = exc
if eval_results[(category, subset_name)][0] is not None:
# Print the current subset result
eval_result_string = " ".join([
f"{k}={v:.3f}"
for k, v in eval_results[(category, subset_name)][0].items()
])
logger.info(f"{category}/{subset_name} result: {eval_result_string}")
subset_eval_time = time.time() - subset_eval_start
logger.info(f"Evaluated {category}/{subset_name} in {subset_eval_time:.1f} sec")
# fill in missing eval results with blank prediction results
for (category, subset_name), (eval_result, _) in eval_results.items():
if eval_result is None:
logger.info(
f"Replacing metrics in {category}/{subset_name}"
+" with a blank prediction result."
)
eval_result_ = {}
for m in EVAL_METRIC_NAMES:
blank_render_metric_val = BLANK_PREDICTION_RESULTS[
(self.task, self.sequence_set)
][(category, subset_name)][m]
# eval_result_[m] = _get_missing_metric_val(m)
eval_result_[m] = blank_render_metric_val
eval_results[(category, subset_name)] = eval_result_, None
# Get the average results.
average_results = {}
for m in EVAL_METRIC_NAMES:
average_results[m] = sum(
eval_result[m] for eval_result, _ in eval_results.values()
) / len(eval_results)
eval_results[("MEAN", "-")] = average_results, None
# Generate a nice table and print.
tab_rows = []
for (category, subset_name), (eval_result, _) in eval_results.items():
tab_row = [category, subset_name]
tab_row.extend([eval_result[k] for k in EVAL_METRIC_NAMES])
tab_rows.append(tab_row)
table_str = tabulate(
tab_rows, headers=["Category", "Subset name", *EVAL_METRIC_NAMES]
)
logger.info("\n"+table_str)
# Store the human-readable table
table_txt_file = os.path.join(self.output_folder, "results.csv")
logger.info(f"Dumping the results table to {table_txt_file}.")
header=["Category", "Subset name", *EVAL_METRIC_NAMES]
with open(table_txt_file, 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(tab_rows)
# Store the recorded exceptions in the submissions folder.
with open(self.evaluate_exceptions_file, "wb") as f:
pickle.dump(eval_exceptions, f)
return eval_results
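# Illustrative usage sketch (not part of the original module): locally re-evaluating
# a previously exported submission archive on the development set. All paths are
# hypothetical placeholders; a local copy of the CO3Dv2 dataset is required.
def _example_evaluate_exported_archive():
    submission = CO3DSubmission(
        task=CO3DTask.MANY_VIEW,
        sequence_set=CO3DSequenceSet.DEV,
        output_folder="/tmp/co3d_archive_eval",
        dataset_root="/datasets/co3dv2",
    )
    return submission.evaluate_archive_file(
        "/tmp/co3d_submission_files/submission_manyview_dev.hdf5",
        num_workers=4,
    )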
def _get_missing_metric_val(m: str):
return EVAL_METRIC_MISSING_VALUE[m]
def get_submission_image_name(category: str, sequence_name: str, frame_number: str):
return f"{category}_{sequence_name}_{frame_number}"
def _link_eval_batch_data_from_dataset_root_to_gt_tempdir(
dataset_root: str,
temp_dir: str,
category: str,
frame_index: Tuple[str, int, str],
):
sequence_name, frame_number, gt_image_path = frame_index
image_name = get_submission_image_name(category, sequence_name, frame_number)
os.makedirs(temp_dir, exist_ok=True)
for data_type in ["image", "depth", "mask", "depth_mask"]:
gt_data_path = gt_image_path.replace("/images/", f"/{data_type}s/")
if data_type=="depth":
gt_data_path = gt_data_path.replace(".jpg", ".jpg.geometric.png")
elif data_type in ("mask", "depth_mask"):
gt_data_path = gt_data_path.replace(".jpg", ".png")
tgt_image_name = f"{image_name}_{data_type}.png"
src = os.path.join(dataset_root, gt_data_path)
dst = os.path.join(temp_dir, tgt_image_name)
logger.debug(f"{src} <--- {dst}")
_symlink_force(src, dst)
def _link_eval_batch_data_from_server_db_to_gt_tempdir(
server_folder: str,
temp_dir: str,
category: str,
frame_index: Tuple[str, int, str],
):
sequence_name, frame_number, _ = frame_index
image_name = get_submission_image_name(category, sequence_name, frame_number)
os.makedirs(temp_dir, exist_ok=True)
for data_type in ["image", "depth", "mask", "depth_mask"]:
image_name_postfixed = image_name + f"_{data_type}.png"
dst = os.path.join(temp_dir, image_name_postfixed)
if server_folder.endswith(".hdf5") or server_folder.endswith(".dbm"):
# the folder is in fact an hdf5/dbm file
# so we just make a symlink pointing from the `dst` file
# to the hdf5/dbm database
db_file = server_folder
logger.debug(f"{dst}<---HDF5/DBM file path: {server_folder}")
link_file_to_db_file(db_file, dst)
else:
src = os.path.join(server_folder, image_name_postfixed)
logger.debug(f"{src}<---{dst}")
_symlink_force(src, dst)
def _submision_file_to_category_sequence_name_frame_number(file: str):
toks = os.path.split(file)[-1].split("_")
category = toks[0]
frame_number = int(toks[-1])
sequence_name = "_".join(toks[1:-1])
return category, sequence_name, frame_number
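# Illustrative sketch (not part of the original module): the submission image naming
# scheme and its inverse. Sequence names may themselves contain underscores, which is
# why the parser keeps all middle tokens as the sequence name.
def _example_submission_image_name_round_trip():
    name = get_submission_image_name("apple", "110_13051_23361", 42)
    # name == "apple_110_13051_23361_42"
    category, sequence_name, frame_number = (
        _submision_file_to_category_sequence_name_frame_number(name)
    )
    assert (category, sequence_name, frame_number) == ("apple", "110_13051_23361", 42)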
def _symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
|
co3d-main
|
co3d/challenge/co3d_submission.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import logging
import numpy as np
import dbm
import functools
import h5py
from io import BytesIO
from PIL import Image
from typing import Optional, Callable, Dict, Union
from tqdm import tqdm
from .data_types import CO3DSequenceSet, CO3DTask, RGBDAFrame
logger = logging.getLogger(__file__)
def store_rgbda_frame(rgbda_frame: RGBDAFrame, fl: str):
assert np.isfinite(rgbda_frame.depth).all()
store_mask(rgbda_frame.mask[0], fl + "_mask.png")
store_depth(rgbda_frame.depth[0], fl + "_depth.png")
store_image(rgbda_frame.image, fl + "_image.png")
if rgbda_frame.depth_mask is not None:
        store_1bit_png_mask(rgbda_frame.depth_mask[0], fl + "_depth_mask.png")
def link_rgbda_frame_files(fl_existing: str, fl_src_link: str):
for pfix in ["_mask.png", "_depth.png", "_image.png", "_depth_mask.png"]:
link_tgt = fl_existing+pfix
link_src = fl_src_link+pfix
if os.path.islink(link_src):
os.remove(link_src)
elif os.path.isfile(link_src):
raise ValueError(f"Link source {link_src} is an actual file (not a link).")
if not os.path.isfile(link_tgt):
if pfix=="_depth_mask.png":
pass
else:
raise ValueError(f"Target file {link_tgt} does not exist!")
else:
if os.path.islink(link_src):
os.remove(link_src)
os.symlink(link_tgt, link_src)
def load_rgbda_frame(fl: str, check_for_depth_mask: bool = False) -> RGBDAFrame:
f = RGBDAFrame(
mask=load_mask(fl + "_mask.png")[None],
depth=load_depth(fl + "_depth.png")[None],
image=load_image(fl + "_image.png"),
)
if not np.isfinite(f.depth).all():
f.depth[~np.isfinite(f.depth)] = 0.0 # chuck the infs in depth
if check_for_depth_mask:
depth_mask_path = fl + "_depth_mask.png"
if os.path.isfile(depth_mask_path):
f.depth_mask = load_1bit_png_mask(depth_mask_path)[None]
return f
def store_1bit_png_mask(mask: np.ndarray, fl: str):
"""
mask: HxW
"""
Image.fromarray((mask*255).astype('u1'), mode='L').convert('1').save(fl, "PNG")
def load_1bit_png_mask(file: str) -> np.ndarray:
with Image.open(_handle_db_file(file)) as pil_im:
mask = (np.array(pil_im.convert("L")) > 0.0).astype(np.float32)
return mask
def load_mask(fl: str):
return np.array(Image.open(_handle_db_file(fl))).astype(np.float32) / 255.0
def store_mask(mask: np.ndarray, fl: str, mode: str = "L"):
"""
mask: HxW
"""
assert mask.ndim == 2
if mode == "L":
mpil = Image.fromarray((mask * 255.0).astype(np.uint8), mode="L").convert("L")
elif mode == "I;16":
mpil = Image.fromarray((mask * 255.0).astype(np.uint8), mode="I;16").convert(
"I;16"
)
else:
raise ValueError(mode)
mpil.save(fl, "PNG")
def load_depth(fl: str):
depth_pil = Image.open(_handle_db_file(fl))
depth = (
np.frombuffer(np.array(depth_pil, dtype=np.uint16), dtype=np.float16)
.astype(np.float32)
.reshape((depth_pil.size[1], depth_pil.size[0]))
)
assert depth.ndim == 2
return depth
def store_depth(depth: np.ndarray, fl: str):
assert depth.ndim == 2
depth_uint16 = np.frombuffer(depth.astype(np.float16), dtype=np.uint16).reshape(
depth.shape
)
Image.fromarray(depth_uint16).save(fl)
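# Illustrative sketch (not part of the original module): `store_depth`/`load_depth`
# bit-cast float16 depth values to uint16 and store them in a 16-bit PNG; the loader
# reverses the cast. A minimal in-memory version of the round trip (values chosen to
# be exactly representable in float16):
def _example_depth_bitcast_round_trip():
    depth = np.array([[0.5, 1.25], [2.0, 3.5]], dtype=np.float32)
    as_uint16 = np.frombuffer(depth.astype(np.float16), dtype=np.uint16).reshape(depth.shape)
    depth_back = (
        np.frombuffer(as_uint16, dtype=np.float16).astype(np.float32).reshape(depth.shape)
    )
    assert np.allclose(depth, depth_back)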
def load_image(fl: str):
return np.array(Image.open(_handle_db_file(fl))).astype(np.float32).transpose(2, 0, 1) / 255.0
def store_image(image: np.ndarray, fl: str):
assert image.ndim == 3
Image.fromarray((image.transpose(1, 2, 0) * 255.0).astype(np.uint8)).save(fl)
def _handle_db_file(fl_or_db_link: str):
"""
In case `fl_or_db_link` is a symlink pointing at an .hdf5 or .dbm database file,
this function returns a BytesIO object yielding the underlying file's binary data.
Otherwise, the function simply returns `fl_or_db_link`.
"""
fl_or_bytes_io = fl_or_db_link
for db_format, data_load_fun in (
(".hdf5", _get_image_data_from_h5),
(".dbm", _get_image_data_from_dbm),
):
fl_or_bytes_io = _maybe_get_db_image_data_bytes_io_from_file(
fl_or_db_link,
db_format,
data_load_fun,
)
if not isinstance(fl_or_bytes_io, str):
# logger.info(f"{fl} is {db_format}!")
break
return fl_or_bytes_io
def _maybe_get_db_image_data_bytes_io_from_file(
fl_or_db_link: str,
db_format: str,
data_load_fun: Callable,
) -> Union[str, BytesIO]:
"""
    In case `fl_or_db_link` is a symlink pointing at a database file `db_path`
    of type `db_format`, this function calls `data_load_fun(db_path, fl_or_db_link)`
    to retrieve a BytesIO object yielding the linked file's binary data.
Otherwise, the function simply returns `fl_or_db_link`.
"""
if os.path.islink(fl_or_db_link):
realpath = os.readlink(fl_or_db_link)
if not realpath.endswith(db_format):
return fl_or_db_link
db_path = fl_or_db_link
else:
return fl_or_db_link
return data_load_fun(realpath, db_path)
@functools.lru_cache(maxsize=1)
def _cached_dbm_open_for_read(dbmpath: str):
db = dbm.open(dbmpath, "r")
return db
def _get_image_data_from_dbm(dbmpath: str, fl: str):
flname = os.path.split(fl)[-1]
db = _cached_dbm_open_for_read(dbmpath)
# with dbm.open(dbmpath, "r") as db:
bin_data = db[flname]
return BytesIO(bin_data)
def _get_image_data_from_h5(h5path: str, fl: str):
with h5py.File(h5path, "r") as f:
flname = os.path.split(fl)[-1]
file_index = f["binary_data"].attrs
if flname not in file_index:
raise IndexError(f"{flname} not in {h5path}!")
idx = file_index[flname]
bin_data = f["binary_data"][idx]
return BytesIO(bin_data)
def get_category_to_subset_name_list(
dataset_root: str,
task: Optional[CO3DTask] = None,
sequence_set: Optional[CO3DSequenceSet] = None,
):
"""
Get the mapping from categories to existing subset names.
Args:
dataset_root: The dataset root folder.
task: CO3D Challenge task.
sequence_set: CO3D Challenge sequence_set.
Returns:
category_to_subset_name_list: A dict of the following form:
{
category: [subset_name_0, subset_name_1, ...],
...
}
"""
json_file = os.path.join(dataset_root, "category_to_subset_name_list.json")
with open(json_file, "r") as f:
category_to_subset_name_list = json.load(f)
# filter per-category subset lists by the selected task
if task is not None:
category_to_subset_name_list = {
category: [
subset_name
for subset_name in subset_name_list
if subset_name.startswith(task.value)
]
for category, subset_name_list in category_to_subset_name_list.items()
}
# filter per-category subset lists by the selected sequence set
if sequence_set is not None:
category_to_subset_name_list = {
category: [
subset_name
for subset_name in subset_name_list
if f"_{sequence_set.value}" in subset_name
]
for category, subset_name_list in category_to_subset_name_list.items()
}
# remove the categories with completely empty subset_name_lists
category_to_subset_name_list = {
c: l for c, l in category_to_subset_name_list.items() if len(l) > 0
}
# sort by category
category_to_subset_name_list = dict(sorted(category_to_subset_name_list.items()))
return category_to_subset_name_list
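# Illustrative sketch (not part of the original module): build a minimal fake
# dataset root containing only category_to_subset_name_list.json and query it.
# The category and subset names below are made up for the example.
def _demo_category_to_subset_name_list():
    import json
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as dataset_root:
        index = {
            "apple": ["manyview_dev_0", "fewview_test"],
            "banana": ["fewview_dev"],
        }
        json_file = os.path.join(dataset_root, "category_to_subset_name_list.json")
        with open(json_file, "w") as f:
            json.dump(index, f)
        # keep only the many-view subsets of the dev sequence set
        filtered = get_category_to_subset_name_list(
            dataset_root,
            task=CO3DTask.MANY_VIEW,
            sequence_set=CO3DSequenceSet.DEV,
        )
        assert filtered == {"apple": ["manyview_dev_0"]}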
def load_all_eval_batches(
dataset_root: str,
task: Optional[CO3DTask] = None,
sequence_set: Optional[CO3DSequenceSet] = None,
remove_frame_paths: bool = False,
only_target_frame: bool = True,
):
"""
Load eval batches files stored in dataset_root into a dictionary:
{
(category, subset_name): eval_batches_index,
...
}
Args:
dataset_root: The root of the CO3DV2 dataset.
task: CO3D challenge task.
sequence_set: CO3D challenge sequence set.
remove_frame_paths: If `True`, removes the paths to frames from the loaded
dataset index.
only_target_frame: Loads only the first (evaluation) frame from each eval batch.
Returns:
eval_batches_dict: Output dictionary.
"""
category_to_subset_name_list = get_category_to_subset_name_list(
dataset_root,
task=task,
sequence_set=sequence_set,
)
eval_batches_dict = {}
for category, subset_name_list in category_to_subset_name_list.items():
for subset_name in subset_name_list:
# load the subset eval batches
eval_batches_dict[(category, subset_name)] = _load_eval_batches_file(
dataset_root,
category,
subset_name,
remove_frame_paths=remove_frame_paths,
only_target_frame=only_target_frame,
)
return eval_batches_dict
def _load_eval_batches_file(
dataset_root: str,
category: str,
subset_name: str,
remove_frame_paths: bool = True,
only_target_frame: bool = True,
):
eval_batches_fl = os.path.join(
dataset_root,
category,
"eval_batches",
f"eval_batches_{subset_name}.json",
)
with open(eval_batches_fl, "r") as f:
eval_batches = json.load(f)
if only_target_frame:
eval_batches = [
b[0] for b in eval_batches
] # take only the first (target evaluation) frame
if remove_frame_paths:
eval_batches = [b[:2] for b in eval_batches]
return eval_batches
def export_result_file_dict_to_hdf5(h5path: str, filedict: Dict[str, str]):
"""
Export the result files to an hdf5 file that will be sent to the EvalAI server:
Args:
h5path: Target hdf5 file path.
filedict: Dict in form {relative_file_path: absolute_file_path}
"""
logger.info(f"Exporting {len(filedict)} files to HDF5 file {h5path}.")
if len(filedict)==0:
raise ValueError("No data to export!")
assert h5path.endswith(".hdf5")
if os.path.isfile(h5path):
os.remove(h5path)
os.makedirs(os.path.dirname(h5path), exist_ok=True)
with h5py.File(h5path, "w", libver='latest') as fh5:
dt = h5py.special_dtype(vlen=np.dtype('uint8'))
max_path_len = max(len(p) for p in filedict.keys())
dset = fh5.create_dataset(
'binary_data', (len(filedict), ), dtype=dt, compression="gzip"
)
filepath_dset = fh5.create_dataset(
'filepaths',
(len(filedict), ),
dtype=h5py.string_dtype('utf-8', max_path_len),
# dtype=np.dtype(f'U{max_path_len}'),
compression="gzip"
)
index = {}
for idx, (rel_path, store_file) in enumerate(tqdm(filedict.items(), total=len(filedict))):
_store_binary_file_data_to_hd5_dataset(dset, store_file, idx)
flname = os.path.split(rel_path)[-1]
assert flname not in index, "Duplicate filenames!"
index[flname] = idx
filepath_dset[idx] = rel_path
logger.info(f"Updating index of {h5path}.")
dset.attrs.update(index)
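# Illustrative sketch (not part of the original module): export two small files
# to an HDF5 archive and re-create them as symlinks pointing at the archive.
# All paths below are temporary and made up for the example.
def _demo_hdf5_export_and_links():
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        src_root = os.path.join(tmp, "src")
        os.makedirs(src_root)
        filedict = {}
        for rel_path in ("a_image.png", "b_image.png"):
            abs_path = os.path.join(src_root, rel_path)
            with open(abs_path, "wb") as f:
                f.write(os.urandom(16))
            filedict[rel_path] = abs_path
        h5path = os.path.join(tmp, "db", "export.hdf5")
        export_result_file_dict_to_hdf5(h5path, filedict)
        link_root = os.path.join(tmp, "linked")
        make_hdf5_file_links(h5path, link_root)
        # the links resolve to the archive; _handle_db_file can read them back
        assert os.path.islink(os.path.join(link_root, "a_image.png"))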
def make_hdf5_file_links(h5path: str, root: str):
"""
Link all files whose binary data are stored in an HDF5 file `h5path` to
files under the root folder.
Args:
h5path: HDF5 file.
root: The root folder for exporting symlinks.
"""
logger.info(f"Making file links in {root} to DB data in {h5path}.")
assert h5path.endswith(".hdf5")
with h5py.File(h5path, "r") as fh5:
filepaths = [f.decode("UTF-8") for f in np.array(fh5["filepaths"])]
file_name_to_tgt_file = {
os.path.split(p)[-1]: os.path.join(root, p) for p in filepaths
}
dset = fh5["binary_data"]
index = dset.attrs
all_dirs = set(os.path.dirname(p) for p in file_name_to_tgt_file.values())
for dir_ in all_dirs:
os.makedirs(dir_, exist_ok=True)
for flname, _ in tqdm(index.items(), total=len(index)):
tgt_file = file_name_to_tgt_file[flname]
link_file_to_db_file(h5path, tgt_file)
def link_file_to_db_file(db_file: str, file: str, overwrite: bool = True):
"""
Make a symlink file->db_file
"""
if db_file.endswith(".hdf5"):
token = "__HDF5__:"
elif db_file.endswith(".dbm"):
token = "__DBM__:"
else:
raise ValueError(db_file)
if overwrite and (os.path.isfile(file) or os.path.islink(file)):
os.remove(file)
os.symlink(db_file, file)
# symlinks are cleaner ... do not use this anymore:
# with open(file, "w") as f:
# f.write(token+os.path.normpath(os.path.abspath(db_file)))
def _store_binary_file_data_to_hd5_dataset(dset, fl: str, idx: int):
with open(fl, "rb") as fin:
binary_data = fin.read()
    dset[idx] = np.frombuffer(binary_data, dtype="uint8")
|
co3d-main
|
co3d/challenge/io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
co3d-main
|
co3d/challenge/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import logging
import time
from typing import Optional
from typing import Tuple
from .data_types import RGBDAFrame
EVAL_METRIC_NAMES = ["psnr_masked", "psnr_fg", "psnr_full_image", "depth_abs_fg", "iou"]
EVAL_METRIC_MISSING_VALUE = {
"psnr_masked": 0.0,
"psnr_fg": 0.0,
"psnr_full_image": 0.0,
"depth_abs_fg": 100000.0,
"iou": 0.0,
}
logger = logging.getLogger(__file__)
def eval_one(
pred: RGBDAFrame,
target: RGBDAFrame,
):
return eval_one_rgbda(
pred.image,
pred.depth,
pred.mask,
target.image,
target.depth,
target.mask,
gt_depth_mask=target.depth_mask,
)
def eval_one_rgbda(
image_rgb: np.ndarray,
depth_map: np.ndarray,
fg_mask: np.ndarray,
gt_image_rgb: np.ndarray,
gt_depth_map: np.ndarray,
gt_fg_mask: np.ndarray,
gt_depth_mask: Optional[np.ndarray] = None,
crop_around_fg_mask: bool = False,
gt_fg_mask_threshold: Optional[float] = 0.5,
):
"""
Args:
image_rgb: 3xHxW, black background
depth_map: 1xHxW
fg_mask: 1xHxW in {0, 1}
gt_image_rgb: 3xHxW, black background
gt_depth_map: 1xHxW
gt_fg_mask: 1xHxW in {0, 1}
gt_depth_mask: 1xHxW in {0, 1}
Returns:
eval_result: a dictionary {metric_name: str: metric_value: float}
"""
# with Timer("start"):
for xn, x in zip(
("image_rgb", "fg_mask", "depth_map"),
(image_rgb, fg_mask, depth_map),
):
if not np.isfinite(x).all():
raise ValueError(f"Non-finite element in {xn}")
if gt_fg_mask_threshold is not None:
        # threshold the gt mask if not done before
gt_fg_mask = (gt_fg_mask > gt_fg_mask_threshold).astype(np.float32)
# chuck non-finite depth
gt_depth_map[~np.isfinite(gt_depth_map)] = 0
if gt_depth_mask is not None:
gt_depth_map = gt_depth_map * gt_depth_mask
if crop_around_fg_mask:
raise NotImplementedError("")
fg_mask_box_xxyy = _get_bbox_from_mask(gt_fg_mask[0])
[
image_rgb,
depth_map,
fg_mask,
gt_image_rgb,
gt_depth_map,
gt_fg_mask,
gt_depth_mask,
] = [
x[
:,
fg_mask_box_xxyy[2]:fg_mask_box_xxyy[3],
fg_mask_box_xxyy[0]:fg_mask_box_xxyy[1],
] for x in [
image_rgb,
depth_map,
fg_mask,
gt_image_rgb,
gt_depth_map,
gt_fg_mask,
gt_depth_mask,
]
]
gt_image_rgb_masked = gt_image_rgb * gt_fg_mask
# with Timer("psnrs"):
psnr_masked = calc_psnr(image_rgb, gt_image_rgb_masked)
psnr_full_image = calc_psnr(image_rgb, gt_image_rgb)
psnr_fg = calc_psnr(image_rgb, gt_image_rgb_masked, mask=gt_fg_mask)
# with Timer("depth"):
mse_depth, abs_depth, aux_depth = calc_mse_abs_depth(
depth_map,
gt_depth_map,
gt_fg_mask,
crop=5,
)
# with Timer("iou"):
iou = calc_iou(fg_mask, gt_fg_mask)
return {
"psnr_masked": psnr_masked,
"psnr_fg": psnr_fg,
"psnr_full_image": psnr_full_image,
"depth_abs_fg": abs_depth,
"iou": iou,
}
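# Illustrative sketch (not part of the original module): run eval_one_rgbda on
# random synthetic prediction and ground-truth tensors to show the expected
# shapes and the returned metric dictionary. All values are made up.
def _demo_eval_one_rgbda():
    rng = np.random.RandomState(0)
    H, W = 16, 20
    gt_mask = (rng.uniform(size=(1, H, W)) > 0.5).astype(np.float32)
    gt_image = rng.uniform(size=(3, H, W)).astype(np.float32) * gt_mask
    gt_depth = rng.uniform(0.5, 2.0, size=(1, H, W)).astype(np.float32)
    pred_mask = (rng.uniform(size=(1, H, W)) > 0.5).astype(np.float32)
    pred_image = rng.uniform(size=(3, H, W)).astype(np.float32) * pred_mask
    pred_depth = rng.uniform(0.5, 2.0, size=(1, H, W)).astype(np.float32)
    result = eval_one_rgbda(
        pred_image, pred_depth, pred_mask, gt_image, gt_depth, gt_mask
    )
    assert set(result.keys()) == set(EVAL_METRIC_NAMES)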
def calc_psnr(
x: np.ndarray,
y: np.ndarray,
mask: Optional[np.ndarray] = None,
) -> np.float32:
"""
Calculates the Peak-signal-to-noise ratio between tensors `x` and `y`.
"""
mse = calc_mse(x, y, mask=mask)
psnr = np.log10(np.clip(mse, 1e-10, None)) * (-10.0)
return psnr
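# Illustrative sketch (not part of the original module): for identical inputs
# the MSE is clipped to 1e-10, so the PSNR saturates at 100 dB.
def _demo_calc_psnr_saturation():
    x = np.full((3, 4, 4), 0.5, dtype=np.float32)
    assert np.isclose(calc_psnr(x, x), 100.0)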
def calc_mse(
x: np.ndarray,
y: np.ndarray,
mask: Optional[np.ndarray] = None,
) -> np.float32:
"""
Calculates the mean square error between tensors `x` and `y`.
"""
if mask is None:
return np.mean((x - y) ** 2)
else:
mask_expand = np.broadcast_to(mask, x.shape)
return (((x - y) ** 2) * mask_expand).sum() / np.clip(
mask_expand.sum(), 1e-5, None
)
def rgb_l1(
pred: np.ndarray, target: np.ndarray, mask: Optional[np.ndarray] = None
) -> np.float32:
"""
Calculates the mean absolute error between the predicted colors `pred`
and ground truth colors `target`.
"""
if mask is None:
mask = np.ones_like(pred[:1])
return (np.abs(pred - target) * mask).sum() / np.clip(mask.sum(), 1, None)
def calc_mse_abs_depth(
pred: np.ndarray,
target: np.ndarray,
mask: np.ndarray,
crop: int,
get_best_scale: bool = True,
best_scale_clamp_thr: float = 1e-4,
) -> Tuple[np.float32, np.float32, dict]:
# crop
if crop > 0:
target = target[:, crop:-crop, crop:-crop]
pred = pred[:, crop:-crop, crop:-crop]
mask = mask[:, crop:-crop, crop:-crop]
target = target * mask
dmask = (target > 0.0).astype(np.float32)
dmask_mass = np.clip(dmask.sum(), 1e-4, None)
scale_l1 = scale_l2 = None
for l_norm in ["l1", "l2"]:
if get_best_scale:
# mult preds by a scalar "scale_best"
# s.t. we get best possible mse error
_optimal_scale = {
"l1": _optimal_l1_scale,
"l2": _optimal_l2_scale,
}[l_norm]
scale_best = _optimal_scale(
pred * dmask, target * dmask, best_scale_clamp_thr
)
pred_scaled = pred * scale_best
if l_norm=="l1":
scale_l1 = scale_best
elif l_norm=="l2":
scale_l2 = scale_best
else:
raise ValueError(l_norm)
else:
pred_scaled = pred
df = target - pred_scaled
if l_norm=="l1":
abs_depth = (dmask * np.abs(df)).sum() / dmask_mass
elif l_norm=="l2":
mse_depth = (dmask * (df ** 2)).sum() / dmask_mass
else:
raise ValueError(l_norm)
return mse_depth, abs_depth, {"scale_l1": scale_l1, "scale_l2": scale_l2}
def _optimal_l2_scale(pred, gt, clamp_thr):
"""
Return the scale s that minimizes ||gt - s pred||^2.
The inverse scale is clamped to [eps, Inf]
"""
xy = pred * gt
xx = pred * pred
scale_best = xy.mean() / np.clip(xx.mean(), clamp_thr, None)
return scale_best
def _optimal_l1_scale(pred, gt, clamp_thr):
"""
Return the scale s that minimizes |gt - s pred|_1.
The scale is clamped in [-max_scale, max_scale].
    The arrays are flattened before the scale is estimated.
"""
max_scale = 1 / clamp_thr
x, y = pred.reshape(-1), gt.reshape(-1)
pivots = y / np.clip(x, 1e-10, None)
perm = np.argsort(pivots)
pivots = pivots[perm]
x_sorted = x[perm]
score = -np.abs(x).sum() + 2 * np.cumsum(np.abs(x_sorted))
# find the index of first positive score
i = (score <= 0).astype(np.float32).sum().astype(np.int64)
# i = torch.unsqueeze(i, dim)
if i >= len(pivots.reshape(-1)):
# logger.warning("Scale outside of bounds!")
return 1.0
else:
scale = pivots[i]
scale = np.clip(scale, -max_scale, max_scale)
# scale = torch.take_along_dim(pivots, i, dim=dim)
# scale = torch.clip(scale, min=-max_scale, max=max_scale)
# outshape = [s for si, s in enumerate(y.shape) if si != dim]
# scale = scale.view(outshape)
return float(scale)
def calc_iou(
predict: np.ndarray,
target: np.ndarray,
mask: Optional[np.ndarray] = None,
threshold: Optional[float] = 0.5,
) -> np.float32:
"""
    Intersection-over-union between `predict` and `target`; it emphasizes
    the active regions of the prediction and the target.
"""
if threshold is not None:
predict = (predict >= threshold).astype(np.float32)
target = (target >= threshold).astype(np.float32)
if mask is not None:
predict = predict * mask
target = target * mask
intersect = (predict * target).sum()
union = (predict + target - predict * target).sum() + 1e-4
return intersect / union
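# Illustrative sketch (not part of the original module): the IoU of two binary
# masks that overlap in half of their area is 1/3 (up to the 1e-4 regularizer
# added to the union).
def _demo_calc_iou():
    a = np.zeros((1, 4, 4), dtype=np.float32)
    b = np.zeros((1, 4, 4), dtype=np.float32)
    a[:, :, :2] = 1.0  # columns 0-1
    b[:, :, 1:3] = 1.0  # columns 1-2
    assert abs(calc_iou(a, b) - 1.0 / 3.0) < 1e-3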
def _get_bbox_from_mask(
mask: np.ndarray,
box_crop_context: float = 0.1,
thr: float = 0.5,
decrease_quant: float = 0.05,
) -> Tuple[int, int, int, int]:
    # bbox in xxyy format (x0, x1, y0, y1)
masks_for_box = np.zeros_like(mask)
while masks_for_box.sum() <= 1.0:
masks_for_box = (mask > thr).astype(np.float32)
thr -= decrease_quant
assert thr > 0.0
x0, x1 = _get_1d_bounds(masks_for_box.sum(axis=-2))
y0, y1 = _get_1d_bounds(masks_for_box.sum(axis=-1))
h, w = y1 - y0 + 1, x1 - x0 + 1
if box_crop_context > 0.0:
c = box_crop_context
x0 -= w * c / 2
y0 -= h * c / 2
h += h * c
w += w * c
x1 = x0 + w
y1 = y0 + h
x0, x1 = [np.clip(x_, 0, mask.shape[1]) for x_ in [x0, x1]]
y0, y1 = [np.clip(y_, 0, mask.shape[0]) for y_ in [y0, y1]]
    return np.round(np.array([x0, x1, y0, y1])).astype(int).tolist()
def _get_1d_bounds(arr: np.ndarray) -> Tuple[int, int]:
nz = np.flatnonzero(arr)
return nz[0], nz[-1]
class Timer:
def __init__(self, name=None):
self.name = name if name is not None else "timer"
def __enter__(self):
self.start = time.time()
def __exit__(self, exc_type, exc_value, traceback):
logger.info(f"{self.name} - {time.time() - self.start:.3e} sec")
|
co3d-main
|
co3d/challenge/metric_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import numpy as np
from dataclasses import dataclass
from typing import Optional
@dataclass
class RGBDAFrame:
image: np.ndarray
mask: np.ndarray
depth: np.ndarray
depth_mask: Optional[np.ndarray] = None
class CO3DTask(Enum):
MANY_VIEW = "manyview"
FEW_VIEW = "fewview"
class CO3DSequenceSet(Enum):
TRAIN = "train"
DEV = "dev"
TEST = "test"
|
co3d-main
|
co3d/challenge/data_types.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import zipfile
import glob
import logging
import multiprocessing
import numpy as np
import time
from tqdm import tqdm
from collections import defaultdict
from typing import List, Dict, Tuple
from .data_types import CO3DSequenceSet, CO3DTask, RGBDAFrame
from .metric_utils import eval_one, EVAL_METRIC_NAMES, Timer
from .io import load_rgbda_frame
logger = logging.getLogger(__file__)
def get_co3d_task_from_subset_name(subset_name: str) -> CO3DTask:
if subset_name.startswith("manyview"):
return CO3DTask.MANY_VIEW
elif subset_name.startswith("fewview"):
return CO3DTask.FEW_VIEW
else:
raise ValueError(f"Invalid subset name {subset_name}!")
def get_co3d_sequence_set_from_subset_name(subset_name: str) -> CO3DSequenceSet:
return CO3DSequenceSet(subset_name.split("_")[1])
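# Illustrative sketch (not part of the original module): subset names encode
# both the task and the sequence set, e.g. "manyview_dev_0" or "fewview_test".
def _demo_parse_subset_name():
    assert get_co3d_task_from_subset_name("manyview_dev_0") == CO3DTask.MANY_VIEW
    assert get_co3d_sequence_set_from_subset_name("manyview_dev_0") == CO3DSequenceSet.DEV
    assert get_co3d_task_from_subset_name("fewview_test") == CO3DTask.FEW_VIEW
    assert get_co3d_sequence_set_from_subset_name("fewview_test") == CO3DSequenceSet.TEST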
def unzip(file_path: str, output_dir: str):
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(output_dir)
def check_user_submission_file_paths(
ground_truth_files: Dict[str, str],
user_submission_files: Dict[str, str],
):
missing_gt_examples = [
gt_example_name
for gt_example_name in ground_truth_files
if gt_example_name not in user_submission_files
]
if len(missing_gt_examples) > 0:
raise ValueError(
f"There are missing evaluation examples: {str(missing_gt_examples)}"
)
additional_user_examples = [
user_example
for user_example in user_submission_files
if user_example not in ground_truth_files
]
if len(additional_user_examples) > 0:
raise ValueError(
f"Unexpected submitted evaluation examples {str(additional_user_examples)}"
)
def get_data_type_postfix(data_type: str):
assert data_type in ["image", "mask", "depth", "depth_mask"]
return f"_{data_type}.png"
def get_result_directory_file_names(
result_dir: str, has_depth_masks: bool = False,
) -> Dict[str, str]:
"""
Result directory structure:
        <test_example_name>_image.png
        <test_example_name>_mask.png
        <test_example_name>_depth.png
...
Returns:
result_files: dict {test_example_name_i: root_path_i}
"""
result_type_files = {}
for result_type in ("image", "mask", "depth"):
postfix = get_data_type_postfix(result_type)
matching_files = sorted(glob.glob(os.path.join(result_dir, f"*{postfix}")))
if has_depth_masks and result_type=="mask":
matching_files = [
f for f in matching_files
if not f.endswith(get_data_type_postfix("depth_mask"))
]
result_type_files[result_type] = {
os.path.split(f)[-1][: -len(postfix)]: f for f in matching_files
}
example_names = sorted(
list(
set(
[
n
for t in ("image", "mask", "depth")
for n in result_type_files[t].keys()
]
)
)
)
missing_examples = defaultdict(list)
for example_name in example_names:
for result_type in ("image", "mask", "depth"):
if example_name not in result_type_files[result_type]:
missing_examples[example_name].append(result_type)
if len(missing_examples) > 0:
msg = "\n".join(
[f" {k} missing {str(v)}" for k, v in missing_examples.items()]
)
raise ValueError(
f"Some evaluation examples in {result_dir} are incomplete:\n"
+ msg
)
result_files = {
example_name: result_type_files["image"][example_name][: -len("_image.png")]
for example_name in example_names
}
return result_files
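# Illustrative sketch (not part of the original module): the function only
# inspects file names, so empty placeholder files are enough to show the
# returned {example_name: path_prefix} mapping. The names below are made up.
def _demo_result_directory_file_names():
    import tempfile

    with tempfile.TemporaryDirectory() as result_dir:
        for example in ("seq1_frame000", "seq1_frame001"):
            for data_type in ("image", "mask", "depth"):
                fl = os.path.join(result_dir, example + get_data_type_postfix(data_type))
                open(fl, "wb").close()
        result_files = get_result_directory_file_names(result_dir)
        assert sorted(result_files) == ["seq1_frame000", "seq1_frame001"]
        assert result_files["seq1_frame000"] == os.path.join(result_dir, "seq1_frame000")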
def _evaluate_pred_gt_pair(args: Tuple[str, str, str, float, bool]):
gt_example, gt_file, pred_file, max_time, print_status = args
cur_time = time.time()
if cur_time > max_time:
raise ValueError(
" @@@@@@@@@@@@@@@@@@@@@\n"
" Evaluation timed out!\n"
" @@@@@@@@@@@@@@@@@@@@@"
)
# with Timer("io"):
gt_rgbda = load_rgbda_frame(gt_file, check_for_depth_mask=True)
pred_rgbda = load_rgbda_frame(pred_file)
# with Timer("check"):
check_same_rgbda_sizes(gt_rgbda, pred_rgbda, gt_example)
# with Timer("eval"):
eval_result_one = eval_one(pred_rgbda, gt_rgbda)
for k, v in eval_result_one.items():
if not np.isfinite(v):
            raise ValueError(f"{gt_example} - {k} does not have a finite value.")
if print_status:
msg = "; ".join([f"{k}={v:.3f}" for k, v in eval_result_one.items()])
sz = str(list(gt_rgbda.image.shape[-2:])).replace(" ", "")
logger.info(
f"eval_one({gt_example}-[{sz}]): {msg}; {max_time-cur_time:.1f} sec left"
)
return eval_result_one
def evaluate_file_folders(
pred_folder: str,
gt_folder: str,
num_workers: int = 0,
remaining_time: float = float("Inf"),
print_per_example_results: bool = True,
):
    # determine how much time we have for the evaluation
max_time = time.time() + remaining_time
user_submission_files = get_result_directory_file_names(pred_folder)
ground_truth_files = get_result_directory_file_names(gt_folder, has_depth_masks=True)
logger.info(f"Evaluating folders: prediction={pred_folder}; gt={gt_folder}")
check_user_submission_file_paths(
ground_truth_files,
user_submission_files,
)
# At this point we are sure that ground_truth_files contain the same
# examples as user_submission_files.
if num_workers <= 0:
# Iterate over the gt examples:
per_example_results = [
_evaluate_pred_gt_pair(
(
gt_example,
ground_truth_files[gt_example],
user_submission_files[gt_example],
max_time,
print_per_example_results,
)
) for gt_example in tqdm(list(ground_truth_files))
]
# gt_rgbda = load_rgbda_frame(ground_truth_files[gt_example], check_for_depth_mask=True)
# pred_rgbda = load_rgbda_frame(user_submission_files[gt_example])
# check_same_rgbda_sizes(gt_rgbda, pred_rgbda, gt_example)
# per_example_results.append(eval_one(pred_rgbda, gt_rgbda))
else:
# parallel processing
arg_list = [
(
gt_example,
ground_truth_files[gt_example],
user_submission_files[gt_example],
max_time,
print_per_example_results,
) for gt_example in list(ground_truth_files)
]
pool = multiprocessing.Pool(num_workers)
per_example_results = [
result for result in tqdm(
pool.imap(_evaluate_pred_gt_pair, arg_list),
total=len(arg_list),
)
]
pool.terminate()
result = {
metric: (sum(r[metric] for r in per_example_results) / len(per_example_results))
for metric in EVAL_METRIC_NAMES
}
return result, per_example_results
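# Illustrative sketch (not part of the original module): build a one-example
# ground-truth folder and a prediction folder with the store_* helpers (assumed
# importable from the sibling io module) and evaluate them. Shapes and values
# are arbitrary assumptions for the example.
def _demo_evaluate_file_folders():
    import tempfile
    from .io import store_depth, store_image, store_mask

    rng = np.random.RandomState(0)
    H, W = 20, 24
    with tempfile.TemporaryDirectory() as tmp:
        pred_dir = os.path.join(tmp, "pred")
        gt_dir = os.path.join(tmp, "gt")
        for folder in (pred_dir, gt_dir):
            os.makedirs(folder)
            prefix = os.path.join(folder, "seq1_frame000")
            store_image(rng.uniform(size=(3, H, W)), prefix + "_image.png")
            store_depth(rng.uniform(0.5, 2.0, size=(H, W)), prefix + "_depth.png")
            store_mask(
                (rng.uniform(size=(H, W)) > 0.5).astype(np.float32),
                prefix + "_mask.png",
            )
        result, per_example = evaluate_file_folders(pred_dir, gt_dir)
        assert set(result.keys()) == set(EVAL_METRIC_NAMES)
        assert len(per_example) == 1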
def check_same_rgbda_sizes(gt: RGBDAFrame, pred: RGBDAFrame, example_name: str):
for data_type in ("image", "mask", "depth"):
gt_size, pred_size = [getattr(x, data_type).shape for x in [gt, pred]]
if gt_size != pred_size:
raise ValueError(
f"{example_name}'s size does not match the ground truth."
f"{data_type} size: {str(gt_size)} != {str(pred_size)}"
" (ground-truth vs. prediction)."
)
return True
def get_annotations_folder(phase_codename: str):
assert phase_codename in {"dev", "test"}
return os.path.join("annotations", phase_codename)
|
co3d-main
|
co3d/challenge/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import StringIO
import os
import csv
from typing import List, Any
from .data_types import CO3DTask, CO3DSequenceSet
BLANK_PREDICTION_RESULTS = {}
def _read_result_csv(s: str):
# with open(fl, "r") as f:
f = StringIO(s)
csvreader = csv.reader(f)
rows = [row for row in csvreader]
rows = rows[1:]
header = rows[0]
data = rows[1:-1]
def _getcol(col_name: str, row: List[Any]) -> Any:
c = row[header.index(col_name)]
try:
return float(c)
except:
return c
parsed = {
(_getcol("Category", r), _getcol("Subset name", r)): {
k: _getcol(k, r) for k in header
} for r in data
}
return parsed
CSVs = {
"fewview_dev": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,fewview_dev,18.40938866633708,6.884780900276403,5.732459292886711,0.48950375965076004,0.0
backpack,fewview_dev,18.375179837644755,11.884768822089297,5.492699127831022,0.580444590643848,0.0
ball,fewview_dev,15.65596825167019,5.697924649467918,5.391241119316918,0.43991856992712286,0.0
banana,fewview_dev,18.461971791362227,6.118058719441003,5.8697287026999625,0.5312080518960041,0.0
baseballbat,fewview_dev,20.451565072548348,6.7702838462526325,6.133595679990426,0.787964382936369,0.0
baseballglove,fewview_dev,15.899123723379235,8.491206359449485,5.952075366998026,0.5044438522210485,0.0
bench,fewview_dev,13.835660454286623,6.1021708060060185,5.338972434739994,0.8728473659927769,0.0
bicycle,fewview_dev,14.85079899106894,7.178515383648441,5.4468849723020165,0.7596495667817377,0.0
book,fewview_dev,13.526778301589218,5.929520397898452,6.1038428839075625,0.7119168557685552,0.0
bottle,fewview_dev,17.756936987543572,7.695879675777415,5.792669536453962,1.1126274259151023,0.0
bowl,fewview_dev,12.117324340446702,3.522034136500667,6.132690804727037,0.548212652825193,0.0
broccoli,fewview_dev,17.60270342882336,8.135587140185267,5.636059385848195,0.48109570750702163,0.0
cake,fewview_dev,14.831394456777907,6.641730746137352,5.778288244687103,0.4713467452914664,0.0
car,fewview_dev,12.199833440326447,6.2695458065545955,5.7147062915561,0.6731242096715442,0.0
carrot,fewview_dev,18.42032666772822,6.336027619876071,5.2655157144357,0.7425826445279987,0.0
cellphone,fewview_dev,18.54815997270957,9.132949039155196,5.920507132031587,0.7256476083461838,0.0
chair,fewview_dev,14.254104990224922,6.8885175096457525,5.42230365019509,0.8701949198272996,0.0
couch,fewview_dev,12.096141908081652,8.498063614467037,6.839693292778098,0.6672055849897333,0.0
cup,fewview_dev,16.30300593190912,6.263725950094426,5.419278138684526,1.109737605178693,0.0
donut,fewview_dev,17.760249549810045,7.19401090262162,5.406775287613137,0.5831024075924244,0.0
frisbee,fewview_dev,13.006974807290442,5.348851057119092,6.081314892526941,0.6282357528069842,0.0
hairdryer,fewview_dev,18.307693246477385,7.653327373043194,5.796698293526376,0.5692578716769887,0.0
handbag,fewview_dev,16.863888776603684,9.668777191048893,5.885582988575421,0.6140305534695657,0.0
hotdog,fewview_dev,16.576000201780598,6.7813353163227275,6.479828364566311,0.5515738226619902,0.0
hydrant,fewview_dev,14.35863704229326,5.557106534568748,5.486735221682155,0.7370800150837736,0.0
keyboard,fewview_dev,18.319239151881423,10.9398173290579,5.471888028766401,0.591969625411462,0.0
kite,fewview_dev,13.759580600059902,6.095096560743659,5.5797533716568335,0.3686704352187232,0.0
laptop,fewview_dev,17.958107529829775,10.58932076091378,5.9870485037655365,0.6760399403943799,0.0
microwave,fewview_dev,12.641232654595555,7.5579894876019935,5.7736075695959785,0.7816656712123962,0.0
motorcycle,fewview_dev,13.902730964332383,7.837737363341203,5.6993349939287,0.8026270041676278,0.0
mouse,fewview_dev,22.139654039699753,11.380540045528843,5.26534717648027,0.6258851366555073,0.0
orange,fewview_dev,16.965398815565717,5.392140191707388,5.868309801114943,0.45518186645635506,0.0
parkingmeter,fewview_dev,17.969579417828633,8.303453741571293,5.550653705252322,2.7703986799279625,0.0
pizza,fewview_dev,14.044388259713267,6.467125499434811,6.349638051827558,0.5445261030741094,0.0
plant,fewview_dev,15.912698636112678,8.209728015160032,5.41847542705161,0.9729385734872266,0.0
remote,fewview_dev,18.901389746835065,8.809855001539868,5.6508358729724995,0.5809070430213752,0.0
sandwich,fewview_dev,14.961081916655587,5.359419050654777,6.486182655727676,0.5273259918746086,0.0
skateboard,fewview_dev,15.12940600031295,6.633805444460857,6.075841409914119,0.5708218125938797,0.0
stopsign,fewview_dev,18.52676122564753,6.61671306856769,5.412139613407474,6.290707304470178,0.0
suitcase,fewview_dev,16.493029339685542,10.757954804495968,6.232275999259873,0.5967537541074001,0.0
teddybear,fewview_dev,12.49373038673622,5.562061567728542,5.8834174182726855,0.6012993745910462,0.0
toaster,fewview_dev,15.590308176317933,8.571510283192422,5.8223530170835565,0.7087675899756055,0.0
toilet,fewview_dev,11.053325723237059,3.745954412389449,5.831752233322646,0.7324808735388084,0.0
toybus,fewview_dev,15.74397288343334,5.87386919966778,5.694742423634763,0.644572040998336,0.0
toyplane,fewview_dev,15.271423476084475,4.920347774565625,5.603913746917713,0.5686183372913356,0.0
toytrain,fewview_dev,19.250492955217194,8.365187557837626,5.5957012947860445,0.6429103676877059,0.0
toytruck,fewview_dev,15.813126824200825,7.367196186168707,5.59287438907558,0.5748745851615271,0.0
tv,fewview_dev,18.455985344741848,11.821412211957313,5.87636504861574,0.6193668766022515,0.0
umbrella,fewview_dev,13.388214509185625,6.669691460242465,5.398996667950242,0.5547154568934756,0.0
vase,fewview_dev,17.385895374160103,7.695607020715037,5.667400967410725,1.0544596567185702,0.0
wineglass,fewview_dev,14.92593215613611,5.489494483032894,5.883318241506832,2.09036588666451,0.0
MEAN,-,16.028754842096472,7.3270142749005025,5.768476753918801,0.8374863237526772,0.0
""",
"fewview_test": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,fewview_test,18.51983235506069,6.710896207691665,5.622396257710374,0.45868530307683764,0.0
backpack,fewview_test,15.329616295156082,9.704246779430184,6.021398266902823,0.5274631579925675,0.0
ball,fewview_test,16.999140797902346,6.393148333684946,6.167099298585788,0.42074640466733093,0.0
banana,fewview_test,17.20449002482513,6.2347690237546765,5.337301584435467,0.5906480660508107,0.0
baseballbat,fewview_test,20.598735999896142,6.724621984421882,5.929346230877072,0.46383516633969724,0.0
baseballglove,fewview_test,16.250018316676424,8.485414452103313,5.35050821728197,0.5755057054113818,0.0
bench,fewview_test,13.380691505741307,6.217615311139159,5.389882231932645,0.8591881917970259,0.0
bicycle,fewview_test,15.804150486121728,8.539006404409536,7.293404052140095,0.7740858337090635,0.0
book,fewview_test,14.350489743207989,5.356299926470255,5.138131270946916,0.6249600811612394,0.0
bottle,fewview_test,17.257503711230473,7.332068784914889,5.825424785199224,1.0062512850600411,0.0
bowl,fewview_test,12.7586871865527,5.952472495887487,7.350451995400975,0.7734948803009338,0.0
broccoli,fewview_test,17.69069033947863,8.250871950138103,5.718669980890903,0.5437043438960382,0.0
cake,fewview_test,14.809462963950144,6.142164342026519,6.145654847812541,0.45489466623242036,0.0
car,fewview_test,11.914391205648087,6.5335541836879925,5.90360267479956,0.9021454444786102,0.0
carrot,fewview_test,20.060924545297425,6.219697054467009,5.261149123525815,0.7081597814658059,0.0
cellphone,fewview_test,21.520117285013956,10.847631110964242,5.41747877060995,1.0517241006106035,0.0
chair,fewview_test,14.691657730804202,8.959579180137167,6.878377818012938,0.8045192519054911,0.0
couch,fewview_test,11.545670382508696,8.419983656626247,6.902446179473004,0.6761085327114593,0.0
cup,fewview_test,17.79448614165711,6.495705819546957,5.5050360165654855,0.8834131631626546,0.0
donut,fewview_test,18.596152225400257,6.892531195772306,6.240000810567556,0.5443665622620474,0.0
frisbee,fewview_test,14.370690470903668,6.048295011020775,6.136056575421687,0.4830201400666513,0.0
hairdryer,fewview_test,18.47390481689051,7.494774772300304,5.743646634555602,0.5239972887128962,0.0
handbag,fewview_test,13.87987101022776,8.280409779606966,6.572322491579377,0.6866448922525301,0.0
hotdog,fewview_test,18.436410464732152,7.713564800659037,5.859372904290447,0.5873852722036716,0.0
hydrant,fewview_test,14.768617799865435,5.67036284794227,5.71565321761019,0.9328092564314482,0.0
keyboard,fewview_test,18.875163364703024,10.97846088231997,5.392007807994692,0.42114457863505195,0.0
kite,fewview_test,12.882975207164943,6.079375329369365,5.243720977367847,0.571440938913041,0.0
laptop,fewview_test,16.68965246676936,9.765618650745138,6.127183977142236,0.8968296529628422,0.0
microwave,fewview_test,13.859058432153368,8.649172226048128,6.809269971869398,0.8740670698190732,0.0
motorcycle,fewview_test,12.922201328542098,7.659321482648036,5.3469570020173816,0.7923491167407205,0.0
mouse,fewview_test,25.03083236821661,10.870194079196883,5.61381320415904,0.5803283306516662,0.0
orange,fewview_test,17.906264108511905,5.863058031859002,5.902648030774557,0.4927651700044394,0.0
parkingmeter,fewview_test,24.486359595107576,10.777998512312754,4.875545759481984,3.9189161735406275,0.0
pizza,fewview_test,15.25053153218815,6.195657831341678,5.888809317232928,0.5366542850357786,0.0
plant,fewview_test,14.533347345876026,8.213483475587314,5.9657101837783895,0.8745105580745663,0.0
remote,fewview_test,18.685696193857062,9.167126712684974,5.283444994288521,0.5784209284648094,0.0
sandwich,fewview_test,14.954638830523134,5.489779040424508,6.203690658497073,0.582476274688696,0.0
skateboard,fewview_test,18.921604245076754,8.111335322871586,4.540996792864179,0.8144729054641098,0.0
stopsign,fewview_test,20.83021952727707,7.7066182145576425,5.596606825038416,6.195708155269956,0.0
suitcase,fewview_test,14.568523293458965,8.872585021337093,5.526936386940414,0.5437482494754128,0.0
teddybear,fewview_test,13.184137897313038,5.667378086474551,5.638538121962938,0.6289599526865502,0.0
toaster,fewview_test,15.398766247640951,8.138341096517484,6.073562974743127,0.7335666912630792,0.0
toilet,fewview_test,10.138714105703048,3.8756171226863025,5.85450160774978,0.7892172212095283,0.0
toybus,fewview_test,15.925097991923954,6.517829456639026,5.691133527297476,0.6022958688384993,0.0
toyplane,fewview_test,16.703705769834098,5.323541429433026,5.46165954412417,0.5639341931778066,0.0
toytrain,fewview_test,17.859279914562713,7.8933999002371715,5.604032948369101,0.6932112812874591,0.0
toytruck,fewview_test,16.971557700694344,7.745719186191729,5.794916102483104,0.564653671235697,0.0
tv,fewview_test,18.037750946556894,13.741247943038163,8.747561838523023,0.5162819237405952,0.0
umbrella,fewview_test,13.092407842058238,6.756963662911218,5.447907114523638,0.534506784839016,0.0
vase,fewview_test,18.54297573271471,8.090029952142554,5.668374190385807,0.84122947818443,0.0
wineglass,fewview_test,16.386668940524114,5.5524702294978345,5.735686759902533,1.4353355366647544,0.0
MEAN,-,16.463618328111792,7.555333495840728,5.871765271698825,0.8516623875064206,0.0
""",
"manyview_dev": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,manyview_dev_0,18.264030492114536,8.350223131127144,4.366539721003419,0.4195637484678012,0.0
apple,manyview_dev_1,14.137138507072345,6.6045994842301345,6.240087240624211,0.43567804409070654,0.0
ball,manyview_dev_0,14.673712693605873,6.091306495279248,5.217217027846326,0.35927968102112323,0.0
ball,manyview_dev_1,11.090845071075146,4.64095367064294,2.463653189968876,0.30228020972164427,0.0
bench,manyview_dev_0,13.333540945296608,4.137188797564715,3.844656341335867,0.8008696769825814,0.0
bench,manyview_dev_1,11.474174975542255,3.892151505117967,4.14563643434561,0.8577265682977291,0.0
book,manyview_dev_0,13.964168705937992,5.302433873449493,5.950633752149304,0.668803861808978,0.0
book,manyview_dev_1,12.398406799192342,4.119572830245314,6.039375672561894,0.8608240982086351,0.0
bowl,manyview_dev_0,16.958798002755774,4.9461020198227335,5.578702964374623,0.6690737351712432,0.0
bowl,manyview_dev_1,12.420483353954074,5.756645234213993,6.069489156010504,0.5819949787763078,0.0
broccoli,manyview_dev_0,19.630737300870244,9.406282525085935,6.402535226376115,0.7907156923061898,0.0
broccoli,manyview_dev_1,18.781287064441447,8.09672300742875,4.67134680549106,0.4626196557341922,0.0
cake,manyview_dev_0,14.799043006158593,5.867235047104056,5.7329760554862945,0.5205964759006821,0.0
cake,manyview_dev_1,17.84162321617,9.41822453353167,3.7158681607815254,0.3612821873000541,0.0
donut,manyview_dev_0,19.315033141413654,9.455566547834058,3.910254156226572,0.5413953368124613,0.0
donut,manyview_dev_1,22.26734997183049,10.174649831308487,4.199195894665875,0.5521516658527057,0.0
hydrant,manyview_dev_0,14.599159376924849,5.655154414726878,5.289620369144585,0.9737327772204973,0.0
hydrant,manyview_dev_1,14.544431000855953,5.876377992594626,4.506377178812374,1.0210153410111495,0.0
mouse,manyview_dev_0,22.553107676356586,12.793445604091437,5.927286492328659,0.5816200334131308,0.0
mouse,manyview_dev_1,17.89414321396086,8.956320087603723,7.097351162295129,0.5222896946353802,0.0
orange,manyview_dev_0,13.732343455171254,5.052956697685929,5.679024711561304,0.40213060027513875,0.0
orange,manyview_dev_1,14.71190574360874,4.956667990371484,5.836996460679712,0.43328379232231895,0.0
plant,manyview_dev_0,17.56722473025224,10.851111767732277,6.940102616941581,0.9601928359930311,0.0
plant,manyview_dev_1,18.62091024389777,11.114146143571679,8.919832772445316,0.845715675126882,0.0
remote,manyview_dev_0,12.004470911615606,2.3372367853347664,5.928692360063941,0.6355222400483482,0.0
remote,manyview_dev_1,13.035720177392095,4.368321832863184,3.7645273565115303,0.6257342864206513,0.0
skateboard,manyview_dev_0,14.087374862144243,6.183930758291541,7.7026533167035085,0.7381270587952287,0.0
skateboard,manyview_dev_1,15.24606555170737,6.935641480347134,6.728247832458047,0.6846367731825937,0.0
suitcase,manyview_dev_0,13.819257223346327,5.727869083939035,5.9663188950446795,0.42728104332046707,0.0
suitcase,manyview_dev_1,23.33527836247522,12.70130752964975,5.440617175698944,0.7376517524662343,0.0
teddybear,manyview_dev_0,15.310590723595963,7.5183318102880765,5.187722505560557,0.6132311702409632,0.0
teddybear,manyview_dev_1,19.00287693135702,11.380410989980264,5.372428296399181,0.655451568067443,0.0
toaster,manyview_dev_0,16.09490094737935,7.357336873218335,5.733018822009381,0.6335824697011363,0.0
toaster,manyview_dev_1,13.391233953784758,6.32606222531527,6.035255066975607,0.7543408733149064,0.0
toytrain,manyview_dev_0,14.60365232137707,8.252354438191217,7.28055045581793,0.5177963318470418,0.0
toytrain,manyview_dev_1,20.508004149463403,10.310151926704073,8.745624247957407,0.4164560185628414,0.0
toytruck,manyview_dev_0,18.495843812347488,9.077851138541167,4.742593752879244,0.8234759152694971,0.0
toytruck,manyview_dev_1,12.550467820571148,5.368998580430165,6.689171662380995,0.581289871598415,0.0
vase,manyview_dev_0,18.188943183563104,9.441252383753767,3.3505357321672142,0.7542355580664746,0.0
vase,manyview_dev_1,18.434184156563,9.303826519080554,6.071437833814365,0.9019223769623579,0.0
MEAN,-,16.092061594428568,7.352673089707325,5.58710387189748,0.635639291857879,0.0
""",
"manyview_test": """
Category,Subset name,psnr_masked,psnr_fg,psnr_full_image,depth_abs_fg,iou
apple,manyview_test_0,16.22478731544839,6.660985912339718,8.662890866941595,0.5735152991789598,0.0
backpack,manyview_test_0,18.664239087697137,12.092836660079621,3.9911394799946835,0.7187691122198704,0.0
ball,manyview_test_0,17.053273275949497,11.47813547143793,5.494760070704971,0.24760313752451854,0.0
banana,manyview_test_0,19.09250116156104,5.624412642679121,4.915562631182255,0.6388887597635459,0.0
baseballbat,manyview_test_0,17.662719299079523,3.56448996833759,6.856655466723437,0.5858372717711078,0.0
baseballglove,manyview_test_0,15.822024491958919,9.008496845518556,4.958078518403922,0.517665349356982,0.0
bench,manyview_test_0,16.177405149477067,5.64144135201049,6.639758049666188,0.9396015318702626,0.0
bicycle,manyview_test_0,18.929300038845177,8.384269505927424,4.978158575183426,0.7192708133061682,0.0
book,manyview_test_0,14.243260388807064,6.680398318324483,5.9082871869853735,0.9097958583065434,0.0
bottle,manyview_test_0,14.627587579689477,5.485474059329347,5.806882899714011,1.2365226740951725,0.0
bowl,manyview_test_0,12.58297015755071,4.721445807873399,6.174942733659999,0.5651215302382757,0.0
broccoli,manyview_test_0,15.348378477682894,9.138928269423888,6.406522886996562,0.46622630548488525,0.0
cake,manyview_test_0,12.406031259153915,9.13497199802905,6.954300602123617,0.7135451548332193,0.0
car,manyview_test_0,10.536444455719398,6.3033794761422826,5.589254154468083,0.6075981188742273,0.0
carrot,manyview_test_0,15.052122330808963,5.001683408210913,6.975324034802911,0.6913476205193215,0.0
cellphone,manyview_test_0,18.548592045129272,5.477199696294225,5.405821575968376,0.8925134146832333,0.0
chair,manyview_test_0,9.288750627933801,5.559044610507649,5.063084903423689,0.5832447059416495,0.0
couch,manyview_test_0,15.542901771081734,10.090205474555033,7.091879909602398,0.530379736402723,0.0
cup,manyview_test_0,14.565042555686277,4.3989084024686305,5.8416712646107225,0.9809843195171222,0.0
donut,manyview_test_0,15.455254561260311,7.186638190791148,6.08943365801032,0.42916104004956795,0.0
frisbee,manyview_test_0,16.030436839496698,8.25580372425949,3.6125508386557295,0.7820506512812717,0.0
hairdryer,manyview_test_0,22.640570140053246,11.702523731191262,4.159711019086314,0.616971255937149,0.0
handbag,manyview_test_0,24.14781075331437,15.091930028917984,5.223221264801334,0.562664145074455,0.0
hotdog,manyview_test_0,12.244917262623947,4.72460505473762,6.9914703226785,0.5147290560374835,0.0
hydrant,manyview_test_0,16.892200853920816,6.5057584631969645,6.307555495359107,0.8690763104982895,0.0
keyboard,manyview_test_0,14.937059706035933,10.816605585432766,4.857196169187754,0.5188802050007122,0.0
kite,manyview_test_0,15.068337896849323,6.205118297721433,5.276287557112783,0.7494832801627337,0.0
laptop,manyview_test_0,14.59345603707514,7.090074167371421,6.2162237610589814,0.7413216109605885,0.0
motorcycle,manyview_test_0,14.442903913583953,8.56222345535462,6.50899995433291,0.7010114811016933,0.0
mouse,manyview_test_0,29.8885518296015,14.145685466149715,5.406173914859613,0.5942925002348606,0.0
orange,manyview_test_0,11.525661011646141,5.745001890928845,5.983235030110308,0.327592487953461,0.0
parkingmeter,manyview_test_0,18.046203929985666,6.461002560728408,5.027716754597319,1.5829406195750064,0.0
pizza,manyview_test_0,15.152783189315754,6.578112135320982,7.482842326935612,0.7078538179251567,0.0
plant,manyview_test_0,20.369369422864448,11.73336728848978,5.490938199184393,0.5563616188902266,0.0
remote,manyview_test_0,21.93996425442841,9.915599775483262,3.2277628694594647,0.8952884887902877,0.0
sandwich,manyview_test_0,14.156122339232516,4.782614236412581,5.172885855269289,0.4726663784145917,0.0
skateboard,manyview_test_0,17.199716318802558,9.3986630162228,6.582697215433262,0.7526901207787688,0.0
suitcase,manyview_test_0,20.5543872349586,15.449636313939182,6.392103915747007,0.5623042520735794,0.0
teddybear,manyview_test_0,15.056483227336162,6.023824258666201,2.385989674021068,0.6859612539860361,0.0
toaster,manyview_test_0,17.538889427176077,10.389092700641873,7.350896986214959,0.6917412312874205,0.0
toilet,manyview_test_0,8.581683038527455,4.304701570881858,5.715072710684154,0.5228074506396895,0.0
toybus,manyview_test_0,13.421701717928093,5.104459961535013,7.832131890256459,0.5177220835646305,0.0
toyplane,manyview_test_0,25.939823270757692,11.015747754038403,5.005751206904976,0.5705696772343116,0.0
toytrain,manyview_test_0,17.831418296523193,7.494011795501741,4.629191510823262,0.6318052729776739,0.0
toytruck,manyview_test_0,20.369297725379987,9.285414438061778,4.844672681479939,0.48828556766453685,0.0
umbrella,manyview_test_0,12.752391495654509,6.657169727823324,2.556125460617257,0.428359657679186,0.0
vase,manyview_test_0,20.277671704818363,6.07655429478755,4.941408622390838,0.8391219139438616,0.0
wineglass,manyview_test_0,19.455250191811363,7.197566433072046,6.442702595780869,3.173690609010777,0.0
MEAN,-,16.64330518875463,7.882212795773946,5.6547484431710435,0.7209548906794958,0.0
"""
}
for task in [CO3DTask.FEW_VIEW, CO3DTask.MANY_VIEW]:
for seq_set in [CO3DSequenceSet.DEV, CO3DSequenceSet.TEST]:
BLANK_PREDICTION_RESULTS[(task, seq_set)] = _read_result_csv(
CSVs[f"{task.value}_{seq_set.value}"]
)
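# Illustrative sketch (not part of the original module): the table is keyed by
# (task, sequence_set) and then by (category, subset_name), with metric names
# as parsed from the CSV header above.
def _demo_blank_prediction_lookup():
    dev_manyview = BLANK_PREDICTION_RESULTS[(CO3DTask.MANY_VIEW, CO3DSequenceSet.DEV)]
    apple = dev_manyview[("apple", "manyview_dev_0")]
    # psnr_masked of a blank prediction on this subset
    assert isinstance(apple["psnr_masked"], float)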
|
co3d-main
|
co3d/challenge/blank_predictions_results.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
setup(
name="cc_net",
version="1.0.0",
packages=["cc_net"],
# metadata to display on PyPI
author="Guillaume Wenzek",
author_email="guw@fb.com",
description="Tools to download and clean Common Crawl",
keywords="common crawl dataset",
url="https://github.com/facebookresearch/cc_net",
license="CC-BY-NC-4.0",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
project_urls={
"Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
"Source Code": "https://github.com/facebookresearch/cc_net",
},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
install_requires=[
"beautifulsoup4>=4.7.1",
"pandas>=0.23.4",
"requests>=2.22.0",
"fasttext>=0.9.1",
"sentencepiece>=0.1.82",
"kenlm @ git+https://github.com/kpu/kenlm.git@master",
"func_argparse>=1.1.1",
"psutil>=5.6.3",
"sacremoses",
"submitit>=1.0.0",
"typing_extensions",
],
extras_require={
"dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
# To use scripts inside cc_net/tools
"tools": ["lxml", "sentence_splitter"],
# Memory-efficient hashset.
# This fork only compiles the kind of dict used by cc_net.
# Full version is at https://github.com/atom-moyer/getpy
"getpy": ["getpy @ git+https://github.com/gwenzek/getpy.git@v0.9.10-subset"],
},
package_data={"cc_net": ["data/*"]},
)
|
cc_net-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net.execution import Executor
# Constant
FILE_DIR = Path(__file__).parent
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
DEFAULT_PIPELINE = [
"dedup",
"lid",
"keep_lang",
"sp",
"lm",
"pp_bucket",
"drop",
"split_by_lang",
]
class Config(NamedTuple):
"""
Mine Common Crawl with the given settings.
config_name
dump: CC dump id
output_dir: working directory
    mined_dir: name of the destination folder, full path will be {output_dir}/{mined_dir}/{dump_id}
    execution: choose how to parallelize the execution
    num_shards: number of shards to split the dump
    num_segments_per_shard: allows downloading a small portion of CC (e.g. for tests)
min_len: remove documents shorter than this (in chars)
hashes_in_mem: number of shards hashes to use for dedup
lang_whitelist: only treat those languages
lang_blacklist: ignore those languages
lang_threshold: remove docs whose top language score is lower than this
    keep_bucket: keep only the chosen perplexity buckets (head, middle, tail, all)
lm_dir: folder containing LMs
lm_languages: only use LMs for the following languages
cutoff: cutoff file to use for split in head/middle/tail
mine_num_processes: number of processes to use for mining
    target_size: size of the final files produced during the `regroup` stage
cleanup_after_regroup: delete intermediary files after regroup
    task_parallelism: max number of tasks to run in parallel
    pipeline: restricts the mining pipeline to the given steps. Order is important!
experiments: (HACK) enable specific experiments in the code
"""
config_name: str = "base"
dump: str = "2017-51"
output_dir: Path = Path("data")
mined_dir: str = "mined"
execution: str = "auto"
num_shards: int = 1600
num_segments_per_shard: int = -1
metadata: Optional[str] = None
min_len: int = 300
hash_in_mem: int = 50
lang_whitelist: Sequence[str] = []
lang_blacklist: Sequence[str] = []
lang_threshold: float = 0.5
keep_bucket: Sequence[str] = []
lm_dir: Path = Path("data/lm_sp")
cutoff: Path = CUTOFF_CSV
lm_languages: Optional[Sequence[str]] = None
mine_num_processes: int = 16
target_size: str = "4G"
cleanup_after_regroup: bool = True
task_parallelism: int = -1
pipeline: Sequence[str] = DEFAULT_PIPELINE
experiments: Sequence[str] = []
cache_dir: Optional[Path] = None
def get_executor(
self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
) -> Executor:
name = "_".join((name, self.config_name, *self.experiments))
return execution.get_executor(
name,
self.output_dir / "logs",
self.execution,
timeout_hour=timeout_hour,
mem_gb=mem_gb,
cpus=cpus,
task_parallelism=self.task_parallelism,
)
def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
dump_cache: Optional[Path] = None
if self.cache_dir:
self.cache_dir.mkdir(exist_ok=True)
dump_cache = self.cache_dir / self.dump
dump_cache.mkdir(exist_ok=True)
return process_wet_file.CCShardReader(
self.dump,
shard=shard,
num_shards=self.num_shards,
num_segments_per_shard=self.num_segments_per_shard,
min_len=self.min_len,
cache_dir=dump_cache,
)
@classmethod
def from_json(cls, json_file: Path) -> "Config":
raw_lines = json_file.read_text().splitlines()
raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
json_config = json.loads("".join(raw_lines))
path_keys = ["cache_dir", "lm_dir", "output_dir"]
for key in path_keys:
if key in json_config:
json_config[key] = Path(json_config[key])
return Config(**json_config)
@property
def will_split(self) -> bool:
return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
def get_lm_languages(self) -> Sequence[str]:
if self.lm_languages is not None:
return self.lm_languages
if self.lang_whitelist:
return self.lang_whitelist
languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
if self.lang_blacklist:
languages = [l for l in languages if l not in self.lang_blacklist]
return languages
def get_mined_dir(self, regroup: bool = False) -> Path:
if self.will_split and not regroup:
return self.output_dir / f"{self.mined_dir}_split" / self.dump
return self.output_dir / self.mined_dir / self.dump
BASE_CONFIG = Config()
BYLANG_CONFIG = Config(
config_name="by_lang",
mined_dir="mined_by_lang",
pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)
REPRODUCE_CONFIG = Config(
config_name="reproduce",
dump="2019-09",
mined_dir="reproduce",
pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
# Optional filtering:
# It won't change much the execution speed, but decreases the disk requirement.
# Restrict languages
lang_whitelist=["fr"],
# Restrict perplexity buckets
# Top languages have been split in perplexity buckets according
# to a Wikipedia trained LM.
# The buckets from low perplexity (good) to high (bad) are:
# ["head", "middle", "tail"]
# Languages without a LM have only one bucket "all".
# It won't change much the execution speed, but decreases the disk requirement.
keep_bucket=["head", "all"],
mine_num_processes=1,
)
TEST_CONFIG = BASE_CONFIG._replace(
config_name="test",
dump="2019-09",
output_dir=Path("test_data"),
execution="local",
num_shards=4,
num_segments_per_shard=1,
hash_in_mem=2,
mine_num_processes=2,
lang_whitelist=["de", "it", "fr"],
target_size="32M",
cleanup_after_regroup=False,
cache_dir=Path("test_data/wet_cache"),
)
PREDEF_CONFIGS = {
"base": BASE_CONFIG,
"by_lang": BYLANG_CONFIG,
"test": TEST_CONFIG,
"test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
"debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
"reproduce": REPRODUCE_CONFIG,
"augment": BASE_CONFIG._replace(
config_name="augment", dump="2019-13", lang_blacklist=["en"]
),
}
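# Illustrative sketch (not part of the original repository): pick one of the
# predefined configs and inspect a couple of derived values; nothing below
# touches the disk or the network.
def _demo_predef_config():
    conf = PREDEF_CONFIGS["test"]
    # the test config whitelists three languages, so no LM directory scan is needed
    assert conf.get_lm_languages() == ["de", "it", "fr"]
    assert conf.get_mined_dir() == Path("test_data") / "mined_split" / "2019-09"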
def tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def finalize(tmp_output: Path, output: Path) -> None:
if not tmp_output.exists():
warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
return
tmp_index = tmp_output.parent / (tmp_output.name + ".index")
tmp_output.rename(output)
if tmp_index.exists():
tmp_index.rename(output.parent / (output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
        assert len(row) == n, f"Found tuple of len({len(row)}), expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
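# Illustrative sketch (not part of the original repository): _transpose turns a
# list of (shard, output) pairs into parallel argument lists for the executor.
def _demo_transpose():
    shards, outputs = _transpose([(0, Path("a.bin")), (1, Path("b.bin"))])
    assert shards == [0, 1]
    assert outputs == [Path("a.bin"), Path("b.bin")]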
def hashes(conf: Config) -> List[Path]:
"""Computes hashes for each shard."""
hashes_dir = conf.output_dir / "hashes" / conf.dump
outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if not missing_outputs:
return outputs
hashes_dir.mkdir(parents=True, exist_ok=True)
# With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
# overhead due to how the dynamic allocation works.
ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
    # Wait a bit so that files appear on the disk.
time.sleep(20)
assert all(o.exists() for o in outputs)
return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
tmp_output = tmp(output)
jsonql.run_pipes(
dedup.HashesCollector(field="raw_content", output=tmp_output),
inputs=conf.get_cc_shard(shard),
)
finalize(tmp_output, output)
return f"Hashed {output}"
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
"""Remove dups, run LID and LMs, and split by lang and quality."""
mined_dir = conf.get_mined_dir()
if conf.will_split:
        # Give directories when splitting
outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
else:
# Files otherwise
outputs = [
mined_dir / f"{shard:04d}.json.gz" for shard in range(conf.num_shards)
]
if "mini_again" in conf.experiments:
mined_dir = conf.output_dir / "mini_again" / conf.dump
outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
# TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
mem_gb = 60 + 1 * conf.hash_in_mem
timeout_hour = 5
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
outputs = [
conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
for h in HASHES_IN_MEM
]
mem_gb = int(max(HASHES_IN_MEM) * 1.2)
timeout_hour = 8
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if "mini_again" in conf.experiments:
missing_outputs = [
(shard, o)
for shard, o in enumerate(outputs)
if shard in [5, 139] and not o.exists()
]
if not missing_outputs:
return outputs
mined_dir.mkdir(parents=True, exist_ok=True)
ex = conf.get_executor(
f"mine_{conf.dump}",
mem_gb=mem_gb,
timeout_hour=timeout_hour,
cpus=conf.mine_num_processes + 1,
)
    # Compute hashes first.
if "dedup" in conf.pipeline:
hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
hashes_files: Iterable[List[Path]] = [
hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
]
else:
hashes_files = repeat([])
ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
assert all(o.exists() for o in outputs)
return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
assert conf.pipeline
tmp_output = tmp(output)
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
hashes_in_mem = shard
hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
shard = 0
cc_shard = conf.get_cc_shard(shard)
steps: Dict[str, Optional[jsonql.Transformer]] = {}
lang_id = Path("bin") / "lid.bin"
steps["lid_before_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
)
steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
steps["lid"] = split_by_lang.Classifier(
model=lang_id,
field="raw_content",
out_field="language",
top=1,
threshold=conf.lang_threshold,
)
steps["lid_after_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
)
if conf.lang_blacklist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
)
elif conf.lang_whitelist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") in set(conf.lang_whitelist)]
)
else:
steps["keep_lang"] = None
tok_field = "tokenized"
steps["sp"] = perplexity.MultiSentencePiece(
{l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
field="raw_content",
output_field=tok_field,
normalize=True,
)
steps["lm"] = perplexity.DocLM(
{l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
field=tok_field,
output_field="perplexity",
normalize=False, # Normalization is done before SentencePiece
# load_method=kenlm.LoadMethod.PARALLEL_READ,
)
steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
steps["drop"] = perplexity.DropKeys(tok_field)
steps["keep_bucket"] = None
if conf.keep_bucket:
steps["keep_bucket"] = jsonql.where(
[lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
)
if "fetch_metadata" in conf.pipeline:
# TODO: better default
assert conf.metadata is not None
steps["fetch_metadata"] = minify.MetadataFetcher(
f"{conf.metadata}/{conf.dump}/"
)
steps["minify"] = minify.Minifier()
pattern = str(tmp_output / "{language}_{bucket}.json.gz")
steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)
steps["split_by_segment"] = jsonql.split(
split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
)
pipeline = filter(None, (steps[s] for s in conf.pipeline))
jsonql.run_pipes(
*pipeline,
inputs=cc_shard,
processes=conf.mine_num_processes,
chunksize=100,
# The splitter takes care of writing to files.
output=tmp_output if not conf.will_split else None,
)
finalize(tmp_output, output)
return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs
all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
if not all_files:
print(f"No .json.gz file found in {all_dirs[0]}")
splits: Dict[str, List[Path]] = defaultdict(list)
for f in all_files:
split = f.name.split(".")[0]
splits[split].append(f)
print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
inputs: List[List[Path]] = []
outputs: List[Path] = []
target_size = jsonql.parse_size(conf.target_size)
for split, files in splits.items():
cuts = list(regroup_module.determine_groups(files, target_size=target_size))
if not cuts:
continue
pattern = f"{split}_????.json.gz"
existing_outputs = sorted(regroup_dir.glob(pattern))
if not conf.cleanup_after_regroup:
# We still have all the inputs so it is safe to overwrite existing outputs.
assert len(existing_outputs) <= len(cuts)
existing_outputs = []
if len(existing_outputs) > 0 and len(cuts) == 1:
# append to existing file if size allows it.
new_size = (
sum(f.stat().st_size for f in cuts[0])
+ existing_outputs[-1].stat().st_size
)
if new_size < target_size:
print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
cuts[0].insert(0, existing_outputs.pop(-1))
n_existing = len(existing_outputs)
for i, cut in enumerate(cuts):
# avoid overwriting existing files.
j = i + n_existing
output = regroup_dir / f"{split}_{j:04}.json.gz"
inputs.append(cut)
outputs.append(output)
print(
str(regroup_dir / pattern),
"->",
len(cuts),
f"shards ({n_existing} already there).",
)
ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
ex(_regroup, repeat(conf), inputs, outputs)
return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
output.parent.mkdir(parents=True, exist_ok=True)
regroup_module.fast_reshard(
inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
)
return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
# check that mining is over.
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs, "Received no dirs to move"
assert all(
d.is_dir() for d in all_dirs
), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."
regroup_dir.parent.mkdir(exist_ok=True)
regroup_dir.mkdir(exist_ok=True)
ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)
def _move_segments(subdir: Path, regroup_dir: Path) -> str:
n = 0
for f in subdir.iterdir():
if not f.is_file() or f.is_symlink():
continue
n += f.name.endswith(".json.gz")
new_name = regroup_dir / f.name
target = new_name.resolve()
assert f.resolve() != target
            # this makes the job idempotent.
f.rename(new_name)
f.symlink_to(target)
if n == 0:
return ""
return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
ex(_move_segments, all_dirs, repeat(regroup_dir))
print(f"Results are in {regroup_dir}")
return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
stats: Dict[str, dict] = {}
for file in sorted(output_dir.glob("*.json.gz")):
fname = "/".join((file.parent.name, file.name))
        # The order of documents is not guaranteed inside a shard, so sort the lines.
lines = sorted(jsonql.open_read(file))
content = "\n".join(lines)
size = len(content)
checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
# first_document = json.loads(lines[0])
stats[fname] = {"size": size, "checksum": checksum}
def dump(x):
return json.dumps(x, indent=2, ensure_ascii=False)
print("*** Stats ***")
stats_raw = dump(stats)
stats_file = FILE_DIR / "data" / "test_stats.json"
if generate:
print("Saving stats to", stats_file)
stats_file.write_text(stats_raw)
return
expected_stats: Dict[str, dict] = {}
if stats_file.exists():
expected_stats = json.loads(stats_file.read_text())
if expected_stats == stats:
print("Everything looks good !")
return
stats_file.with_suffix(".actual.json").write_text(stats_raw)
print("*** Expected Stats ***")
print(dump(expected_stats))
print("*** Diff ***")
for fname in sorted(expected_stats.keys()):
print(fname)
        assert fname in stats, "missing file " + fname
if expected_stats[fname]["size"] != stats[fname]["size"]:
print(
" - Expected size",
expected_stats[fname]["size"],
", size",
stats[fname]["size"],
)
if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
print(
" - Expected checksum",
expected_stats[fname]["checksum"],
", checksum",
stats[fname]["checksum"],
)
def get_main_parser() -> ArgumentParser:
# Generates the 'main' parser by patching a 'Config' parser
p = func_argparse.func_argparser(Config)
# Override defaults value to None, so we know what was set by the user.
# Note that it will keep the original default values in the help message.
p.set_defaults(**{f: None for f in Config._fields})
p.add_argument("--config", type=str, default="base")
p.set_defaults(__command=main)
return p
def main(config: str = "base", **config_as_dict: Any) -> None:
# Use the given 'config' as default value.
config_base = config
if config_base in PREDEF_CONFIGS:
conf = PREDEF_CONFIGS[config_base]
elif Path(config_base).exists():
conf = Config.from_json(Path(config_base))
else:
raise ValueError(
f"Invalid value {config_base} for --config. "
f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
)
conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
print(f"Will run cc_net.mine.main with the following config:", conf)
all_files = mine(conf)
if conf.will_split:
assert all_files
assert all(d.is_dir() for d in all_files)
all_dirs = all_files
if "split_by_lang" in conf.pipeline:
# Only try regrouping if we split the shards.
regroup(conf, all_dirs)
elif "split_by_segment" in conf.pipeline:
# If we split by segment then regrouping is trivial, since segments appear in only one shard.
move_segments(conf, all_dirs)
if conf.config_name == "test":
_validate_test(conf, conf.get_mined_dir(regroup=True))
if __name__ == "__main__":
func_argparse.parse_and_call(get_main_parser())
|
cc_net-main
|
cc_net/mine.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates mono-lingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
return file.parent / ("tmp." + file.name)
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
    """Will dump the normalized opening text of the given Wikipedia.
    Args:
        - file: File containing the Wikipedia dump.
        - output: Output file.
        - n_docs: How many docs to parse
    """
    assert file.exists()
    jsonql.run_pipes(
        functools.partial(extract_opening_text, n_docs=n_docs),
        file=file,
        output=tmp(output) if output else None,
    )
    if output:
        tmp(output).replace(output)
def extract_opening_text(source, n_docs: int = 10_000):
i = 0
for doc in jsonql.read_jsons(source):
if not doc:
continue
text = doc.get("opening_text")
if not text:
continue
yield text_normalizer.normalize(text)
i += 1
if i >= n_docs:
break
def dl(lang: str, output_dir: Path, date: str = None):
"""Download the cirrus extract for the given lang.
See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
Args:
- lang: The Wikipedia code for the language.
- output_dir: Output directory. File will be `{lang}.json.gz`
- date: Date of a specific Cirrus dump.
"""
urls = get_cirrus_urls(date)
assert (
lang in urls
), f"--lang {lang} not found. Available languages are: {urls.keys()}"
assert output_dir, "--output_dir folder needed."
output_dir.mkdir(exist_ok=True)
output = output_dir / (lang + ".json.gz")
print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
wget(urls[lang], output)
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
if date is None:
cirrus_page = BeautifulSoup(
urllib.request.urlopen(CIRRUS_URL), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
dumps.remove("..")
dumps.remove("current")
        # We take the oldest dump since the most recent might be incomplete.
        # The page only links to the N latest dumps so the dump won't be too old.
date = min(dumps)
cirrus_url = "/".join((CIRRUS_URL, date))
print("Will use the Wikipedia dump from:", date, cirrus_url)
cirrus_page = BeautifulSoup(
urllib.request.urlopen(cirrus_url), features="html.parser"
)
urls = {}
for link in cirrus_page.findAll("a"):
match = CIRRUS_DUMP_RE.match(link.get("href"))
if not match:
continue
urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
assert urls, f"No valid download urls found at {cirrus_url}"
return urls
def wget(url: str, output: Path):
subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
tmp(output).replace(output)
assert (
output.stat().st_size > 10_000
), f"File {output} downloaded from {url} looks too small"
if __name__ == "__main__":
func_argparse.main(dl, opening)
|
cc_net-main
|
cc_net/get_wiki_cirrus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
NEWLINE = " N3WL1N3 "
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
"""Parser shared by all commands to get input/output files."""
parser = argparse.ArgumentParser(add_help=False)
file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
parser.add_argument("-o", "--output", type=Path, default="-")
parser.add_argument("--processes", type=int, default=1)
return parser
def get_parser():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them"
)
subparsers = parser.add_subparsers()
def add_subparser(function, arguments):
doc = function.__doc__.split("\n")[0]
p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
p.set_defaults(command=function)
for k, v in arguments.items():
p.add_argument(k, **v)
add_subparser(
select,
{
"columns": dict(nargs="+", help="Extract the value of the given fields"),
"--skip_empty": dict(
action="store_true", help="Skip lines without the requested fields"
),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE,
help="Replace newlines found in the text by the given string",
),
},
)
add_subparser(
where,
{
"clauses": dict(nargs="+", help=""),
"--requires": dict(
action="append", help="Python module required by the clauses code."
),
},
)
add_subparser(
merge,
{
"columns": dict(nargs="+", help=""),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE, help="Replace the given string by actual newlines"
),
},
)
add_subparser(
describe,
{
"columns": dict(nargs="*", help=""),
"--bins": dict(
default="auto", help="Number of bins for computing the histograms"
),
"--cumulative": dict(
action="store_true", help="Compute cumulative histograms"
),
"--weights": dict(type=str, help="Column used to weight histograms"),
},
)
add_subparser(split, {"--pattern": dict(type=str)})
add_subparser(shard, {})
return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
def main(raw_args):
parser = get_parser()
pipeline = []
file = "-"
output = "-"
processes = 1
for args_group in _split_array(raw_args, "--"):
args = vars(parser.parse_args(args_group))
command = args.pop("command")
file = args.pop("file") or file
output = args.pop("output") or output
processes = args.pop("processes") or processes
pipeline.append(as_pipe(command, args))
if not pipeline:
parser.print_help()
return
run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
class Transformer:
"""
Wrapper around functions transforming documents.
This allows `run_pipes` to automatically parallelize the pipeline.
Provides:
* Automatic logging. Logging can be changed with the `summary` method.
        Logging frequency is set with `_log_freq` (in seconds) or the $JSONQL_LOG_FREQ env variable.
* Automatic parallelization without pickling. The transformers are shared
across processes, and the object is usually not pickled.
* Basic pickling / unpickling in case it's still needed.
By default will only pickle the arguments passed to the constructor.
    * Delayed initialization. Internal state which is not picklable should be set
inside the `_prepare` function.
"""
parallelisable: bool = True
expect_json: bool = False
warn_when_pickling: bool = False
ready: bool = False
def __init_subclass__(cls, expect_json: bool = None):
"""Detects if the subclass expects json as input."""
spec = inspect.getfullargspec(cls.do)
if expect_json is None:
expect_json = spec.annotations.get(spec.args[1], None) == dict
cls.expect_json = expect_json
def __new__(cls, *args, **kwargs):
"""Creates the transformer and save the arguments passed to the constructor."""
t = super().__new__(cls)
Transformer.__init__(t, args, kwargs)
return t
def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
"""
Init the transformer counters.
If state_args/state_kwargs are set they will override whatever was
originally passed to the subclass constructor.
"""
if state_args is not None:
self.__args = state_args
if state_kwargs is not None:
self.__kwargs = state_kwargs
self.start_time = time.time()
self.__last_log = self.start_time
self.processed = 0
        # Log every 5 min unless specified otherwise.
self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
self.__cls = type(self)
self._logger = logging.getLogger(self.__cls.__name__)
def __call__(self, x):
assert self.ready, f"{self} is not ready."
if x is None:
return
y = self.do(x)
self.processed += 1
if time.time() - self.__last_log > self._log_freq:
self.log_summary()
return y
def do(self, x):
raise NotImplementedError(f"'do' not implemented in {type(self)}")
def summary(self) -> List[str]:
return [self.speed_summary()]
def speed_summary(self) -> str:
delay = time.time() - self.start_time
h = delay / 3600
s = self.processed / delay
return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
def log(self, message):
self._logger.info(message)
def log_summary(self) -> None:
if not self.ready:
self.log("Not ready.")
return
summ = self.summary() or []
for line in summ:
self.log(line)
self.__last_log = time.time()
def map(self, source: Iterable) -> Iterator:
if self.ready:
for x in source:
yield self(x)
# since we have been prepared by caller,
# caller is also responsible for calling `close`.
return
else:
with self:
for x in source:
yield self(x)
def __getstate__(self) -> Tuple[tuple, dict, bool]:
return (self.__args, self.__kwargs, self.expect_json)
def __setstate__(self, state: Tuple[tuple, dict, bool]):
if self.warn_when_pickling:
warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
(args, kwargs, expect_json) = state
        # When unpickling `__new__` isn't called so we have to do it ourselves.
Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
type(self).__init__(self, *args, **kwargs)
assert self.expect_json == expect_json
# __setstate__ is called by multiprocessing right before calling
# the object so we need to initialize everything.
self.__enter__()
def _prepare(self) -> None:
pass
def __enter__(self) -> "Transformer":
# In multiprocessing __enter__ is always called twice, so we are idempotent.
# Because we call __enter__ when deserializing this transformer and
# also when the parent transformer is deserialized.
self.start_time = time.time()
if self.ready:
return self
self._prepare()
self.ready = True
return self
def __exit__(self, *args) -> None:
self.close()
self.log_summary()
def close(self) -> None:
pass
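# Usage sketch (illustrative, never used by the pipeline): a minimal Transformer only
# needs `do`; heavy or non-picklable state should be created in `_prepare` so the
# object can be sent to worker processes before being initialized there.
# `_UppercaseField` and its default field name are made-up examples.
class _UppercaseField(Transformer):
    def __init__(self, field: str = "raw_content"):
        super().__init__()
        self.field = field

    def _prepare(self) -> None:
        # A real transformer would load its model / open its resources here.
        pass

    def do(self, doc: dict) -> dict:
        doc[self.field] = str(doc.get(self.field, "")).upper()
        return doc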
def as_pipe(transformer, kwargs):
if isinstance(transformer, type):
return transformer(**kwargs)
return lambda source: transformer(source, **kwargs)
def compose(fns: List[Transformer]) -> Transformer:
if len(fns) == 1:
return fns[0]
return MultiTransformer(fns)
class MultiTransformer(Transformer):
def __init__(self, transformers: List[Transformer]):
super().__init__()
self.transformers = transformers
def __repr__(self) -> str:
pipeline = " | ".join(type(t).__name__ for t in self.transformers)
return f"<{pipeline}>"
def do(self, x):
for t in self.transformers:
x = t(x)
return x
def _prepare(self):
for t in self.transformers:
t.__enter__()
return self
def __exit__(self, *args):
for t in self.transformers:
t.__exit__(*args)
def summary(self):
return itertools.chain(*(t.summary() for t in self.transformers))
class Mapper(Transformer):
def __init__(self, fn):
super().__init__()
self.fn = fn
def do(self, x):
return self.fn(x)
def run_pipe(
command,
kwargs: dict = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
):
kwargs = kwargs or {}
if isinstance(kwargs, argparse.ArgumentParser):
kwargs = vars(kwargs.parse_args())
file = file or Path(kwargs.pop("file", "-"))
output = output or Path(kwargs.pop("output", "-"))
return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
*fns: Union[Transformer, Callable[[Iterable], Iterable]],
inputs: Iterable[dict] = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
processes: int = 1,
chunksize: int = 10_000,
):
"""
Run full document processing pipeline.
- fns: list of functions to run over the documents. Can be:
* `Iterable -> Iterable` function
* jsonql.Transformer instance
Using transformers allow the pipeline to process documents in parallel.
- inputs: iterable to read the documents from
- file: if inputs is not given, will read documents from this file.
- output: writable file like.
- processes: number of processes to use. -1 means all CPU available.
- chunksize: chunksize for multiprocessing.Pool.imap_unordered
"""
expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
if expect_json and inputs is None:
fns = (JsonReader(),) + fns
transformers = []
for t in fns:
if not isinstance(t, Transformer):
break
if not t.parallelisable:
break
transformers.append(t)
pipes = fns[len(transformers) :]
log = logging.getLogger(__name__).info
if inputs is None:
data: Iterable = open_read(file)
else:
data = inputs
if processes == -1:
processes = os.cpu_count() or 0
with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
if transformers:
log(f"preparing {transformers}")
transform = stack.enter_context(compose(transformers))
if processes <= 1:
data = transform.map(data)
else:
p = multiprocessing.current_process()
log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
pool = stack.enter_context(
multiprocessing.Pool(
processes=processes,
initializer=_set_global_transformer,
initargs=(transform,),
)
)
data = pool.imap_unordered(
_global_transformer, data, chunksize=chunksize
)
for fn in pipes:
if isinstance(fn, Transformer):
data = fn.map(data)
else:
data = fn(data)
write_jsons(data, output)
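# Usage sketch (illustrative, never called): run a tiny pipeline over an in-memory
# iterable. `where` and `Mapper` are defined elsewhere in this module; the "text"
# field and the documents are made up.
def _run_pipes_example() -> None:
    docs = [{"text": "short"}, {"text": "a longer document"}]
    run_pipes(
        where(["len({text}) > 8"]),
        Mapper(lambda doc: {**doc, "n_chars": len(doc["text"])}),
        inputs=docs,
        output=None,  # None writes the resulting json lines to stdout
        processes=1,
    )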
# Allows to share a transformer across subprocesses.
# Used by `run_pipes`
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
global _GLOBAL_TRANSFORMER
p = multiprocessing.current_process()
logging.info(
f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
)
assert transformer.ready, f"{transformer} isn't ready"
_GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
assert _GLOBAL_TRANSFORMER is not None
return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
return (line.strip("\n") for line in open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
reader = JsonReader(strict=strict)
lines = open_read(file)
for line in lines:
if line is None:
continue
yield reader(line)
reader.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
eol = os.linesep
with open_write(file) as o:
for res in source:
if res is None:
continue
if isinstance(res, dict):
json.dump(res, o, ensure_ascii=False)
o.write(eol)
continue
if isinstance(res, str):
res = res.rstrip("\n")
print(res, file=o)
class JsonReader(Transformer):
def __init__(self, strict: bool = False):
super().__init__()
self.ready = True
self.strict = strict
self.num_errors = 0
def do(self, line: str) -> Optional[dict]:
if line is None:
return None
if isinstance(line, dict):
return line
line = line.rstrip("\n")
if not line:
return None
try:
return json.loads(line)
except json.decoder.JSONDecodeError as e:
self.log_error(e)
if self.strict:
raise
return None
def log_error(self, e: json.decoder.JSONDecodeError):
self.num_errors += 1
if self.num_errors > 10:
return
MAX_LEN = 80
snippet, snippet_len = e.doc, len(e.doc)
col = e.pos
if snippet_len > MAX_LEN:
if col < MAX_LEN:
start = 0
elif snippet_len - col < MAX_LEN:
start = snippet_len - MAX_LEN
else:
start = col - MAX_LEN // 2
snippet = e.doc[start : start + MAX_LEN]
col = col - start
logging.warning(
"\n".join(
[
f"Invalid json (length={len(e.doc)}) {e}",
snippet,
" " * (col - 1) + "^",
]
)
)
def summary(self):
summ = super().summary()
if self.num_errors > 0:
summ.append(f"Skipped {self.num_errors} invalid json.")
return summ
def compile_column(column, newline):
if callable(column):
return column
if column == "*":
return json.dumps
if re.match(r"[_a-z][_a-z0-9]*", column):
def extract_col(doc):
v = doc.get(column, "")
if isinstance(v, str) and newline != "\n":
v = v.rstrip("\n").replace("\n", newline)
return v
return extract_col
return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
"""Yields the content of the requested columns."""
column_parsers = [compile_column(c, newline) for c in columns]
for doc in read_jsons(lines):
values = []
empty = True
for parse_col in column_parsers:
v = parse_col(doc)
values.append(str(v) or "")
empty = empty and v is None
if skip_empty and empty:
continue
yield separator.join(values)
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
if not isinstance(clause, str):
return clause
args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
args_list = list(re.findall(args_re, clause))
if not args_list:
# This is only a warning because you may want to have eg random sampling
# that doesn't depend on the document.
logging.warn(
f"Warning: No variable found in expression: <{clause}>\n"
"Variables should be written inside braces, eg: {language}=='en'"
)
python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
requires = requires or []
modules = {r: importlib.import_module(r) for r in requires}
return eval(f"lambda doc: {python_like}", modules)
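# Example (sketch, never called): a clause like "{language}=='en'" is rewritten into
# `doc.get('language', None)=='en'` and eval'd into a lambda, so command-line clauses
# become plain Python predicates over each json document. The sample doc is made up.
def _compile_expr_example() -> bool:
    keep_english = compile_expr("{language}=='en'")
    return keep_english({"language": "en", "length": 120})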
class where(Transformer):
"""Filters the data using python code.
Ex: `jsonql where 'len({text}) > 100'`
"""
def __init__(
self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
):
super().__init__()
self.raw_clauses = clauses
self.requires = requires
self.n_selected = 0
self.clauses: List[FilterFn] = []
def _prepare(self):
self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
def do(self, doc: dict) -> Optional[dict]:
assert self.clauses
if not doc or not all((c(doc) for c in self.clauses)):
return None
self.n_selected += 1
return doc
def summary(self):
n_selected, n_docs = self.n_selected, self.processed
selectivity = n_selected / n_docs if n_docs else 0
return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
"""Reads tab separated columns and output a json using the given headers.
Headers are of form {key}[%{type}]
{type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
Default type is string.
A special header "_" means interpret this column as json, and append all other
columns to it. Must appear only once and on last position.
Ex:
`echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
`echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
`echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
"""
handle_newlines = lambda s: s.replace(newline, "\n")
type_mapping: Dict[str, Callable] = {
"f": float,
"i": int,
"b": bool,
"s": handle_newlines,
}
type_parsing = [
type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
]
columns = [f.split("%")[0] for f in columns]
doc_index = columns.index("_") if "_" in columns else -1
read_json = JsonReader()
def parse(line):
parts = line.split(separator, len(columns) - 1)
doc: Dict[str, tp.Any] = {}
for i, value in enumerate(parts):
if columns[i] == "_":
doc.update(read_json(parts[doc_index]))
else:
try:
doc[columns[i]] = type_parsing[i](value)
except ValueError:
logging.error(
f"Error when parsing column {i} of line: {line[:100]}..."
)
return doc
for line in lines:
yield json.dumps(parse(line))
class split(Transformer):
"""Split a files in several smaller files based on the value of a field."""
# Not parallelisable since we are writing to files.
parallelisable = False
def __init__(
self,
pattern: Union[Path, str] = None,
split_fn: Callable[[dict], str] = None,
mkdir: bool = False,
):
super().__init__()
assert not (
pattern and split_fn
), "split can't have both a pattern and a split_fn"
if split_fn is not None:
self.split_fn = split_fn
else:
assert pattern, "split need either a pattern or a split_fn"
self.split_fn = self.make_split_fn(str(pattern))
self.mkdir = mkdir
self.o: dict = {}
def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
def do(self, doc):
filename = self.split_fn(doc)
if not filename:
return
o = self.o.get(filename, None)
if o is None:
if self.mkdir:
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.o[filename] = open_write(filename)
print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
def summary(self):
summ = super().summary()
summ.append(f"Found {len(self.o)} splits.")
return summ
def close(self):
for file in self.o.values():
file.close()
def histogram(values, bins, weights):
hist, bins = np.histogram(values, bins=bins)
# n_bins = len(hist)
if weights is not None:
# Bins can't be auto-determined if weights is supplied.
# So we first compute the bins without the weights then recompute
# the histogram with the weights.
hist, bins = np.histogram(values, bins=bins, weights=weights)
# cumsum = np.cumsum(hist)
# total = cumsum[-1]
# for i in range(n_bins - 1):
# if cumsum[i] / total > 0.9:
# useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
# new_bins = np.append(useful_range, [bins[-1]])
# return np.histogram(values, bins=new_bins, weights=weights)
return hist, bins
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
n = sum(hist)
max_h = max(hist)
out = []
for i, h in enumerate(hist):
h_size = 80 * h // max_h
dh_size = 80 * (h - hist[i - 1]) // max_h
if h_size == 0 or dh_size == 0:
continue
bar = "█" * h_size
out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
out.append(f"{bins[-1]:8.3f}")
return out
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
out = []
documents = stats[ALL_DOCUMENTS]
count = stats.get(key, 0)
r = count / documents if documents else 0
out.append(f"Field {key} saw {count} times ({r:5.1%})")
length = stats.get(key + ".length", None)
avg_length = length // count if length else 0
if length is not None:
out[-1] += f", average length is {length // count}"
values = stats.get(key + ".val", None)
if values:
out[-1] += f", histogram is: (bins={bins})"
if weights:
if weights not in stats:
logging.warn(f"Warning: weights column {weights} not found.")
if weights + ".val" not in stats:
logging.warn(
f"Warning: weights column {weights} is not a numeric column."
)
weights = stats.get(weights + ".val")
hist, bins = histogram(values, _parse_bins(bins), weights)
if cumulative:
hist = np.cumsum(hist)
out += bar_chart(hist, bins)
cnt = stats.get(key + ".cnt", None)
if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
out[-1] += ", top 100 labels:"
for label, n in cnt[:100]:
if n < 5:
continue
out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
return out
def describe(source, columns=None, weights=None, **kwargs):
"""Compute some statistics about a dataset.
Stats can be restricted to a subset of columns."""
MAX_HIST_SIZE = 100_000_000
MAX_CNT_SIZE = 1000
stats = {ALL_DOCUMENTS: 0}
needed = columns + [weights] if columns else None
for doc in read_jsons(source):
stats[ALL_DOCUMENTS] += 1
for k, v in doc.items():
if needed and k not in needed:
continue
stats[k] = get_or_set(stats, k, 0) + 1
if isinstance(v, str):
stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels
continue
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
if v in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[v] += 1
elif type(v) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values.append(v)
elif type(v) is list and len(v) and type(v[0]) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values += v
elif type(v) is dict:
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
for label in v:
if label in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[label] += 1
documents = stats[ALL_DOCUMENTS]
yield f"Stats computed on {documents} documents:"
for k in stats:
if columns and k not in columns:
continue
if "." in k or k == ALL_DOCUMENTS:
continue
for line in display_stats(stats, k, weights=weights, **kwargs):
yield line
def shard(lines):
"""Shard a file in several smaller ones."""
    # The creation of the shards is handled in a generic way. Do we need this?
return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
if key not in dictionary:
dictionary[key] = default
return dictionary[key]
class SimpleIO(Protocol):
"""A subset of methods from TextIO."""
def close(self) -> None:
...
def write(self, line: str) -> int:
...
def __enter__(self) -> "SimpleIO":
...
def __exit__(self, exc_type, exc_value, traceback):
...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
"""Open the given file, list of files or files matching the given glob and read lines.
`filename` is None or "-" -> reads from stdin
    `filename` is a Path / str -> interprets filename as a glob and opens files matching it
`filename` is a list -> opens sequentially all files from the list using `open_read`
`filename` is something else -> returns the object wrapped in a `nullcontext`
    This allows passing already opened files or iterables.
`open_read` will decompress gzip files, given they have ".gz" suffix.
"""
if filename is None:
return sys.stdin
if isinstance(filename, list):
assert isinstance(filename[0], Path)
if len(filename) == 0:
return []
if len(filename) > 1:
return _yield_from(filename)
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
if filename.startswith("http://") or filename.startswith("https://"):
return open_remote_file(filename)
filename = Path(filename)
if not isinstance(filename, Path):
# we might have received an iterable, return it unmodified.
return filename # type: ignore
# Expand glob patterns only when reading
files = [Path(f) for f in sorted(glob.glob(str(filename)))]
if len(files) > 1:
return _yield_from(files)
if len(files) == 1:
filename = files[0]
assert isinstance(filename, Path)
if filename.name.endswith("]"):
return block_reader(filename)
logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
if filename.suffix == ".gz":
file: TextIO = gzip.open(filename, "rt") # type: ignore
else:
file = open(filename, "rt")
return _close_when_exhausted(file)
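# Usage sketch (illustrative, never called): `open_read` accepts a glob, a ".gz" file,
# a list of paths, an already opened iterable, or None/"-" for stdin, and always yields
# lines. The glob below is made up.
def _open_read_example() -> int:
    n_lines = 0
    for _line in open_read("data/mined/2019-09/*.json.gz"):
        n_lines += 1
    return n_lines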
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
for file in files:
yield from open_read(file)
def open_write(
filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
"""Open the given file, list of files or files matching the given glob.
The return value is a ContextManager meant to be used inside a `with` block:
```
with open_write("foo.txt") as o:
        ...
    ```
    Write mode:
        replaces "?" in the filename with numbers ranging from 0 to 9, generating files of size `max_size`.
If filename ends with ".gz", creates a blocked gzip file with random access.
"""
if filename is None:
return contextlib.nullcontext(sys.stdout)
if isinstance(filename, list):
if len(filename) > 1:
return MultiFile(filename, "w", max_size)
else:
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
filename = Path(filename)
if not isinstance(filename, Path):
assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
# We return a 'TextIO' even though we only check for `.write` method,
# this works better with eg `print`.
return contextlib.nullcontext(tp.cast(TextIO, filename))
mode = "wt"
if "?" in filename.name:
return sharded_file(filename, mode, max_size)
logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
# TODO: should we use another format ?
if filename.suffix == ".gz":
return BlockedGzipWriter(Path(filename), mode, block_size="64M")
return open(filename, "wt")
def parse_size(size):
unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
unit = size[-1].upper()
assert (
unit in unit_map
), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
return int(size[:-1]) * unit_map[unit]
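# Example (sketch): parse_size("4G") == 4 * 1024 ** 3 and parse_size("64M") == 64 * 1024 ** 2.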
class MultiFile(SimpleIO):
def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
self.name = str(files)
self.mode = mode
self.files = iter(files)
self.max_size = parse_size(max_size)
self.current_handle: Optional[TextIO] = None
self.current_block_size = 0
self._open_next_handle() # Opening 1st handle allows to write directly.
def write(self, content) -> int:
# Avoid splitting newlines to a new file.
# use current_block_size since it's faster than `tell()`
if content != "\n" and self.current_block_size >= self.max_size:
self._open_next_handle()
if self.current_handle is None:
raise Exception("No more files to write to...")
written = self.current_handle.write(content)
self.current_block_size += written
return written
def _open_next_handle(self) -> bool:
self.close()
file = next(self.files, None)
if file is None:
return False
self.current_handle = open_write(file).__enter__()
self.current_block_size = 0
return True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def closed(self):
return self.current_handle is None
def close(self):
if self.current_handle is None:
return
# log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
self.current_handle.__exit__(None, None, None)
self.current_handle = None
# not sure it helps since connections are reset anyway.
_session = functools.lru_cache()(requests.Session)
def request_get_content(url: str, n_retry: int = 3) -> bytes:
"""Retrieve the binary content at url.
Retry on connection errors.
"""
t0 = time.time()
logging.info(f"Starting download of {url}")
for i in range(1, n_retry + 1):
try:
r = _session().get(url)
r.raise_for_status()
break
except requests.exceptions.RequestException as e:
# Sleep and try again on error, unless it's a 404.
message = e.args[0] if isinstance(e.args[0], str) else ""
if i == n_retry or "Client Error" in message:
raise e
warnings.warn(
f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
)
time.sleep(10 * 2 ** i)
dl_time = time.time() - t0
dl_speed = len(r.content) / dl_time / 1024
logging.info(
f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
)
return r.content
def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
"""Download the files at the given url to memory and opens it as a file.
Assumes that the file is small, and fetch it when this function is called.
"""
if cache and cache.exists():
return open_read(cache)
# TODO: open the remote file in streaming mode.
# The hard part is that we need to write the content on disk at the same time,
# to implement disk caching.
raw_bytes = request_get_content(url)
content = io.BytesIO(raw_bytes)
if url.endswith(".gz"):
f: TextIO = gzip.open(content, mode="rt") # type: ignore
else:
f = io.TextIOWrapper(content)
if cache and not cache.exists():
# The file might have been created while downloading/writing.
tmp_cache = _tmp(cache)
tmp_cache.write_bytes(raw_bytes)
if not cache.exists():
tmp_cache.replace(cache)
else:
tmp_cache.unlink()
return _close_when_exhausted(f)
def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
folder, name = file_pattern.parent, file_pattern.name
assert "?" in name, f"Can't expand give file_pattern: {file_pattern}"
n = name.count("?")
assert 0 < n < 8
assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}"
assert "r" not in mode
files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
return MultiFile(files, mode, max_size)
class SplitFile:
def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
assert mode == "r"
size = os.path.getsize(filename)
self.handle = open(filename, mode)
start = chunk * size // n_chunks
self.end: int = (chunk + 1) * size // n_chunks
if start > 0:
self.handle.seek(start - 1)
            # Skip incomplete line. This avoids crashing when reading e.g. the middle
# of a unicode char. `self.handle.buffer` is a binary file reader.
self.handle.buffer.readline() # type: ignore
def __enter__(self):
return self
def __iter__(self):
while True:
line = self.handle.readline()
if not line:
return
yield line
if self.handle.tell() >= self.end:
return
def readlines(self):
return list(self.__iter__())
def close(self):
self.handle.close()
def __exit__(self, *args):
self.close()
def get_block_readers(filename: Path, n_readers, mode="t"):
index_filename = filename.parent / (filename.name + ".index")
if not index_filename.exists():
return [gzip.open(filename, "r" + mode)]
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
start = 0
readers = []
for i in range(n_readers):
end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
r = _blocked_gzip_reader(filename, start, end, mode)
readers.append(r)
start = end
return readers
def block_reader(filename: Path) -> Iterable[str]:
root, pattern = str(filename)[:-1].split("[", 1)
assert root.endswith(".gz"), "Can only read block of a .gz file for now."
ii, nn = pattern.strip().split("/")
i, n_readers = int(ii), int(nn)
index_filename = root + ".index"
assert os.path.exists(
index_filename
), f"Index {index_filename} not found for {filename}"
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
    # I'm not sure how to handle the case where there are fewer readers than expected.
# Currently we return empty readers.
start = 0
if i > 0:
start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
end = index[min(i * chunk_per_reader, n_chunks - 1)]
return _blocked_gzip_reader(root, start, end, mode="t")
def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
handle = gzip.open(filename, "r" + mode)
handle.seek(start)
try:
while handle.tell() < end:
line = handle.readline()
if not line:
break
yield line
finally:
handle.close()
class BlockedGzipWriter(MultiFile):
"""Writes a Gzip files which can be read by block.
Decreasing the block size may hurt compression, but provides more split points.
"""
def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
assert "w" in mode
self.filename = Path(filename)
self.index: List[int] = []
self.zipfile: Optional[gzip.GzipFile] = None
super().__init__([], mode, block_size)
def _open_next_handle(self) -> bool:
"""Here we never actually close/open handles,
we just write the end of block sequence."""
if not self.current_handle:
mode = self.mode + "t"
self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
assert isinstance(self.current_handle.buffer, gzip.GzipFile)
self.zipfile = self.current_handle.buffer
return True
# Use Z_FULL_FLUSH to allow random access:
# https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
self.index.append(self.current_handle.tell())
self.current_block_size = 0
return True
def flush(self):
assert self.current_handle is not None
self.current_handle.flush()
def close(self):
if self.current_handle is None:
return
self.current_handle.flush()
self.index.append(self.current_handle.tell())
self.current_handle.close()
self.current_handle = None
index = np.array(self.index, dtype=np.uint64)
with open(str(self.filename) + ".index", "wb") as o:
np.save(o, index)
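# Usage sketch (illustrative, never called): BlockedGzipWriter stores block offsets in
# "<name>.index", so a slice like "<name>[0/2]" can later be passed to `open_read`
# (which dispatches to `block_reader`) to read only the first of two chunks.
# The file name and contents below are made up.
def _blocked_gzip_example() -> None:
    with open_write(Path("corpus.json.gz")) as o:  # returns a BlockedGzipWriter
        for i in range(1000):
            print(json.dumps({"id": i}), file=o)
    for _line in open_read("corpus.json.gz[0/2]"):
        pass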
def grouper(iterable, n):
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if group:
yield group
PROCESS = psutil.Process()
def mem_footprint_gb(pid=None):
rss = PROCESS.memory_info().rss
return rss / 1_000_000_000
def _tmp(output: Path) -> Path:
suffix = "".join(output.suffixes)
suffix = ".tmp" + suffix
prefix = output.name[: -len(suffix)]
_, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
return Path(tmp_path)
@functools.lru_cache()
def _tmp_dir() -> Path:
job_id = os.environ.get("SLURM_JOB_ID")
if job_id:
return Path("/scratch/slurm_tmpdir") / job_id
checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
if checkpoint.exists():
tmp = checkpoint / "tmp"
tmp.mkdir(exist_ok=True)
return tmp
return Path("/tmp")
if __name__ == "__main__":
multiprocessing.set_start_method("fork")
main(sys.argv[1:])
|
cc_net-main
|
cc_net/jsonql.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
def __init__(self, fn: Callable):
self.fn = fn
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def get_executor(
name: str,
log_dir: Path,
execution: str,
timeout_hour: float = 1.0,
mem_gb: int = 1,
cpus: int = 1,
task_parallelism: int = -1,
options: dict = {},
) -> Executor:
execution_mode = execution.split(",")[0]
options.update(
{kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]}
)
if execution_mode == "mp":
warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
execution_mode = "local"
cluster = None if execution_mode == "auto" else execution_mode
# use submitit to detect which executor is available
ex = submitit.AutoExecutor(log_dir, cluster=cluster)
if ex.cluster == "local":
# LocalExecutor doesn't respect task_parallelism
return functools.partial(custom_map_array, ex, task_parallelism)
if ex.cluster == "debug":
return debug_executor
# We are on slurm
if task_parallelism == -1:
task_parallelism = 500
ex.update_parameters(
name=name,
timeout_min=int(timeout_hour * 60),
mem_gb=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
return functools.partial(map_array_and_wait, ex)
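# Usage sketch (illustrative, never called): pick an executor and fan a function out
# over per-shard arguments, one submission per element. The name, log dir and arguments
# are made up; `print` stands in for a real per-shard function.
def _executor_example() -> None:
    ex = get_executor(
        "demo", Path("logs"), execution="local", timeout_hour=0.1, mem_gb=1, cpus=1
    )
    ex(print, ["shard_0", "shard_1"])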
def map_array_and_wait(
ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable
):
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
approx_length = _approx_length(*args)
print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
jobs = ex.map_array(function, *args)
if not jobs:
return
failed_jobs = []
done = 0
total = len(jobs)
job_array_id = jobs[0].job_id.split("_")[0]
print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
for job in submitit.helpers.as_completed(jobs):
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
logging.getLogger().setLevel(logging.DEBUG)
approx_length = _approx_length(*args)
for i, x in enumerate(zip(*args)):
try:
message = function(*x)
except Exception:
try:
import ipdb as pdb # type: ignore
except ImportError:
import pdb # type: ignore
import traceback
traceback.print_exc()
print("")
pdb.post_mortem()
sys.exit(1)
if message is not None:
print(message, f"({i + 1} / {approx_length})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
ex: submitit.AutoExecutor,
parallelism: int,
function: Callable[..., Optional[str]],
*args: Iterable,
) -> None:
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
jobs_args = list(zip(*args))
total = len(jobs_args)
if parallelism < 0:
parallelism = os.cpu_count() or 0
assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
enqueued = 0
done = 0
running_jobs: List[submitit.Job] = []
failed_jobs: List[submitit.Job] = []
while done < len(jobs_args):
# Try to queue more job if we have some bandwidth.
if enqueued < total and len(running_jobs) < parallelism:
running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
enqueued += 1
continue
# Else wait for some job to finish
if not running_jobs:
warnings.warn(
f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
)
break
job = get_next_job(running_jobs)
running_jobs.remove(job)
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def get_next_job(
jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
"""
    Waits for any of the jobs to finish and returns it.
    jobs: list of jobs
    poll_frequency: frequency in seconds at which we check job status
"""
start = time.time()
waiting = False
while True:
for job in jobs:
if job.done():
return job
if not waiting:
job_ids = [j.job_id for j in jobs[:4]]
suffix = "..." if len(jobs) > 4 else ""
print(
f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
)
waiting = True
time.sleep(poll_frequency)
|
cc_net-main
|
cc_net/execution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
HASH_TYPE: Type[np.uint64] = np.uint64
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
"""A dict-like that returns `True` for keys that have been added more than once.
    The API is batched and expects np.array as input. This batching grants better
perf when using the C++ implementation.
"""
dtype: Type[np.uint64] = HASH_TYPE
def __repr__(self):
implementation = type(self).__name__
return f"[{implementation}, len: {len(self)}"
def __len__(self) -> int:
...
def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
...
def __getitem__(self, values) -> np.ndarray:
...
def __setitem__(self, keys, values) -> None:
...
def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
...
def keys(self) -> Iterable[np.uint64]:
...
def __iter__(self) -> Iterator[np.uint64]:
return iter(self.keys())
def add(self, h, contains=None):
"""Add the given keys. First time a key is added the value is set to 0,
then it's set to one."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
if contains is None:
contains = self.__contains__(h)
self.__setitem__(h, contains)
return contains
def merge(self, keys, values):
contains = self.__contains__(keys)
self.__setitem__(keys, contains | values)
def dump(self, filename):
return self.dump_np(filename)
def load(self, filename):
return self.load_np(filename)
def dump_np(self, filename):
kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
with open(filename, "wb") as f:
np.save(f, items)
def load_np(self, filename):
items = np.load(str(filename))
keys = items["k"].copy()
values = items["v"].copy()
self.merge(keys, values)
def dump_np2(self, filename):
keys = np.fromiter(
(k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
)
with open(filename, "wb") as f:
np.save(f, keys)
values = np.fromiter(
(v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
)
with open(str(filename) + ".val", "wb") as f:
np.save(f, values)
def load_np2(self, filename):
keys = np.load(filename)
values = np.load(str(filename) + ".val")
self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
"""Pure python implementation of AbstractDedupHashSet.
This implementation is quite fast, since Python dict are heavily optimized.
"""
def __init__(self, iterable=None):
super().__init__()
global GETPY_WARNING
if GETPY_WARNING:
warnings.warn(
"Module 'getpy' not found. Deduplication will take more RAM."
" Try `pip install cc_net[getpy]"
)
GETPY_WARNING = False
def __contains__(self, values):
"""Returns `True` if the object has been added at list once."""
contains_point = super().__contains__
return np.fromiter(
map(contains_point, values), count=len(values), dtype=np.uint8
)
def __getitem__(self, values):
"""Returns `True` if the object has been added at list twice."""
get_point = super().get
return np.fromiter(
map(lambda x: get_point(x, False), values),
count=len(values),
dtype=np.uint8,
)
def __setitem__(self, keys, values):
assert len(keys) == len(values)
for k, v in zip(keys, values):
dict.__setitem__(self, k, v)
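# Usage sketch (illustrative, never called): the hash sets take numpy arrays of uint64
# hashes; `add` returns, for each hash, whether it had already been seen in a *previous*
# batch, which is how duplicated lines are flagged downstream. The hash values are made up.
def _dedup_hashset_example() -> np.ndarray:
    seen = NaiveHashSet()
    seen.add(np.array([12, 34], dtype=HASH_TYPE))  # nothing seen yet -> [0, 0]
    return seen.add(np.array([12, 56], dtype=HASH_TYPE))  # -> array([1, 0], dtype=uint8)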
try:
import getpy as gp # type: ignore
class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
"""C++ backed implementation of AbstractDedupHashSet.
This implementation is slightly slower than the Python one but uses
3x less RAM.
See https://github.com/atom-moyer/getpy.
"""
def __init__(self):
super().__init__(HASH_TYPE, np.uint8, default_value=False)
def __contains__(self, h):
"""Returns `True` if the object has been added at list once."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
c = gp.Dict.__contains__(self, h)
c.dtype = np.uint8
return c
def dump(self, filename):
return self.dump_gp(filename)
def load(self, filename):
return self.load_gp(filename)
def dump_gp(self, filename):
return gp.Dict.dump(self, str(filename))
def load_gp(self, filename):
"""Override gp.Dict.load, to correctly merge values instead of overwriting."""
other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
other.load(str(filename))
n = len(other)
keys = np.fromiter(
(k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
)
values = np.fromiter(
(v for (k, v) in other.items()), dtype=np.uint8, count=n
)
self.merge(keys, values)
FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
GETPY_WARNING = True
FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
start = time.time()
function(*args)
end = time.time()
print(message, f"took {end - start:.0f}s")
def compare_load(*filenames):
assert filenames, "No file given"
def load_list():
hashes = []
for f in filenames:
h = FlatHashSet()
h.load(f)
print(f"Loaded {h} from {f}.")
hashes.append(h)
return hashes
def load_all(load, ext):
hashes = FlatHashSet()
for f in filenames:
load(hashes, f + ext)
def dump_all(hashes, dump, ext):
for h, f in zip(hashes, filenames):
dump(h, f + ext)
hashes = load_list()
dump_gp = getattr(FlatHashSet, "dump_gp")
if dump_gp is not None:
timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
timeit(
"Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
)
load_gp = getattr(FlatHashSet, "load_gp")
if load_gp is not None:
timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
compare_load(*sys.argv[1:])
|
cc_net-main
|
cc_net/flat_hash_set.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
HASH_SIZE = 4
HASH_TYPE = np.uint32
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to the dedup methods but uses 32-bit hashes.
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
h = HASH_SIZE
return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
return base64.b64encode(b"".join(hashes)).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
all_hashes = base64.b64decode(compact)
res = []
assert len(all_hashes) % HASH_SIZE == 0
for i in range(len(all_hashes) // HASH_SIZE):
chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
res.append(chunk)
return res
def encode_line_ids(line_ids: Sequence[int]) -> str:
arr = np.array(line_ids, dtype="<u2")
return base64.b64encode(arr.tobytes()).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
ids_bytes = bytearray(base64.b64decode(compact))
    return np.ndarray(len(ids_bytes) // 2, dtype="<u2", buffer=ids_bytes)
def get_doc_key(digest: str) -> int:
assert digest.startswith("sha1:")
h = base64.b32decode(digest[5:])
return _b2i(h[:HASH_SIZE])
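# Illustrative round trip of the helpers above (added example, not part of the
# original pipeline): line ids are packed as little-endian uint16 and
# base64-encoded, so each kept line costs 2 bytes in the minified metadata.
# For instance, decode_line_ids(encode_line_ids([0, 3, 7])) yields the array
# [0, 3, 7] back.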
class Minifier(jsonql.Transformer):
ready = True
def __init__(self):
self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
def do(self, doc: dict) -> Optional[dict]:
line_ids: List[int] = doc.pop("line_ids")
fields = self.fields
keys = list(doc.keys())
for k in keys:
if k not in fields:
doc.pop(k, None)
p = doc.get("perplexity", 0)
doc["line_ids"] = encode_line_ids(line_ids)
if p:
doc["perplexity"] = round(p, 1)
s = doc.get("language_score", 0)
if s:
doc["language_score"] = round(s, 2)
return doc
class MetadataFetcher(jsonql.Transformer):
"""Reads documents from CC snapshot and join precomputed metadata.
CC snapshots are split in segments. Each segment is 64Mb long.
The metadata must also be stored in segments of the same size and names.
"""
def __init__(self, folder: Union[Path, str]):
self.ready = True
self.metadata: Dict[int, dict] = {}
self._segments: Set[str] = set()
self.read_doc = 0
self.missed_doc = 0
self.missed_par = 0
self.processed_par = 0
if isinstance(folder, str):
# detect path passed as string
if urllib.parse.urlparse(folder).scheme == "":
folder = Path(folder)
assert folder.exists(), f"Metadata folder not found: {folder}"
self.folder = folder
self.segment: str = ""
self.segments_read_twice = 0
def meta_file(self, segment: str) -> str:
file_name = segment.split("/")[-1]
assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
if isinstance(self.folder, str):
return urllib.parse.urljoin(
self.folder, file_name.replace(".warc.wet", ".json")
)
meta_file = self.folder / file_name.replace(".warc.wet", ".json")
assert (
meta_file.exists()
), f"Couldn't find metadata file for segment {segment} at {meta_file}"
return str(meta_file)
def fetch_metadata(self, segment: str) -> None:
meta_file = self.meta_file(segment)
k = get_doc_key
self.metadata = {}
collision = 0
for m in jsonql.read_jsons(meta_file):
key = k(m["digest"])
if key in self.metadata:
collision += 1
self.metadata[key] = m
self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
if collision > 0:
self._logger.warning(f"Found {collision} collisions !")
self.segment = segment
if segment in self._segments:
self.log("Cache miss")
self.segments_read_twice += 1
self._segments.add(segment)
def do(self, doc: dict) -> Optional[dict]:
if self.segment != doc["cc_segment"]:
self.fetch_metadata(doc["cc_segment"])
digest = doc["digest"]
key = get_doc_key(digest)
if key not in self.metadata:
return None
metadata = self.metadata.pop(key)
return self.clean(metadata, doc)
def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
line_ids = decode_line_ids(metadata.pop("line_ids"))
lines = full_doc["raw_content"].split("\n")
cleaned = []
for l in line_ids:
if l >= len(lines) or l < 0:
self.missed_par += 1
continue
cleaned.append(lines[l])
self.processed_par += len(line_ids)
if not cleaned:
self.missed_doc += 1
return None
full_doc["raw_content"] = "\n".join(cleaned)
full_doc["original_nlines"] = full_doc["nlines"]
full_doc["original_length"] = full_doc["length"]
full_doc["nlines"] = len(cleaned)
full_doc["length"] = len(full_doc["raw_content"])
for key, value in metadata.items():
full_doc[key] = value
return full_doc
def summary(self) -> List[str]:
summ = super().summary()
mem = mem_footprint_gb()
len_cache = len(self.metadata)
summ.append(
f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
)
if self.missed_doc:
r = self.missed_doc / self.processed
summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
if self.missed_par:
r = self.missed_par / self.processed
summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
return summ
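# Minimal usage sketch for the two transformers above (file and directory names
# are hypothetical, added for illustration only):
#
#   jsonql.run_pipes(Minifier(), file="mined_shard.json.gz", output="minified.json.gz")
#   fetcher = MetadataFetcher("minified_dir/")
#   jsonql.run_pipes(fetcher, file="cc_docs.json.gz", output="restored.json.gz")
#
# Minifier strips a mined document down to its public fields plus encoded line
# ids; MetadataFetcher later re-attaches those fields to the raw WET documents.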
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
"""Minify the given file."""
jsonql.run_pipes(Minifier(), file=file, output=output)
return f"Minified {output}"
def minify(
files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
"""Minify all the files in the given folder."""
files = _expand_files(files)
output_dir.mkdir(exist_ok=True)
with open(output_dir / "files.txt", "w") as o:
for f in files:
print(f.name, file=o)
outputs = [output_dir / f.name for f in files]
ex = get_executor(
"minify",
output_dir / "logs",
execution,
timeout_hour=2,
cpus=1,
task_parallelism=parallelism,
)
ex(minify_file, files, outputs)
def fetch_metadata_file(
file: Union[Path, str],
metadata_dir: Union[Path, str],
output: Path,
cache_dir: Path = None,
):
unminifier = MetadataFetcher(metadata_dir)
tmp = output.with_name("tmp." + output.name)
jsonql.run_pipes(unminifier, file=file, output=tmp)
tmp.rename(output)
return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
files: List[str],
metadata_dir: Union[Path, str],
output_dir: Path,
execution: str = "mp",
parallelism: int = -1,
cache_dir: Path = None,
):
if len(files) == 1 and Path(files[0]).is_dir():
folder = Path(files[0])
files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert len(files) > 0, "No files given."
output_dir.mkdir(exist_ok=True)
outputs = [output_dir / str(f).split("/")[-1] for f in files]
if cache_dir is None:
cache_dir = output_dir / "wet_cache"
cache_dir.mkdir(exist_ok=True)
if str(cache_dir) == "none":
cache_dir = None
files = [f for f, o in zip(files, outputs) if not o.exists()]
outputs = [o for o in outputs if not o.exists()]
if not files:
return
ex = get_executor(
"unminify",
output_dir / "logs",
execution,
timeout_hour=8,
cpus=1,
task_parallelism=parallelism,
mem_gb=32,
)
ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
import func_argparse
func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
|
cc_net-main
|
cc_net/minify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
UNICODE_PUNCT = {
    "，": ",",
    "。": ".",
    "、": ",",
    "„": '"',
    "”": '"',
    "“": '"',
    "«": '"',
    "»": '"',
    "１": '"',
    "」": '"',
    "「": '"',
    "《": '"',
    "》": '"',
    "´": "'",
    "∶": ":",
    "：": ":",
    "？": "?",
    "！": "!",
    "（": "(",
    "）": ")",
    "；": ";",
    "–": "-",
    "—": " - ",
    "．": ". ",
    "～": "~",
    "’": "'",
    "…": "...",
    "━": "-",
    "〈": "<",
    "〉": ">",
    "【": "[",
    "】": "]",
    "％": "%",
    "►": "-",
}
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
    if len(output) == len(line):
return line
return "".join(output)
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
(UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
res = (
text.replace("\r", "")
# remove extra spaces
.replace("(", " (")
.replace(")", ") ")
.replace(" +", " ")
)
res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
res = res.replace("( ", "(").replace(" )", ")")
res = re.sub(r"(\d) \%", r"\1\%", res)
res = res.replace(" :", ":").replace(" ;", ";")
res = res.replace("`", "'").replace("''", ' " ')
res = (
res.replace("„", '"')
.replace("“", '"')
.replace("”", '"')
.replace("–", "-")
.replace("—", " - ")
.replace(" +", " ")
.replace("´", "'")
.replace("([a-z])‘([a-z])", r"\1'\2/")
.replace("([a-z])’([a-z])", r"\1'\2/")
.replace("‘", '"')
.replace("‚", '"')
.replace("’", '"')
.replace("''", '"')
.replace("´´", '"')
.replace("…", "...")
        # French quotes
        .replace("\u00a0«\u00a0", ' "')
        .replace("«\u00a0", '"')
        .replace("«", '"')
        .replace("\u00a0»\u00a0", '" ')
        .replace("\u00a0»", '"')
        .replace("»", '"')
        # handle pseudo-spaces
        .replace("\u00a0%", "%")
        .replace("nº\u00a0", "nº ")
        .replace("\u00a0:", ":")
        .replace("\u00a0ºC", " ºC")
        .replace("\u00a0cm", " cm")
        .replace("\u00a0?", "?")
        .replace("\u00a0!", "!")
        .replace("\u00a0;", ";")
        .replace("，", ", ")
        .replace(" +", " ")
        .replace("．", ". ")
)
# English "quotation," followed by comma, style
if language == "en":
res = re.sub(r"\"([,\.]+)", r"\1\"", res)
# Czech is confused
elif language == "cs" or language == "cz":
pass
# German/Spanish/French "quotation", followed by comma, style
else:
res = res.replace(',"', '",')
res = re.sub(
r"(\.+)\"(\s*[^<])", r"\"\1\2", res
) # don't fix period at end of sentence
if (
language == "de"
or language == "es"
or language == "cz"
or language == "cs"
or language == "fr"
):
res = re.sub(r"(\d) (\d)", r"\1,\2", res)
else:
res = re.sub(r"(\d) (\d)", r"\1.\2", res)
return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = strip_accents(line)
if numbers:
line = DIGIT_RE.sub("0", line)
if punct == 1:
line = replace_unicode_punct(line)
elif punct == 2:
line = remove_unicode_punct(line)
line = remove_non_printing_char(line)
return line
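# Illustrative behaviour of `normalize` with the default flags (added example):
# the text is lower-cased, accents are stripped, digits are mapped to 0 and
# unicode punctuation is replaced, e.g.
#   normalize("Été 2021, Déjà Vu !") == "ete 0000, deja vu !"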
def slow_normalize_for_dedup(line: str) -> str:
return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
line = line.strip()
if not line:
return line
# case
line = line.lower()
# numbers
line = DIGIT_RE.sub("0", line)
line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line)
return line
|
cc_net-main
|
cc_net/text_normalizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
return file.parent / (file.name + ".index")
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def fast_reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Same as reshard but don't re-compress the output.
This will lead to a bigger output file, especially if the shards are very small.
"""
if tmp is None:
tmp = _get_tmp(output)
with open(tmp, "wb") as o:
subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)
tmp.replace(output)
indexes_files = [get_index(i) for i in inputs]
existing_indexes = sum(i.exists() for i in indexes_files)
assert (
existing_indexes == len(indexes_files) or existing_indexes == 0
), "some indexes don't exist."
if existing_indexes > 0:
indexes = [np.load(idx) for idx in indexes_files]
for i in range(len(indexes) - 1):
indexes[i + 1] += indexes[i][-1]
with open(str(output) + ".index", "wb") as o:
np.save(o, np.concatenate(indexes))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def determine_groups(
inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
if len(inputs) == 0:
return []
sample = inputs[:10]
typical_size = sum(s.stat().st_size for s in sample) / len(sample)
group_size = min(target_size // typical_size, len(inputs))
group_size = max(group_size, 1)
return jsonql.grouper(inputs, group_size)
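# Worked example (added for clarity): with 100 input shards of roughly 1GB each
# and the default 4GB target, typical_size is ~1GB, group_size becomes 4 and
# determine_groups returns 25 groups of 4 files to be concatenated together.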
if __name__ == "__main__":
func_argparse.single_main(reshard)
|
cc_net-main
|
cc_net/regroup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
parser = argparse.ArgumentParser(
description="Compute the score of each sentences of a document",
parents=[jsonql.io_parser()],
)
parser.add_argument("--models", type=str)
parser.add_argument("--sentences", action="store_true", default=False)
parser.add_argument(
"--languages", type=str, help="Ignore doc with another language"
)
parser.add_argument("--field", type=str, default=None)
parser.add_argument("--newline", type=str, default="\n")
return vars(parser.parse_args())
def pp(log_score, length):
return 10.0 ** (-log_score / length)
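# Worked example for `pp` (added for clarity): a kenlm log10 score of -120 over
# a 40-token line gives pp(-120, 40) == 10 ** (120 / 40) == 1000.0, i.e. lower
# perplexity means the language model finds the text more likely.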
class SentencePiece(jsonql.Transformer):
# Sentence Pieces model have to be read back from disk.
warning_when_pickling = True
def __init__(
self,
model: Path,
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.model = model
self.field = field
self.output_field = output_field
self.normalize = normalize
self.sp: sentencepiece.SentencePieceProcessor = None
def _prepare(self):
if self.sp is not None:
return
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(self.model))
return self
def do(self, document: dict) -> dict:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
tokenized = self.sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class MultiSentencePiece(jsonql.Transformer):
warning_when_pickling = True
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.field = field
self.output_field = output_field
self.normalize = normalize
self._prefetch: Sequence[str] = []
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_sp(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
sp = self.sp.get(lang)
if sp is not None:
return sp
if lang not in self.models:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
sp = sentencepiece.SentencePieceProcessor()
sp.load(str(self.models[lang]))
self.sp[lang] = sp
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return sp
def do(self, document: dict) -> Optional[dict]:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
sp = self.get_sp(document.get("language"))
if sp is None:
return document
tokenized = sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class DocLM(jsonql.Transformer):
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "perplexity",
newline: str = "\n",
normalize: bool = True,
load_method: int = 2,
):
super().__init__()
self.field = field
self.output_field = output_field
self.newline = newline
self.normalize = normalize
self._prefetch: Sequence[str] = []
self.lm_config = kenlm.Config()
        # These are the default settings.
# POPULATE will mmap the models and populate the pages.
# Maybe that's not the best way when the models are on a network disk.
# TODO: try copying models file, try READ or PARALLEL_READ
self.lm_config.load_method = load_method
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.lm: Dict[str, kenlm.Model] = {}
self.n_lines = 0
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_lm(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_lines(self, document: dict) -> List[str]:
lang = document.get("language")
if not lang:
return []
if lang not in self.models:
return []
content = document.get(self.field)
if not content:
return []
lines = content.split(self.newline)
self.n_lines += len(lines)
return lines
def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
if lang is None:
return None
lm = self.lm.get(lang)
if lm is not None:
return lm
model = self.models.get(lang)
if model is None:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
lm = kenlm.Model(str(model), self.lm_config)
self.lm[lang] = lm
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return lm
def do(self, document: dict) -> dict:
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return document
doc_log_score, doc_length = 0, 0
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
return document
def summary(self):
delay = time.time() - self.start_time
h = delay / 3600
s = self.n_lines / delay
summ = super().summary()
summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
return summ
class SentencesLM(DocLM):
"""Returns the score of each individual paragraph."""
def do(self, document: dict) -> Optional[str]: # type: ignore
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return None
sentences = []
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
sentences.append(f"{pp(log_score, length)}\t{line}")
return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
def __init__(
self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
):
super().__init__()
self.cutoff_csv = cutoff_csv
self.percentile_head = percentile_head
self.percentile_tail = percentile_tail
self.cutoffs: Dict[str, Tuple[float, float]] = {}
def _prepare(self) -> None:
cutoffs = pd.read_csv(self.cutoff_csv, index_col=0)
self.cutoffs = {
l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail])
for l in cutoffs.columns
}
def get_bucket(self, doc: dict) -> str:
perplexity = doc.get("perplexity", -1)
lang = doc.get("language")
if lang not in self.cutoffs or perplexity < 0:
return "all"
pp_head, pp_tail = self.cutoffs[lang]
if perplexity < pp_head:
return "head"
if perplexity < pp_tail:
return "middle"
return "tail"
def do(self, doc: dict) -> dict:
doc["bucket"] = self.get_bucket(doc)
return doc
class DropKeys(jsonql.Transformer):
def __init__(self, *keys):
super().__init__()
self.keys = keys
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
for key in self.keys:
document.pop(key, None)
return document
class RemoveSmall(jsonql.Transformer):
def __init__(self, field, min_len):
super().__init__()
self.field = field
self.min_len = min_len
self.removed = 0
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
content = document.get(self.field)
if not content or len(content) < self.min_len:
self.removed += 1
return None
return document
def summary(self):
r, n = self.removed, self.processed
ratio = r / n if n else 0
return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
pp_field = "perplexity"
lm = DocLM(models, tok_field, output_field=pp_field)
stats: List[float] = []
max_stats = 1_000_000
batch_size = 100_000
i = 0
batch = []
with open(output, "wb") as o:
for doc in jsonql.read_jsons(file):
i += 1
pp = lm(doc)[pp_field]
if len(stats) < max_stats:
stats.append(pp)
batch.append(pp)
if len(batch) >= batch_size:
np.array(batch, dtype=np.float32).tofile(o)
batch = []
if len(batch) > 0:
np.array(batch, dtype=np.float32).tofile(o)
if __name__ == "__main__":
args = get_args()
output = Path(args["output"])
if output.suffix == ".bin":
perplexity_to_bin(args["file"], output, args["models"], args["field"])
else:
jsonql.run_pipe(DocLM, args)
|
cc_net-main
|
cc_net/perplexity.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
|
cc_net-main
|
cc_net/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
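# Illustrative output (added example; exact tokens depend on the sacremoses
# version): RobustTokenizer("en")("Hello, world!") first normalizes the text
# and then returns something like "Hello , world !".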
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
|
cc_net-main
|
cc_net/tokenizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them",
parents=[jsonql.io_parser()],
)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--output_hashes", type=str)
parser.add_argument("--no_finalize", action="store_false", dest="finalize")
# parser.add_argument("--mem_gb", type=int)
parser.add_argument("--hashes", type=str)
return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
log = logging.getLogger(__name__).info
def run_par(processes):
# This is different from multiprocessing.map since it allows for kwargs.
processes = list(processes)
if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
for f, args, kwargs in processes:
f(*args, **kwargs)
return
log(f"Starting {len(processes)} subprocess")
processes = [
multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
]
for p in processes:
p.start()
for p in processes:
p.join()
failed = 0
for p in processes:
if p.exitcode != 0:
log(f"Process failed with code {p.exitcode}: {p}")
failed += 1
assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
for i in range(n_splits):
yield jsonql.SplitFile(file, i, n_splits)
def merge(hashes_1, hashes_2, output):
if isinstance(hashes_1, str):
h1 = FlatHashSet()
h1.load(hashes_1)
else:
h1 = hashes_1
if isinstance(hashes_2, str):
h2 = FlatHashSet()
h2.load(hashes_2)
else:
h2 = hashes_2
h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
dup = h1.__contains__(h2_np)
# Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
# h1 with their value.
h1[h2_np] = dup
if output:
h1.dump(output)
return h1
def merge_shard(hash_files, output):
h = FlatHashSet()
h.load(hash_files[0])
for hash_file in hash_files[1:]:
h = merge(h, hash_file, output=None)
print(f"Merged {hash_file}. We now have {len(h)} hashes.")
h.dump(output)
print(f"Saved {len(h)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
treated = 0
started = time.time()
with open(output, "wb") as o:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content:
continue
h = compute_hashes(content)
if h is None:
continue
h.tofile(o)
treated += 1
if treated % 100_000 == 0:
delay = time.time() - started
log(
f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
)
def _remove_duplicate_hashes(duplicates, source, output):
batch_size = 100_000
n_lines, n_lines_kept = 0, 0
with open(source, "rb") as f, open(output, "wb") as o:
log(f"Opening {source} with mode rb")
log(f"Opening {output} with mode wb")
while True:
hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
if hashes.size == 0:
break
keep = duplicates[hashes] < 1
kept = keep.sum()
hashes *= keep
hashes.tofile(o)
n_lines += hashes.size
n_lines_kept += kept
removed = n_lines - n_lines_kept
selectivity = n_lines_kept / n_lines if n_lines else 0
log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
    If a hash appears exactly once in each shard of hashes it won't be detected
    as a duplicate. This can be fixed if hashes are fully deduplicated beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup()
def compute_hashes(content) -> Optional[np.ndarray]:
if not content:
return None
lines = content.split("\n")
# save hashes as bytes but reinterpret them as uint64.
hashes = np.fromiter(
(
hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
:HASH_SIZE
]
for l in lines
),
dtype=np.dtype((bytes, HASH_SIZE)),
count=len(lines),
)
return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
def finalize_doc(doc, field, hashes=None):
content = doc.get(field)
lines = content.split("\n")
n_chars = len(content)
if "original_nlines" not in doc:
doc["original_nlines"] = doc.get("nlines", len(lines))
if "original_length" not in doc:
doc["original_length"] = doc.get("length", n_chars)
if hashes is None:
hashes = doc.pop(field + "_hash")
# Remove duplicates inside doc
seen: Set[int] = set()
original_line_ids = doc.get("line_ids", range(len(hashes)))
line_ids = []
new_lines = []
for l, line, h in zip(original_line_ids, lines, hashes):
if h not in seen and h != 0:
line_ids.append(l)
new_lines.append(line)
seen.add(h)
doc[field] = "\n".join(new_lines)
doc["nlines"] = len(line_ids)
n_chars_kept = len(doc[field])
doc["length"] = n_chars_kept
doc["line_ids"] = line_ids
return n_chars, n_chars_kept
class HashesCollector(jsonql.Transformer):
"""
    Collect all hashes of the lines found in the `field` of the source documents.
"""
parallelisable = False
def __init__(
self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
):
super().__init__()
self.n_lines = 0
self.field = field
self.output = output
self.hashes = FlatHashSet() if hashes is None else hashes
self.num_hashes_end = 0
self.num_hashes_start = len(self.hashes)
def summary(self) -> List[str]:
summ = super().summary()
h = self.num_hashes_end if self.hashes is None else len(self.hashes)
h = (h - self.num_hashes_start) // 1000
max_mem = mem_footprint_gb()
n = self.n_lines // 1000
summ.append(
f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
)
return summ
def do(self, doc: dict) -> None:
doc_hashes = compute_hashes(doc.get(self.field))
if doc_hashes is None:
return
self.hashes.add(doc_hashes)
self.n_lines += doc_hashes.size
def close(self):
if self.output and self.hashes:
self.hashes.dump(self.output)
self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
# Save the number of hashes.
self.num_hashes_end = len(self.hashes)
# Free up mem even if the transformer is kept somewhere else.
self.hashes = None # type: ignore
class DuplicatesRemover(jsonql.Transformer):
"""DuplicatesRemover"""
# The hashes can't be pickled so they will have to be read back from disk.
warn_when_pickling = True
def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
"""
Remove duplicates
"""
super().__init__()
self.field = field
self.collect = collect
self.hashes_files = hashes_files
self.duplicates: Optional[AbstractDedupHashSet] = None
self.n_lines, self.n_lines_kept = 0, 0
self.n_chars, self.n_chars_kept = 0, 0
def _prepare(self):
if self.duplicates is not None:
return
self.duplicates = FlatHashSet()
start = time.time()
for h in self.hashes_files:
shard_start = time.time()
self.duplicates.load(str(h))
delay = time.time() - shard_start
self.log(
f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
)
delay = time.time() - start
self.log(
f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
)
def do(self, doc: dict) -> Optional[dict]:
content = doc.get(self.field)
if not content:
return None
doc_hashes = compute_hashes(content)
assert self.duplicates is not None
seen = (
self.duplicates.add(doc_hashes)
if self.collect
else self.duplicates[doc_hashes]
)
keep = seen < True
kept = keep.sum()
if kept == 0:
return None
doc_hashes = doc_hashes * keep
self.n_lines += keep.size
self.n_lines_kept += kept
chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
self.n_chars += chars
self.n_chars_kept += kept_chars
return doc
def summary(self) -> List[str]:
summ = super().summary()
end_time = time.time()
n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
speed = n_docs / (end_time - self.start_time)
summ.append(
f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
)
selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
selectivity = n_chars_kept / n_chars if n_chars else 0
summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
return summ
def deduplicate(
file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (but keep the first occurence)."""
dup_remover = DuplicatesRemover(field, [], collect=True)
return dup_remover.map(jsonql.read_jsons(file))
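# Minimal usage sketch (hypothetical file name, added for illustration):
#
#   for doc in deduplicate("shard.json.gz", field="raw_content"):
#       ...  # repeated lines are dropped, doc["line_ids"] lists the kept ones
#
# Because `collect=True`, hashes are accumulated on the fly, so only the first
# occurrence of each line survives within this single pass.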
def deduplicate_two_pass(
file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (even removing the first occurence).
This is what is done in the paper, and in mine.py
"""
try:
if isinstance(file, Path):
hash_file: Path = file.with_suffix(".bin")
else:
hash_file = jsonql._tmp(Path("hashes.bin"))
jsonql.run_pipes(
jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
)
dup_remover = DuplicatesRemover(field, [hash_file])
return dup_remover.map(jsonql.read_jsons(file))
finally:
if hash_file.exists():
hash_file.unlink()
|
cc_net-main
|
cc_net/dedup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://commoncrawl.s3.amazonaws.com"
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"])
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
wet_paths = cc_wet_paths_url(dump_id)
cache_dir = cache_dir or jsonql._tmp_dir()
wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
home_page = BeautifulSoup(
urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in home_page.findAll("a")]
dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)]
return sorted(dumps)
def ls():
for dump in list_dumps():
print(dump, "->", cc_wet_paths_url(dump))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
"""Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: [url]
WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
Content-Type: text/plain
Content-Length: 7743
"""
if not headers or not doc:
return None
try:
warc_type = headers[1].split()[1]
if warc_type != "conversion":
return None
url = headers[2].split()[1]
date = headers[3].split()[1]
digest = headers[6].split()[1]
length = int(headers[8].split()[1])
except Exception as e:
logger.warning("Can't parse header:", e, headers, doc)
return None
# Docs are separated by two empty lines.
last = None
if not doc[-1] and not doc[-2]:
last = -2
title, doc = doc[0], doc[1:last]
return {
"url": url,
"date_download": date,
"digest": digest,
"length": length,
"nlines": len(doc),
"source_domain": urlparse(url).netloc,
"title": title,
"raw_content": "\n".join(doc),
}
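# Illustrative call (added example): with the header block shown in the
# docstring above and doc = ["Page title", "First paragraph", "", ""],
# parse_doc returns a dict whose "title" is "Page title", "raw_content" is
# "First paragraph" and "nlines" is 1; the two trailing empty lines that
# separate WARC records are dropped.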
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
doc: List[str] = []
headers, read_headers = [], True
for warc in warc_lines:
warc = warc.strip()
if read_headers:
headers.append(warc)
read_headers = warc != ""
continue
if warc == "WARC/1.0":
# We reached the beginning of the new doc.
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
headers, doc, read_headers = [warc], [], True
continue
doc.append(warc)
# Return the last document
if doc:
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
n_doc = 0
n_ok = 0
for doc in group_by_docs(lines):
n_doc += 1
if not doc or len(doc["raw_content"]) < min_len:
continue
n_ok += 1
yield doc
if n_doc > 0:
logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).")
else:
logger.info(f"Found no documents")
def dl(
dump: str,
shard: int,
num_shards: int,
output: Path = None,
num_segments_per_shard: int = 0,
):
"""Download a shard of the common crawl, and export it to json.
Arguments:
output: filename of the output file
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: manual control of the number of segment per shard.
"""
reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
jsonql.run_pipes(inputs=reader, output=output)
logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
def __init__(
self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
):
self._segments = segments
self.min_len = min_len
if cache_dir is not None:
cache_dir = Path(cache_dir)
cache_dir.mkdir(exist_ok=True)
self.cache_dir = cache_dir
self.retrieved_segments = 0
def segment_url(self, segment: str):
return "/".join((WET_URL_ROOT, segment))
@property
def segments(self) -> Sequence[str]:
return self._segments
def open_segment(self, segment: str) -> Iterable[str]:
url = self.segment_url(segment)
file: Optional[Path] = None
if self.cache_dir:
file = self.cache_dir / segment.split("/")[-1]
if not file or not file.exists():
self.retrieved_segments += 1
return jsonql.open_remote_file(url, cache=file)
def __iter__(self) -> Iterator[dict]:
n = len(self.segments)
for i, segment in enumerate(self.segments):
start = time.time()
# TODO: start downloading the next segment in the background
for doc in parse_warc_file(self.open_segment(segment), self.min_len):
doc["cc_segment"] = segment
yield doc
if i + 1 >= n:
continue
end = time.time()
delay = (end - start) / 3600 * (n - 1 - i)
logger.info(
f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
)
class CCShardReader(CCSegmentsReader):
def __init__(
self,
dump: str,
shard: int,
num_shards: int = -1,
num_segments_per_shard: int = 40,
min_len: int = 300,
cache_dir: Path = None,
):
"""Downloads a shard of Common Crawl, and yields dict.
Arguments:
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: if set will limit the number of files by shard.
Useful for testing.
"""
super().__init__([], min_len=min_len, cache_dir=cache_dir)
self.dump = dump
self.shard = shard
assert num_shards > 0 or num_segments_per_shard > 0
self.num_shards = num_shards
self.num_segments_per_shard = num_segments_per_shard
@property
def segments(self) -> Sequence[str]:
# Delaying the initialization allows to delay the looking up of the WET files
if self._segments:
return self._segments
segments = cc_segments(self.dump, self.cache_dir)
n = len(segments)
if self.num_shards < 0:
self.num_shards = n // self.num_segments_per_shard
i_min = (self.shard * n) // self.num_shards
i_max = ((self.shard + 1) * n) // self.num_shards
if self.num_segments_per_shard > 0:
i_max = min(i_max, i_min + self.num_segments_per_shard)
self._segments = segments[i_min:i_max]
return self._segments
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
start = time.time()
yield None
delay = time.time() - start
print(f"{name} took {delay:.1f}s")
def benchmark(tmp_path: Path):
segments = [
"crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
]
seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
with timer("from network"):
list(CCSegmentsReader(segments))
with timer("from network, with caching"):
list(CCSegmentsReader(segments, cache_dir=tmp_path))
assert seg_file.exists()
with timer("from disk"):
CCSegmentsReader(segments, cache_dir=tmp_path)
seg_file.unlink()
if __name__ == "__main__":
func_argparse.main(ls, dl)
|
cc_net-main
|
cc_net/process_wet_file.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
func_argparse.parse_and_call(cc_net.mine.get_main_parser())
if __name__ == "__main__":
main()
|
cc_net-main
|
cc_net/__main__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
parser = argparse.ArgumentParser(
description="Read a list of json files and split them ",
parents=[jsonql.io_parser()],
)
parser.add_argument("--pattern", type=str)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--threshold", type=float, default=0)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--out_field", type=str, default="language")
parser.add_argument("--top", type=int, default=1)
return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
labels, scores = model.predict(text, k=k)
labels = [l.replace("__label__", "") for l in labels]
return labels, scores
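# Illustrative result (added example; actual labels and scores depend on the
# fastText model that is loaded): predict(model, "Bonjour tout le monde")
# returns something like (["fr"], array([0.98])), since the "__label__" prefix
# is stripped from each label.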
def avg_predict(model, text):
    # Overall this gives the same results as predict(model, text.replace("\n", ""))
    lines = text.split("\n")
    text_len = sum(len(line) for line in lines)
    if text_len == 0:
        return None, 0
    predictions = [predict(model, line) for line in lines]
    scores_by_label: Dict[str, float] = collections.defaultdict(float)
    for (labels, scores), line in zip(predictions, lines):
        scores_by_label[labels[0]] += scores[0] * len(line)
    label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
    return label, score / text_len
class Classifier(jsonql.Transformer):
def __init__(
self,
model: Path,
field: str,
out_field: str,
threshold: float = 0,
top: int = 1,
language: str = None,
rounding: int = 2,
):
super().__init__()
self.model = model
assert model.exists(), f"Model {model} doesn't exist."
self.field = field
self.out_field = out_field
self.threshold = threshold
self.top = top
self.language = language
self.rounding = rounding
# Fasttext model is a C object and can't be pickled
self.fasttext_model: fasttext._FastText = None
self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
self.cnt: Dict[str, int] = {}
def _prepare(self):
self.log(f"Loading {self.model}")
self.fasttext_model = fasttext.load_model(str(self.model))
def predict(self, text):
return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
def do(self, doc: dict) -> Optional[dict]:
text = doc.get(self.field, None)
if not text:
return None
if self.language and doc.get("language") != self.language:
self.n_ignored += 1
return doc
self.n_doc += 1
labels, scores = self.predict(text)
scores.round(self.rounding, out=scores)
for l in labels:
self.cnt[l] = self.cnt.get(l, 0) + 1
if self.top == 1:
existing_label = doc.get(self.out_field, None)
if existing_label and labels[0] != existing_label:
self.n_disagreement += 1
if all(s < self.threshold for s in scores):
return None
self.n_accepted += 1
if self.top == 1:
doc[self.out_field] = labels[0]
doc[self.out_field + "_score"] = scores[0]
else:
doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
return doc
def summary(self):
n_doc, n_accepted, n_disagreement, cnt, out_field = (
self.n_doc,
self.n_accepted,
self.n_disagreement,
self.cnt,
self.out_field,
)
summ = super().summary()
if self.threshold > 0:
ratio = n_accepted / n_doc if n_doc else 0
summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
disagreement = n_disagreement / n_doc if n_doc else 0
if disagreement:
summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
return summ
def __repr__(self):
return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
classifier = Classifier(**kwargs)
splitter = jsonql.split(pattern)
jsonql.run_pipes(classifier, splitter, file=file, output=output)
if __name__ == "__main__":
args = get_args()
pattern = args.get("pattern")
if pattern:
classify_and_split(**args)
else:
args.pop("pattern")
jsonql.run_pipe(Classifier, args)
|
cc_net-main
|
cc_net/split_by_lang.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
lang: str
text: str
lm_score: float
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
"""
    Download metadata for a shard.
Sample metadata:
{
"cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
"digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
"url": "http://personals.gearplay.com/ads/DRJONES.htm",
"line_ids": [10],
"languages": ["en_XX"],
"lm_scores": [-2.658],
}
"""
snapshot = snapshot.replace("-", "_")
name = f"snap_{snapshot}_batch_{shard}.json.gz"
url = "/".join([S3_BUCKET, VERSION, name])
shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
try:
cache_file: Optional[Path] = None
if WET_CACHE is not None:
cache_file = WET_CACHE / name
metadata_file = jsonql.open_remote_file(url, cache_file)
    except Exception:
logging.warning(f"Couldn't open {url}")
return
for meta in jsonql.read_jsons(metadata_file):
shard_metadata[meta["cc_segment"]][meta["digest"]] = meta
found_pars, missed_pars = 0, 0
for seg, segment_metadata in shard_metadata.items():
for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
if doc["digest"] not in segment_metadata:
continue
meta = segment_metadata[doc["digest"]]
full_pars = [doc["title"]] + doc["raw_content"].split("\n")
assert len(meta["line_ids"]) == len(meta["languages"])
assert len(meta["line_ids"]) == len(meta["lm_scores"])
for i, lang, score in zip(
meta["line_ids"], meta["languages"], meta["lm_scores"]
):
if snapshot != "2018-51" and lang in BIG_LANGUAGES:
# Big languages only come from "2018-51" snapshot
continue
if i >= len(full_pars):
                    # This is because CC100 was created by saving only urls.
                    # Some urls appear in different snapshots with slightly
                    # different versions, but we don't know which one is correct.
                    # Here we read both versions, but some indices may end up
                    # being incorrect.
                    # This impacts ~3% of documents.
missed_pars += 1
continue
yield Paragraph(lang, full_pars[i], score)
found_pars += 1
if missed_pars > 0:
logging.warning(
f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes."
)
def _split_by_par(
paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
outdir.mkdir(exist_ok=True)
outfiles = {}
num_pars = 0
try:
for par in paragraphes:
# MODIFY ME: filter paragraph if needed (languages, score, ...)
if par.lang not in outfiles:
(outdir / par.lang).mkdir(exist_ok=True)
outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
outfiles[par.lang] = gzip.open(outfile, "wt")
print(par.text, file=outfiles[par.lang])
num_pars += 1
finally:
for o in outfiles.values():
o.close()
logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
if processes == 0:
yield map
return
with multiprocessing.Pool(processes) as pool:
yield pool.imap_unordered
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
_dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)
with unordered_map(processes) as umap:
num_pars = sum(umap(_dl_shard, range(500)))
logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
"""
Download CC100 corpus.
Will create one text file per language and CC snapshot.
- snapshot: restrict to one snapshot. Useful for parallelization.
- outdir: output directory
- processes: number of processes to use
"""
if snapshot is None:
snapshots = CC_100_SNAPSHOTS
else:
snapshots = snapshot.split(",")
invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}"
for snapshot in snapshots:
dl_snapshot(snapshot, outdir, processes)
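# Example invocation (illustrative; func_argparse derives the flags from the
# signature of `dl` above):
#   python -m cc_net.tools.dl_cc_100 --snapshot 2018-51 --outdir data_cc100 --processes 8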
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
|
cc_net-main
|
cc_net/tools/dl_cc_100.py
|
cc_net-main
|
cc_net/tools/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label documents with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
if url in url2tags:
url2tags[url] &= tags
else:
url2tags[url] = tags
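# Example (added for clarity): repeated calls intersect the tag sets, so a url
# or domain seen under several DMOZ topics only keeps the categories common to
# all of them:
#   add_tags("example.com", {"Arts", "Music"}, url2tags)
#   add_tags("example.com", {"Arts", "Movies"}, url2tags)  # -> {"Arts"}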
def load_tags(filename: Path = None) -> TaggedUrls:
if filename is None:
with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
tree = etree.parse(dmoz)
else:
tree = etree.parse(str(filename))
root = tree.getroot()
url2tags: Dict[str, Set[str]] = {}
for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
url = external_page.get("about")
domain = urlparse(url).netloc
for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
# print(url, topic.text)
# Tags looks like Top/Arts/Animation/Anime/Collectibles
tags = set(topic.text.split("/")[1:])
add_tags(url, tags, url2tags)
add_tags(domain, tags, url2tags)
return url2tags
def dl(output: Path) -> None:
urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
"""
Loads a tags file and create a training dataset using the given webpages.
Arguments:
- file: CC shard file
- tags_file: dmoz tagging file, (like the one produced by `dl`)
- output: ""
"""
url2tags = load_tags(tags_file)
with jsonql.open_write(output) as o:
for document in jsonql.read_jsons(file):
if not document:
continue
url = document["url"]
domain = document["source_domain"]
if url in url2tags:
tags = url2tags[url]
elif domain in url2tags:
tags = url2tags[domain]
else:
continue
if len(tags) == 0:
continue
fasttext_tags = ["__label__" + tag for tag in tags]
content = document["tokenized"].replace("\n", " ").lower()
if len(content) > 200:
print(" ".join(fasttext_tags), content, file=o) # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
|
cc_net-main
|
cc_net/tools/make_dmoz_corpus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
normalized = output_dir / (corpus.stem + ".normalized")
if normalized.exists():
return normalized
print("Will normalize", corpus, "to", normalized)
jsonql.run_pipes(
jsonql.Mapper(text_normalizer.normalize),
file=corpus,
output=normalized,
processes=PROCESSES,
)
return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model")
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
def __init__(self, model: Path):
super().__init__()
self.model = model
self.sp: SentencePieceProcessor = None # type: ignore
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.model))
def do(self, line: str) -> str:
return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
def __init__(
self,
sp_model: Path,
lm_model: Path,
field: str = "raw_content",
threshold: float = float("+inf"),
):
super().__init__()
self.sp_model = sp_model
self.lm_model = lm_model
self.field = field
self.threshold = threshold
self.sp: SentencePieceProcessor = None
self.lm: KenlmModel = None
self.splitter: SentenceSplitter = None
self.hashes: Set[int] = set()
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.sp_model))
self.splitter = SentenceSplitter("en")
self.lm = KenlmModel(str(self.lm_model))
def do(self, document: dict) -> Optional[str]:
content: Optional[str] = document.get(self.field)
if not content:
return None
all_sentences = [
s for l in content.split("\n") if l for s in self.splitter.split(text=l)
]
unique_sentences = []
for s in all_sentences:
if not s:
continue
h = dedup.str_hash(s)
if h in self.hashes:
continue
self.hashes.add(h)
unique_sentences.append(s)
scores = []
for sentence in unique_sentences:
normalized = text_normalizer.normalize(sentence)
pieces = self.sp.encode_as_pieces(normalized)
log_score = self.lm.score(" ".join(pieces))
pp = -1
if len(pieces):
pp = perplexity.pp(log_score, len(pieces))
scores.append(pp)
res = filter(
lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
)
return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
tokenized = output_dir / (corpus.stem + ".tokenized")
if tokenized.exists():
return tokenized
print("Will SentencePiece", corpus, "to", tokenized)
jsonql.run_pipes(
SentencePiece(sp_model(lang)),
file=normalize(corpus, output_dir),
output=tokenized,
processes=PROCESSES,
)
return tokenized
def train_lm(
corpus: Path,
output_dir: Path,
lang: str = "en",
vocab_size: int = VOCAB_SIZE,
ngrams: int = 5,
):
lm_text_file = output_dir / (corpus.stem + ".arpa")
lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
if lm_bin_file.exists():
return lm_bin_file
assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."
normalized = normalize(corpus, output_dir)
tokenized = tokenize(normalized, output_dir, lang)
print("Will train LM", lm_text_file, "on", tokenized)
kenlm_cmd = [
str(KENLM),
f"--order={ngrams}",
"--memory=8G",
f"--temp_prefix={jsonql._tmp_dir()}",
f"--text={tokenized}",
f"--arpa={lm_text_file}",
f"--vocab_estimate={vocab_size}",
"--discount_fallback",
]
subprocess.run(kenlm_cmd, check=True)
print("Will create binary model", lm_bin_file, "from", lm_text_file)
subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
return lm_bin_file
def uniform_sampling_wrt_perplexity(
paragraphes: Iterable[str],
rounding: float = 100.0,
cut: float = 1000.0,
samples: int = 20,
) -> Iterable[str]:
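# A sketch of the bucket arithmetic: perplexities are floored to multiples of `rounding`,
# values above `cut` are skipped, and each bucket is capped at `samples` lines, so with the
# defaults (rounding=100, cut=1000, samples=20) at most about 10 * 20 = 200 lines are yielded.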
max_samples = math.floor(cut / rounding * samples)
n = 0
buckets = Counter([0.0])
logging.info(f"Will sample {max_samples} sentences.")
for lines in paragraphes:
for line in lines.split("\n"):
if not line:
continue
pp = float(line[: line.find("\t")])
pp = math.floor(pp / rounding) * rounding
if pp > cut:
continue
if buckets[pp] > samples:
continue
yield line
buckets[pp] += 1
if buckets[pp] > samples:
logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
n += 1
if n > max_samples:
return
def sample(
corpus: Path,
output_dir: Path,
dataset: Path = None,
n: int = 10_000,
lang: str = "en",
) -> Path:
sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
if sample_file.exists():
return sample_file
dataset = _dataset(dataset, lang)
extractor = ExtractSentences(
sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
)
sampling = functools.partial(
uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
)
print(f"Will sample data from {dataset} to {sample_file}")
try:
jsonql.run_pipes(
extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
)
except Exception:
sample_file.unlink()
raise
subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
subprocess.run(["head", sample_file], check=True)
return sample_file
def mine(
corpus: Path,
output_dir: Path,
threshold: float,
dataset: Path = None,
lang: str = "en",
) -> List[Path]:
"""Search sentences in CC similar to the one in the given corpus.
Args:
- corpus: corpus to train the LM one. Assumes one sentence per line.
- output_dir: where to store the results
- threshold: maximum perplexity to have
- dataset: glob pattern matching CC shards.
- lang: search in the files of this language
"""
dataset = _dataset(dataset, lang)
files = list(dataset.parent.glob(dataset.name))
outputs = [output_dir / (f.stem + ".tsv") for f in files]
if all(o.exists() for o in outputs):
return outputs
n = len(outputs)
sp = [sp_model(lang)] * n
lm = [train_lm(corpus, output_dir)] * n
thresholds = [threshold] * n
ex = submitit.AutoExecutor(output_dir / "mining_logs")
ex.update_parameters(
name="mine",
cpus_per_task=PROCESSES,
timeout_min=60 * 24 // PROCESSES,
mem_gb=10,
)
jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
print("Submited job array:", jobs[0])
for j in submitit.helpers.as_completed(jobs):
(i, o) = j.result()
print("Mined sentences from", i, "to", o)
return outputs
def _mine(
file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
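# Illustrative CLI usage (a sketch; flag names follow from how func_argparse maps the
# `sample` / `mine` signatures and are assumptions, not taken from the original docs):
#   python -m cc_net.tools.expand_corpus mine --corpus sentences.txt --output_dir mined --threshold 300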
|
cc_net-main
|
cc_net/tools/expand_corpus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
return "\n".join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
file.parent.mkdir(exist_ok=True)
with open(file, "w") as f:
for sentences in docs:
doc = dict(text=text(*sentences))
print(json.dumps(doc), file=f)
def as_dict(hash_set):
if not isinstance(hash_set, dict):
hash_set = {k: v for (k, v) in hash_set.items()}
return hash_set
def load_hashes(file):
results = dedup.FlatHashSet()
results.load(file)
return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in actual]
assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
write_docs(
tmp_path / "docs.json",
[
["_Hello", "_World", "I'm so original"],
["_world", "I'm originaler", "_Hello"],
],
)
results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
expected = [
# First document is untouched
dict(
text=text("_Hello", "_World", "I'm so original"),
original_nlines=3,
nlines=3,
line_ids=[0, 1, 2],
),
# Second document loses several lines
dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
collector = dedup.HashesCollector(field="text", output=hashes)
list(collector.map(documents))
results = load_hashes(hashes)
expected = {
str_hash(l): l.startswith("_")
for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
}
assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
with dedup.HashesCollector(field="text", output=hashes) as d:
list(d.map(documents))
results = FlatHashSet()
results.load_np(hashes)
expected = set(
str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
)
assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
documents = [
dict(text=text("_Hello", "World", "I'm so original")),
dict(text=text("Good morning", "World", "I'm originaler")),
]
seen = ["_hello", "i'm originaler", "world"]
hashes = [str_hash(h) for h in seen]
h = dedup.FlatHashSet()
h.add(hashes)
# Note: 'world' appears only once and won't be treated as a duplicate.
h.add(hashes[:-1])
h.dump(tmp_path / "hashes.bin")
results = list(
dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
)
expected = [
dict(
text=text("World", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[1, 2],
),
dict(
text=text("Good morning", "World"),
original_nlines=3,
nlines=2,
line_ids=[0, 1],
),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
parts = [data / "part_0.json", data / "part_1.json"]
res = tmp_path / "res"
res.mkdir()
h = tmp_path / "hashes.bin"
field = "text"
jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
for part in parts:
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(
text=text("Good morning", "I'm originaler"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
words = [w for part in [part_0, part_1] for doc in part for w in doc]
expected = {str_hash(s.lower()): s.startswith("_") for s in words}
assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["_Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
h = tmp_path / "hashes"
h.mkdir()
h0 = FlatHashSet()
h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
h0.add([str_hash("_world")])
h0.dump(h / "part_0.bin")
assert {
str_hash("hello"): False,
str_hash("_world"): True,
str_hash("i'm so original"): False,
} == as_dict(h0)
h1 = FlatHashSet()
h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
h1.add([str_hash("_good morning")])
h1.dump(h / "part_1.bin")
assert {
str_hash("_good morning"): True,
str_hash("_world"): False,
str_hash("i'm originaler"): False,
} == as_dict(h1)
res = tmp_path / "res"
res.mkdir()
# dedup.DISABLE_MULTI_PROCESSING = True # Simplifies debugging
dedup.remove_duplicates_sharded(
files=[data / "part_0.json", data / "part_1.json"],
outputs=[res / "part_0.json", res / "part_1.json"],
field="text",
hashes_dir=h,
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
# First pass removes "_world", second "_good morning".
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
|
cc_net-main
|
tests/test_dedup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
assert txt.replace_unicode_punct(weird) == replaced
assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
weird = "023456789 | 0123456789"
normalized = "000000000 | 0000000000"
assert txt.normalize(weird, numbers=True) == normalized
assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
weird = "023´∶:\x10 | ;012 hèllo"
normalized = "000 | ;000 hèllo"
assert normalized == txt.slow_normalize_for_dedup(weird)
assert normalized == txt.normalize_for_dedup(weird)
|
cc_net-main
|
tests/test_normalizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
sample = Path(__file__).parent / "data" / "sample.warc.txt"
with open(sample) as f:
documents = list(process_wet_file.parse_warc_file(f))
expected_urls = [
"http://sample_english.com",
"http://sample_chinese.zh",
"http://sample_russian.ru",
]
assert expected_urls == [d["url"] for d in documents]
expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
assert expected_domains == [d["source_domain"] for d in documents]
expected_date = [
"2019-03-18T00:00:00Z",
"2019-03-18T00:00:01Z",
"2019-03-18T00:00:02Z",
]
assert expected_date == [d["date_download"] for d in documents]
expected_title = [
"Famous Mark Twain Quotes",
"馬克·吐溫名言",
"Цитаты знаменитого Марка Твена",
]
assert expected_title == [d["title"] for d in documents]
expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
assert expected_quotes == documents[0]["raw_content"]
|
cc_net-main
|
tests/test_parse_wet_file.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
return {k: v for (k, v) in flat_hash_set.items()}
need_getpy = pytest.mark.skipif(
FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
def run_case():
flat = as_dict(test_case(FlatHashSet))
naive = as_dict(test_case(NaiveHashSet))
assert flat == naive
return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
h = hash_set_cls()
h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
return h
@same_behavior
def test_add_dup(hash_set_cls):
h = hash_set_cls()
h.add(np.arange(10, dtype=h.dtype))
h.add(np.arange(5, dtype=h.dtype))
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
return h
@need_getpy
def test_gp_dict():
import getpy as gp # type: ignore
h = gp.Dict(HASH_TYPE, np.uint8)
h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
dump_path = tmp_path / dump.__name__
dump(h, dump_path)
h2 = type(h)()
load(h2, dump_path)
assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
h = hash_set_cls()
x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
h.add(x)
check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
if hasattr(hash_set_cls, "dump_gp"):
check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
|
cc_net-main
|
tests/test_flat_hash_set.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""Remove requests.sessions.Session.request for all tests."""
monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
|
cc_net-main
|
tests/conftest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
|
cc_net-main
|
tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
n_shards = 4
n_docs = 20
shards = [
[dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
for s in range(n_shards)
]
shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
for shard, shard_file in zip(shards, shards_files):
jsonql.run_pipes(inputs=shard, output=shard_file)
regroup_file = tmp_path / "regroup.json.gz"
start = time.time()
regroup_fn(shards_files, regroup_file)
duration = time.time() - start
print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")
regrouped = list(jsonql.read_jsons(regroup_file))
assert [doc for shard in shards for doc in shard] == regrouped
readers = jsonql.get_block_readers(regroup_file, n_shards)
if not check_blocks_boundaries:
assert [doc for shard in shards for doc in shard] == [
doc for reader in readers for doc in jsonql.read_jsons(reader)
]
return
for shard, reader in zip(shards, readers):
block = [doc for doc in jsonql.read_jsons(reader)]
assert shard == block
def test_regroup(tmp_path):
# With regroup boundaries will be every 256Mb.
check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
# With fast regroup boundaries should match the shards.
check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
|
cc_net-main
|
tests/test_regroup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
return small_bar.replace(" ", " " * 10).replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
with io.StringIO() as output:
# Convert data to a generator so that it's not interpreted as a file list.
jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
return output.getvalue()
def test_split(tmp_path: Path):
data = [
dict(text="Hello world", lang="en"),
dict(text="Boujour les amis", lang="fr"),
dict(text="Rock your boat", lang="en"),
]
with jsonql.split(tmp_path / "{lang}.json") as split:
list(split.map(data))
summary = split.summary()
assert "Found 2 splits." in summary
en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
assert [data[0], data[2]] == en_docs
fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
data = [dict(text="Hello world", lang="en")]
with pytest.raises(KeyError):
with jsonql.split(tmp_path / "{language}.json") as split:
list(split.map(data))
def test_histogram():
data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
hist, bins = jsonql.histogram(data, bins=8, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])
data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
hist, bins = jsonql.histogram(data, bins=10, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
stats = {
jsonql.ALL_DOCUMENTS: 100,
"title": 80,
"title.length": 80 * 50,
"text": 100,
"text.length": 100 * 1000,
"popularity": 8,
"popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
}
(title,) = jsonql.display_stats(stats, "title")
assert "title" in title
assert "saw 80 times" in title
assert "average length is" in title
assert "\n" not in title
(text,) = jsonql.display_stats(stats, "text")
assert "text" in text
assert "saw 100 times" in text
assert "average length is" in text
assert "\n" not in text
histogram = jsonql.display_stats(
stats, "popularity", bins=[x / 10 for x in range(1, 10)]
)
assert "popularity" in histogram[0]
assert "saw 8 times" in histogram[0]
assert "histogram is" in histogram[0]
assert "0.100 " + bar("████████") in histogram[1]
assert "0.400 " + bar("████ ") in histogram[2]
assert "0.800 " + bar("████ ") in histogram[3]
cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
assert "popularity" in cum_histogram[0]
assert "saw 8 times" in cum_histogram[0]
assert "histogram is" in cum_histogram[0]
assert "0.100 " + bar("████ ") in cum_histogram[1]
assert "0.400 " + bar("██████ ") in cum_histogram[2]
assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
def sample(pop):
return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)
data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
desc = get_output(
jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
)
assert "Field title saw 8 times (100.0%), average length is 5" in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity saw 8 times (100.0%), histogram is" in desc
assert "0.100 " + bar("████████") in desc
assert "0.400 " + bar("████ ") in desc
assert "0.800 " + bar("████ ") in desc
desc = get_output(jsonql.describe, data, columns=["text"])
assert "Field title saw 8 times (100.0%), average length is 5" not in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
def transformer(source, sep=" "):
for i, line in enumerate(source):
res = f"{i}{sep}{line}"
yield res
data = ["hello", "world"]
assert get_output(transformer, data) == "0 hello\n1 world\n"
assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
def _lines(filename: Path) -> Sequence[str]:
# jsonql.lines calls open_read
return list(jsonql.lines(filename))
tmp = tmp_path
with jsonql.open_write(tmp / "a.txt") as o:
print("a", file=o)
assert _lines(tmp / "a.txt") == ["a"]
jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
assert _lines(tmp / "a.txt") == ['{"a": 1}']
with jsonql.open_write(tmp / "a.gz") as o:
print("a", file=o)
assert _lines(tmp / "a.gz") == ["a"]
with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
print("a", file=o)
assert _lines(tmp / "a0.txt") == ["a"]
assert not (tmp / "a1.txt").is_file()
with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b0.txt") == ["0" * 2000]
assert _lines(tmp / "b1.txt") == ["1" * 2000]
with jsonql.open_write(tmp / "a_????.json") as o:
print("a", file=o)
assert _lines(tmp / "a_0000.json") == ["a"]
assert not (tmp / "a_0001.json").is_file()
assert _lines(tmp / "a_*.json") == ["a"]
with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b_00.json") == ["0" * 2000]
assert _lines(tmp / "b_01.json") == ["1" * 2000]
assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\nWorld\n"
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello _|_\nWorld\n"
# split is here ^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello _|_\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\U0001F40D\nWorld\n"
# split is here ^^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello🐍\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
file = tmp_path / "test.gz"
f = str(file)
# Each object is 10/11 bytes long. We have two of them per block.
content = ['{"xx": %d}' % i for i in range(80)]
with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
for line in content:
print(line, file=o)
jr = jsonql.JsonReader(strict=True)
expected = list(jr.map(content))
# read as one file
assert expected == list(jsonql.read_jsons(file))
# read first block
assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
# read last block
assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))
readers = jsonql.get_block_readers(file, 9)
read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
# 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
class MyTransformer(jsonql.Transformer):
def __enter__(self):
print("trans: started")
self.ready = True
return self
def __exit__(self, *args):
print("trans: done")
def do(self, x):
return (x, x)
def acc(values):
print("acc: started")
res = 0
for (x, _) in values:
res += int(x)
print("acc: done")
yield f"acc: result={res}"
t = MyTransformer()
data = (str(x) for x in range(10))
print("pipeline: started")
# Print to stdout.
jsonql.run_pipes(t, acc, file=data)
print("pipeline: done")
out = capsys.readouterr().out
assert (
"\n".join(
[
"pipeline: started",
"trans: started",
"acc: started",
"acc: done",
f"acc: result=45",
# Transformers are closed at the very end.
"trans: done",
"pipeline: done\n",
]
)
== out
)
def test_write_to_stdout(capsys):
lines = [str(x) for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
lines = [str(x) + "\n" for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "".join(lines)
def test_multiprocess(capsys):
mult = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
jsonql.run_pipes(mult, processes=2, file=(str(x) for x in range(10)))
out = set(capsys.readouterr().out.strip("\n").split("\n"))
assert set(f"2x = {2 * x}" for x in range(10)) == out
|
cc_net-main
|
tests/test_jsonql.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
|
cc_net-main
|
tests/test_minify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
return [
v
for v in vars(module).values()
if type(v) is type
and issubclass(v, jsonql.Transformer)
and v != jsonql.Transformer
]
ALL_TRANSFORMERS = (
get_transformers(jsonql)
+ get_transformers(dedup)
+ get_transformers(perplexity)
+ get_transformers(tokenizer)
+ get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
assert issubclass(cls, jsonql.Transformer)
# accessing __init__ is generally an error, but here we do want to inspect
# the __init__ method.
code = inspect.getsource(cls.__init__) # type: ignore
code = code.replace(" ", "")
# Check that super().__init__ is called.
assert "super().__init__()" in code
def test_bad_transformers_are_caught():
class BadTransformer(jsonql.Transformer):
def __init__(self, arg):
# We aren't calling super /!\
self.arg = arg
with pytest.raises(AssertionError):
check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
model = Path("bin/lid.bin")
if not model.exists():
return
classifier = split_by_lang.Classifier(model, "text", "lang")
classifier.__enter__()
doc = dict(text="Hello world ! This is English btw.")
original_results = classifier(doc)
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
# Do it again with the unpickled object.
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
|
cc_net-main
|
tests/test_transformer.py
|
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
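# Parse the "release X.Y" token from `nvcc -V`. For example, a line such as
# "Cuda compilation tools, release 11.3, V11.3.109" yields major "11" and minor "3"
# (only the first digit after the dot is kept).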
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("flashmm")
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
'flashmm', [
'flash_mm.cpp',
'mm_block_fwd_cuda.cu',
'hyena_filter_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': ['-O3', '--threads', '4', '-lineinfo', '--use_fast_math', '-std=c++17', '-arch=compute_70']
# extra_compile_args={'cxx': ['-O3'],
# 'nvcc': append_nvcc_threads(['-O3', '-lineinfo', '--use_fast_math', '-std=c++17'] + cc_flag)
},
include_dirs=[os.path.join(this_dir, 'mathdx/22.02/include')],
)
)
torch.utils.cpp_extension.COMMON_NVCC_FLAGS.remove('-D__CUDA_NO_HALF2_OPERATORS__')
setup(
name="flashmm",
version="0.1",
description="Fast modules for Monarch Mixer block",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
|
m2-main
|
csrc/flashmm/setup.py
|
import torch
import torch.nn.functional as F
from einops import rearrange
import math
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
from flashmm import mm_block_fwd, hyena_filter_fwd, exp_mod_in_place_fwd
def ref_mm_block(
u,
linear, out_linear,
x1_s, x2_s, v_s,
x1_s_bias, x2_s_bias, v_s_bias,
k, k_resid, D, Du,
dropout_mask,
gelu, fft_size
):
x1x2v = linear(u)
H = x1x2v.shape[-1] // 3
seqlen = x1x2v.shape[-2]
x1x2v = x1x2v.transpose(-1, -2)
x1x2v_c = torch.nn.functional.conv1d(
x1x2v,
torch.flip(torch.cat([x1_s, x2_s, v_s], dim=0), dims=(-1,)).unsqueeze(1), # torch.flip to match our short conv
bias=torch.cat([x1_s_bias, x2_s_bias, v_s_bias], dim=0), padding=x1_s.shape[-1] - 1, groups=x1x2v.shape[1]
)[..., :seqlen]
x1 = x1x2v_c[:, :H, :]
x2 = x1x2v_c[:, H:2*H, :]
v = x1x2v_c[:, 2*H:, :]
x1 = x1 * v
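# FFT-based long convolution with the filter k: multiply in the frequency domain,
# inverse-transform, and keep the first seqlen outputs. Zero-padding to fft_size
# (2 * L in the benchmark below) makes the circular convolution act as a linear one
# over the kept span.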
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
x1_f = torch.fft.rfft(x1.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(x1_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
# y.shape: B H L
out = y + x1 * D[None, :, None]
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
out = (out * rearrange(dropout_mask, "b H -> b H 1")).to(dtype=x1.dtype)
else:
out = out.to(dtype=x1.dtype)
out = out * x2
u = u.transpose(-1, -2)
u_f = torch.fft.rfft(u, n=fft_size)
k_resid_f = torch.fft.rfft(k_resid, n=fft_size) / fft_size
out = out + torch.fft.irfft(u_f * k_resid_f, n=fft_size, norm="forward")[..., :seqlen] + Du[None, :, None] * u
out = out.transpose(-1, -2)
return out_linear(out)
def ref_hyena_filter(
z, sin_freq, eo_mat, eo_bias, oo1_mat, oo1_bias, oo2_mat,
oo2_bias, oh_mat, t, deltas, shift
):
out = torch.bmm(z, eo_mat) + eo_bias.unsqueeze(1)
out = torch.sin(out * sin_freq)
out = torch.bmm(out, oo1_mat) + oo1_bias.unsqueeze(1)
out = torch.sin(out * sin_freq)
out = torch.bmm(out, oo2_mat) + oo2_bias.unsqueeze(1)
out = torch.sin(out * sin_freq)
out = torch.bmm(out, oh_mat)
out = out * torch.exp(-t * deltas.abs()) + shift
return out
def fast_mm_block(
u,
linear, out_linear,
x1_s, x2_s, v_s,
x1_s_bias, x2_s_bias, v_s_bias,
k, k_resid, D, Du,
dropout_mask,
gelu, fft_size
):
x1x2v = linear(u)
H = x1x2v.shape[-1] // 3
x1, x2, v = x1x2v.split(H, dim=-1)
x1 = x1.transpose(1, 2).contiguous()
x2 = x2.transpose(1, 2).contiguous()
v = v.transpose(1, 2).contiguous()
u = u.transpose(1, 2).contiguous()
k_f = torch.fft.rfft(k, n=fft_size)
k_residual_f = torch.fft.rfft(k_resid, n=fft_size)
out = mm_block_fwd(
x1, x2, v,
x1_s, x2_s, v_s,
x1_s_bias, x2_s_bias, v_s_bias,
k_f, None, u, k_residual_f, Du, D, dropout_mask, gelu, fft_size,
False, False
)
out = out.transpose(1, 2)
return out_linear(out)
def fast_hyena_filter(
z, sin_freq, eo_mat, eo_bias, oo1_mat, oo1_bias, oo2_mat,
oo2_bias, oh_mat, min_delay, max_delay, shift, reverse_vec
):
k = hyena_filter_fwd(
z, sin_freq, eo_mat, eo_bias, oo1_mat, oo1_bias, oo2_mat, oo2_bias, reverse_vec, None
)
k = torch.bmm(k, oh_mat)
return exp_mod_in_place_fwd(k, reverse_vec, min_delay, max_delay, shift)
B = 64
H = 768
L = 128
fftsize = 2 * L
device = 'cuda'
repeats = 30
short_conv_width = 4
gelu = True
torch.manual_seed(19)
u = torch.randn(B, L, H, device=device)
in_linear = torch.nn.Linear(H, 3 * H).to(device=device)
out_linear = torch.nn.Linear(H, H).to(device=device)
short_filter = torch.nn.Conv1d(3 * H, 3 * H, kernel_size=short_conv_width, padding=short_conv_width - 1, groups=3 * H, device=device)
x1_s = short_filter.weight[:H, 0, :]
x2_s = short_filter.weight[H:2*H, 0, :]
v_s = short_filter.weight[2*H:3*H, 0, :]
x1_s_bias = torch.zeros(short_filter.bias[:H].shape).to(device=device)
x2_s_bias = torch.zeros(short_filter.bias[H:2*H].shape).to(device=device)
v_s_bias = torch.zeros(short_filter.bias[2*H:3*H].shape).to(device=device)
k = torch.randn(H, L, device=device)
k_resid = torch.randn(H, L, device=device)
D = torch.randn(H, device=device)
Du = torch.randn(H, device=device)
out_ref = ref_mm_block(u, in_linear, out_linear, x1_s, x2_s, v_s, x1_s_bias, x2_s_bias, v_s_bias, k, k_resid, D, Du, None, gelu, fftsize)
out = fast_mm_block(u, in_linear, out_linear, x1_s, x2_s, v_s, x1_s_bias, x2_s_bias, v_s_bias, k, k_resid, D, Du, None, gelu, fftsize)
diff = (out_ref - out).abs().flatten()
argmax_diff = diff.argmax()
print("max diff for mm block:", diff[argmax_diff])
print("average diff for mm block:", diff.mean())
order = 128
emb_dim = 5
min_delay = math.log(1e-2) / 1.5
max_delay = math.log(1e-2) / 0.3
shift = 0.
z = torch.randn(1, L, emb_dim, device=device) * .02
sin_freq = torch.randn(1, order, device=device) * .02
eo_mat = torch.randn(1, emb_dim, order, device=device) * .02
eo_bias = torch.randn(1, order, device=device) * .02
oo1_mat = torch.randn(1, order, order, device=device) * .02
oo1_bias = torch.randn(1, order, device=device) * .02
oo2_mat = torch.randn(1, order, order, device=device) * .02
oo2_bias = torch.randn(1, order, device=device) * .02
oh_mat = torch.randn(1, order, H, device=device) * .02
reverse_vec = torch.zeros(1, device=device, dtype=torch.int32)
deltas = torch.linspace(min_delay, max_delay, H, device=device)[None, None]
t = torch.linspace(0, 1, L, device=device)[None, :, None]
out_ref = ref_hyena_filter(
z, sin_freq, eo_mat, eo_bias, oo1_mat, oo1_bias, oo2_mat,
oo2_bias, oh_mat, t, deltas, shift)
out = fast_hyena_filter(
z, sin_freq, eo_mat, eo_bias, oo1_mat, oo1_bias, oo2_mat,
oo2_bias, oh_mat, min_delay, max_delay, shift, reverse_vec)
diff = (out_ref - out).abs().flatten()
argmax_diff = diff.argmax()
print("max diff:", diff[argmax_diff])
print("avg diff:", diff.mean())
|
m2-main
|
csrc/flashmm/test_flash_mm.py
|
import math
import re
import numpy as np
# N = 8192
N = 16384
# The case of 0 / N is special: we want to simplify it to 0 / 2 instead of 0 / 1
numerator = np.arange(1, N // 8 + 1)
gcd = np.gcd(numerator, N)
num = numerator // gcd
denom = N // gcd
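# Each LUT entry T_d_n names the twiddle factor exp(-2*pi*i * n / d); reducing n/N by the
# gcd lets entries reuse constants that the cuFFTDx header already defines for smaller
# denominators (see the check against lut_defines below).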
lut_vals = ['T_2_0'] + [f'T_{d}_{n}' for n, d in zip(num, denom)]
lut_string = f"static const __device__ float2 lut_mine_sp_8_{N}[{N // 8 + 1}] = {{\n {','.join(lut_vals)}\n}};"
print(lut_string)
# Only define new values if they're not already in the cuFFTDx lookup table
cufftdx_lut_filename = 'mathdx/22.02/include/cufftdx/include/database/lut_defines_0.hpp.inc'
matches = set()
reg = re.compile(f'^#define T_{N}_([0-9]+) ')
with open(cufftdx_lut_filename, 'r') as f:
for line in f:
if (match := reg.match(line)) is not None:
matches.add(int(match[1]))
numerator = np.arange(1, N // 8 + 1, 2)
angle = -2 * math.pi * numerator.astype(np.float64) / N
cos, sin = np.cos(angle), np.sin(angle)
defs = [f'#define T_{N}_{n} {{{c:.40f},{s:.40f}}}' for n, c, s in zip(numerator, cos, sin) if n not in matches]
def_string = '\n'.join(defs)
print(def_string)
|
m2-main
|
csrc/flashmm/lut_code_gen.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
import os
import sys
from typing import Optional, cast
# Add folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import src.hf_bert as hf_bert_module
import src.create_bert as bert_module
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
import torch
from src.benchmark.benchmark import benchmark_forward
def build_model(cfg: DictConfig):
if cfg.name == 'hf_bert':
return hf_bert_module.create_hf_bert_mlm(
pretrained_model_name=cfg.pretrained_model_name,
use_pretrained=cfg.get('use_pretrained', None),
model_config=cfg.get('model_config', None),
tokenizer_name=cfg.get('tokenizer_name', None),
gradient_checkpointing=cfg.get('gradient_checkpointing', None))
elif cfg.name == 'bert':
return bert_module.create_bert_mlm(
pretrained_model_name=cfg.pretrained_model_name,
pretrained_checkpoint=cfg.get('pretrained_checkpoint', None),
model_config=cfg.get('model_config', None),
tokenizer_name=cfg.get('tokenizer_name', None),
gradient_checkpointing=cfg.get('gradient_checkpointing', None))
else:
raise ValueError(f'Not sure how to build model with name={cfg.name}')
def run_bert(model, u, attn_mask):
encoder_outputs = model.model.bert.encoder(u, attn_mask)
output = model.model.cls(encoder_outputs[0])
return output
def main(cfg: DictConfig,
return_trainer: bool = False,
do_train: bool = True):
print('Using config: ')
print(om.to_yaml(cfg))
# Build Model
print('Initializing model...')
model = build_model(cfg.model).cuda()
B = cfg.device_train_microbatch_size
# B = 32
L = cfg.max_seq_len
print('Batch size: ', B)
print('max seq len: ', L)
if 'hidden_size' not in cfg.model.model_config:
H = 768
else:
H = cfg.model.model_config.hidden_size
u = torch.randn(B, L, H).cuda()
if cfg.model.name == 'bert':
attention_mask = torch.ones(B, L, dtype=torch.int64).cuda()
else:
attention_mask = torch.ones(L, L, dtype=torch.int64).cuda()
# model.model.bert.encoder(u, attention_mask)
# breakpoint()
run_bert(model, u, attention_mask)
repeats = 30
# Run forward pass
print('Running forward pass...')
with torch.autocast(device_type='cuda', dtype=torch.bfloat16, enabled=True):
_, ret = benchmark_forward(run_bert, model, u, attention_mask, repeats=repeats, verbose=True, amp_dtype=torch.bfloat16, amp=True)
time = ret._mean
print('Time: ', time)
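# Assuming `time` is the mean forward-pass latency in seconds, B * L / time is tokens
# per second and the final division by 1000 converts it to tokens per millisecond.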
print('Tokens/ms: ', B*L/time/1000)
if __name__ == '__main__':
yaml_path, args_list = sys.argv[1], sys.argv[2:]
with open(yaml_path) as f:
yaml_cfg = om.load(f)
cli_cfg = om.from_cli(args_list)
cfg = om.merge(yaml_cfg, cli_cfg)
cfg = cast(DictConfig, cfg) # for type checking
main(cfg)
|
m2-main
|
bert/benchmark_fwd.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
import os
import sys
# Add folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
try:
import torch
# yapf: disable
from src.bert_layers import (BertEmbeddings, BertEncoder, BertForMaskedLM,
BertForSequenceClassification,
BertGatedLinearUnitMLP, BertLayer,
BertLMPredictionHead, BertModel,
BertOnlyMLMHead, BertOnlyNSPHead, BertPooler,
BertPredictionHeadTransform, BertSelfOutput,
BertUnpadAttention, BertUnpadSelfAttention)
# yapf: enable
from src.bert_padding import (IndexFirstAxis, IndexPutFirstAxis,
index_first_axis, index_put_first_axis,
pad_input, unpad_input, unpad_input_only)
if torch.cuda.is_available():
from src.flash_attn_triton import \
flash_attn_func as flash_attn_func_bert # type: ignore
from src.flash_attn_triton import \
flash_attn_qkvpacked_func as flash_attn_qkvpacked_func_bert # type: ignore
from src.hf_bert import create_hf_bert_classification, create_hf_bert_mlm
from src.mosaic_bert import (create_mosaic_bert_classification,
create_mosaic_bert_mlm)
except ImportError as e:
try:
is_cuda_available = torch.cuda.is_available() # type: ignore
except:
is_cuda_available = False
reqs_file = 'requirements.txt' if is_cuda_available else 'requirements-cpu.txt'
raise ImportError(
f'Please make sure to pip install -r {reqs_file} to get the requirements for the BERT benchmark.'
) from e
__all__ = [
'BertEmbeddings',
'BertEncoder',
'BertForMaskedLM',
'BertForSequenceClassification',
'BertGatedLinearUnitMLP',
'BertLayer',
'BertLMPredictionHead',
'BertModel',
'BertOnlyMLMHead',
'BertOnlyNSPHead',
'BertPooler',
'BertPredictionHeadTransform',
'BertSelfOutput',
'BertUnpadAttention',
'BertUnpadSelfAttention',
'IndexFirstAxis',
'IndexPutFirstAxis',
'index_first_axis',
'index_put_first_axis',
'pad_input',
'unpad_input',
'unpad_input_only',
'create_hf_bert_classification',
'create_hf_bert_mlm',
'create_mosaic_bert_classification',
'create_mosaic_bert_mlm',
# These are commented out because they only exist if CUDA is available
# 'flash_attn_func_bert',
# 'flash_attn_qkvpacked_func_bert'
]
|
m2-main
|
bert/__init__.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
import copy
import gc
import multiprocessing as mp
import os
import sys
import time
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor as Pool
from multiprocessing.managers import DictProxy, SyncManager
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
from urllib.parse import urlparse
# Add folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import numpy as np
import omegaconf as om
import src.glue.finetuning_jobs as finetuning_jobs_module
import src.create_bert as bert_module
import src.hf_bert as hf_bert_module
import torch
from composer import algorithms
from composer.callbacks import (HealthChecker, LRMonitor, MemoryMonitor,
OptimizerMonitor, RuntimeEstimator,
SpeedMonitor)
from composer.loggers import WandBLogger
from composer.optim.scheduler import (ConstantWithWarmupScheduler,
CosineAnnealingWithWarmupScheduler,
LinearWithWarmupScheduler)
from composer.utils import reproducibility
from composer.utils.file_helpers import get_file
from composer.utils.object_store import S3ObjectStore
from omegaconf import DictConfig
TASK_NAME_TO_CLASS = {
'mnli': finetuning_jobs_module.MNLIJob,
'rte': finetuning_jobs_module.RTEJob,
'mrpc': finetuning_jobs_module.MRPCJob,
'qnli': finetuning_jobs_module.QNLIJob,
'qqp': finetuning_jobs_module.QQPJob,
'sst2': finetuning_jobs_module.SST2Job,
'stsb': finetuning_jobs_module.STSBJob,
'cola': finetuning_jobs_module.COLAJob,
}
def build_algorithm(name, kwargs):
if name == 'gradient_clipping':
return algorithms.GradientClipping(**kwargs)
elif name == 'alibi':
return algorithms.Alibi(**kwargs)
elif name == 'fused_layernorm':
return algorithms.FusedLayerNorm(**kwargs)
elif name == 'gated_linear_units':
return algorithms.GatedLinearUnits(**kwargs)
elif name == 'low_precision_layernorm':
return algorithms.LowPrecisionLayerNorm(**kwargs)
else:
raise ValueError(f'Not sure how to build algorithm: {name}')
def build_callback(name, kwargs):
if name == 'lr_monitor':
return LRMonitor()
elif name == 'memory_monitor':
return MemoryMonitor()
elif name == 'speed_monitor':
return SpeedMonitor(window_size=kwargs.get('window_size', 1),
gpu_flops_available=kwargs.get(
'gpu_flops_available', None))
elif name == 'runtime_estimator':
return RuntimeEstimator()
elif name == 'optimizer_monitor':
return OptimizerMonitor(log_optimizer_metrics=kwargs.get(
'log_optimizer_metrics', True),)
elif name == 'health_checker':
return HealthChecker(**kwargs)
else:
raise ValueError(f'Not sure how to build callback: {name}')
def build_logger(name, kwargs):
if name == 'wandb':
return WandBLogger(**kwargs)
else:
raise ValueError(f'Not sure how to build logger: {name}')
def build_scheduler(cfg):
if cfg.name == 'constant_with_warmup':
return ConstantWithWarmupScheduler(t_warmup=cfg.t_warmup)
elif cfg.name == 'cosine_with_warmup':
return CosineAnnealingWithWarmupScheduler(t_warmup=cfg.t_warmup,
alpha_f=cfg.alpha_f)
elif cfg.name == 'linear_decay_with_warmup':
return LinearWithWarmupScheduler(t_warmup=cfg.t_warmup,
alpha_f=cfg.alpha_f)
else:
raise ValueError(f'Not sure how to build scheduler: {cfg.name}')
def build_model(cfg: DictConfig, num_labels: int):
if cfg.name == 'hf_bert':
return hf_bert_module.create_hf_bert_classification(
num_labels=num_labels,
pretrained_model_name=cfg.pretrained_model_name,
use_pretrained=cfg.get('use_pretrained', False),
model_config=cfg.get('model_config', None),
tokenizer_name=cfg.get('tokenizer_name', None),
gradient_checkpointing=cfg.get('gradient_checkpointing', None))
elif cfg.name == 'bert':
return bert_module.create_bert_classification(
num_labels=num_labels,
pretrained_model_name=cfg.pretrained_model_name,
pretrained_checkpoint=cfg.get('pretrained_checkpoint', None),
model_config=cfg.get('model_config', None),
tokenizer_name=cfg.get('tokenizer_name', None),
gradient_checkpointing=cfg.get('gradient_checkpointing', None))
else:
raise ValueError(f'Not sure how to build model with name={cfg.name}')
def get_values_from_path(path: str, separator: str = '/') -> Dict[str, str]:
"""Parses out information from a path/string that looks like.
...<separator>key=value<separator...
"""
dict_output = {}
underscore_split = path.split(separator)
for item in underscore_split:
if '=' not in item:
continue
key, value = item.split('=')
dict_output[key] = value
return dict_output
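# Illustrative note (not part of the original script): `get_values_from_path` is
# what `format_job_name` and the GLUE-averaging code below use to recover the
# task and seed from a run name built by `create_job_configs`, e.g.
#     get_values_from_path('glue_task=mnli_seed=19', separator='_')
#     # -> {'task': 'mnli', 'seed': '19'}
# Values come back as strings, so callers cast them if they need numbers.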
def get_checkpoint_name_from_path(path: str) -> str:
"""To go from checkpoint name to path, replace | with /"""
return path.lstrip('/').replace('/', '|')
def download_starting_checkpoint(starting_checkpoint_load_path: str,
local_pretrain_checkpoints_folder: str) -> str:
"""Downloads the pretrained checkpoints to start from.
Currently only supports S3 and URLs
"""
load_object_store = None
parsed_path = urlparse(starting_checkpoint_load_path)
if parsed_path.scheme == 's3':
load_object_store = S3ObjectStore(bucket=parsed_path.netloc)
download_path = parsed_path.path if parsed_path.scheme == 's3' else starting_checkpoint_load_path
os.makedirs(local_pretrain_checkpoints_folder, exist_ok=True)
local_path = os.path.join(local_pretrain_checkpoints_folder,
get_checkpoint_name_from_path(parsed_path.path))
if not os.path.exists(local_path):
get_file(destination=local_path,
path=download_path, #.lstrip('/'),
object_store=load_object_store,
progress_bar=True)
return local_path
def _setup_gpu_queue(num_gpus: int, manager: SyncManager):
"""Returns a queue with [0, 1, ..
num_gpus].
"""
gpu_queue = manager.Queue(num_gpus)
for gpu_id in range(num_gpus):
gpu_queue.put(gpu_id)
return gpu_queue
def create_job_configs(main_config: om.DictConfig, tasks_to_run: Set[str],
pretrained_checkpoint_path: Optional[str]):
configs = []
for task_name, task_config in main_config.tasks.items():
copy_main_config = copy.deepcopy(main_config)
if 'pool_all' in task_config['trainer_kwargs'].keys():
copy_main_config.model['model_config']['pool_all'] = task_config['trainer_kwargs']['pool_all']
# delete from trainer_config
del task_config['trainer_kwargs']['pool_all']
else:
copy_main_config = main_config
main_config = copy.deepcopy(copy_main_config)
if main_config.get('base_run_name') is None:
main_config.base_run_name = os.environ.get('COMPOSER_RUN_NAME',
'glue')
if task_name not in tasks_to_run:
continue
for task_seed in task_config.get('seeds', [main_config.default_seed]):
run_name = f'{main_config.base_run_name}_task={task_name}_seed={str(task_seed)}'
logger_configs = copy.deepcopy(main_config.get('loggers', {}))
for logger_name, logger_config in logger_configs.items():
if logger_name == 'wandb':
# allow user set groups, otherwise set group to run name
if 'group' not in logger_config:
logger_config['group'] = main_config.base_run_name
logger_config['name'] = run_name
task_seed_config = om.OmegaConf.create({
'task':
task_name,
'job_name':
run_name,
'seed':
task_seed,
'model':
main_config.model,
'tokenizer_name':
main_config.tokenizer_name,
'scheduler':
main_config.scheduler,
'load_path':
pretrained_checkpoint_path,
'save_folder':
os.path.join(main_config.save_finetune_checkpoint_folder,
f'task={task_name}', f'seed={task_seed}'),
'loggers':
logger_configs,
'callbacks':
main_config.get('callbacks', {}),
'algorithms':
main_config.get('algorithms', {}),
'precision':
main_config.get('precision', None),
'trainer_kwargs':
task_config.trainer_kwargs,
})
configs.append(task_seed_config)
return configs
def run_job_worker(config: om.DictConfig,
gpu_queue: Optional[mp.Queue] = None,
process_to_gpu: Optional[DictProxy] = None) -> Any:
"""Instantiates the job object and runs it."""
# need to set seed before model initialization for determinism
reproducibility.seed_all(config.seed)
model = build_model(
config.model,
finetuning_jobs_module.TASK_NAME_TO_NUM_LABELS[config.task])
n_params = sum(p.numel() for p in model.parameters())
print(f'{n_params=:.4e}')
instantiated_job = TASK_NAME_TO_CLASS[config.task](
job_name=config.job_name,
seed=config.seed,
model=model,
tokenizer_name=config.tokenizer_name,
scheduler=build_scheduler(config.scheduler),
load_path=config.load_path,
save_folder=config.save_folder,
loggers=[
build_logger(name, logger_config)
for name, logger_config in config.get('loggers', {}).items()
],
callbacks=[
build_callback(name, callback_config)
for name, callback_config in config.get('callbacks', {}).items()
],
algorithms=[
build_algorithm(name, algorithm_config)
for name, algorithm_config in config.get('algorithms', {}).items()
],
precision=config.precision,
**config.trainer_kwargs,
)
results = instantiated_job.run(gpu_queue, process_to_gpu)
# delete the job so that the optimizer and anything else on the gpu gets deleted
del instantiated_job
torch.cuda.empty_cache()
gc.collect()
return results
def run_jobs_parallel(configs: Sequence[om.DictConfig]) -> Dict[str, Any]:
"""Runs a list of jobs (passed in as Hydra configs) across GPUs.
Returns a dictionary mapping job name to the result and original config
Each job's results is a dict of:
* 'checkpoints': list of saved_checkpoints, if any,
* 'metrics': nested dict of results, accessed by
dataset and metric name, e.g.
``metrics['glue_mnli']['MulticlassAccuracy']``.
* 'job_name': The job name, helpful for keeping track of results during multiprocessing
"""
num_gpus = torch.cuda.device_count()
results = []
with mp.Manager() as manager:
# workers get gpu ids from this queue
# to set the GPU to run on
gpu_queue = _setup_gpu_queue(num_gpus, manager)
process_to_gpu = manager.dict()
ctx = mp.get_context('spawn')
with Pool(max_workers=min(num_gpus, len(configs)),
mp_context=ctx) as pool:
results = pool.map(run_job_worker, [config for config in configs],
[gpu_queue for _ in configs],
[process_to_gpu for _ in configs])
job_name_to_config = {config.job_name: config for config in configs}
finished_results = {}
for result in results:
job_name = result['job_name']
finished_results[job_name] = {
'result': result,
'config': job_name_to_config[job_name]
}
return finished_results
def run_jobs_serial(configs) -> Dict[str, Any]:
"""Runs the jobs serially, rather than in parallel.
Useful for debugging
"""
results = {}
for config in configs:
result = run_job_worker(config)
results[config.job_name] = {'result': result, 'config': config}
return results
def format_job_name(job_name: str) -> str:
"""Formats the job name for pretty printing."""
dict_output = get_values_from_path(job_name, separator='_')
return f'{dict_output["task"].upper()}(seed={dict_output["seed"]})'
def _print_table(results: Dict[str, Dict[str, Any]]):
"""Pretty prints a table given a results dictionary."""
header = '{job_name:50}| {eval_task:25}| {name:27}|'
hyphen_count = 50 + 25 + 27 + 11
row_format = header + ' {value:.2f}'
print('\nCollected Job Results: \n')
print('-' * hyphen_count)
print(header.format(job_name='Job', eval_task='Dataset', name='Metric'))
print('-' * hyphen_count)
for job_name, result in results.items():
for eval_task, eval_results in result['result']['metrics'].items():
for name, metric in eval_results.items():
print(
row_format.format(
job_name=format_job_name(job_name),
eval_task=eval_task,
name=name,
value=metric * 100,
))
print('-' * hyphen_count)
print('\n')
def _print_averaged_glue_results(glue_results: List[Tuple[str, float]]) -> None:
"""Pretty prints a table of glue results averaged across seeds."""
header = '{job_name:50}|'
hyphen_count = 50 + 11
row_format = header + ' {value:.2f}'
print('\nCollected Job Results: \n')
print('-' * hyphen_count)
print(header.format(job_name='Task'))
print('-' * hyphen_count)
for task_name, result in glue_results:
print(row_format.format(
job_name=f'{task_name.upper()}',
value=result,
))
print('-' * hyphen_count)
print('\n')
def train(config: om.DictConfig) -> None:
"""Main training logic.
Args:
config (DictConfig): Configuration composed by OmegaConf
"""
start_time = time.time()
# Initial default seed
reproducibility.seed_all(config.default_seed)
# Quiet down WandB
os.environ['WANDB_SILENT'] = 'true'
# Set tokenizer parallelism
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
# Confirm GPUs if parallel=True
if config.parallel:
assert torch.cuda.device_count(
) > 0, 'Can only use parallel mode if GPUs are available. Please set parallel=False.'
# Downloads the starting checkpoint ahead of time so that
# the different tasks don't all try to download it at the same time
if config.get('starting_checkpoint_load_path', None):
local_pretrain_checkpoint_path = download_starting_checkpoint(
config.starting_checkpoint_load_path,
config.local_pretrain_checkpoint_folder)
else:
local_pretrain_checkpoint_path = None
# Builds round 1 configs and runs them
    if config.get('starting_checkpoint_load_path') and 'mnli' in config.starting_checkpoint_load_path:
round_1_task_names = {'rte', 'mrpc', 'stsb'}
print(f"Starting from MNLI checkpoint, only running round 1 tasks: {round_1_task_names}")
else:
round_1_task_names = {'cola', 'sst2', 'qqp', 'qnli', 'mnli', 'rte', 'mrpc', 'stsb'}
round_1_job_configs = create_job_configs(config, round_1_task_names,
local_pretrain_checkpoint_path)
round_1_results = {}
if len(round_1_job_configs) > 0:
if config.parallel:
round_1_results = run_jobs_parallel(round_1_job_configs)
else:
round_1_results = run_jobs_serial(round_1_job_configs)
end_time = time.time()
print('-' * 30)
print(f'Training completed in {(end_time-start_time):.2f} seconds')
print('-' * 30)
# Join the results and pretty print them
all_results = {}
all_results.update(round_1_results)
# all_results.update(round_2_results)
_print_table(all_results)
# Average the GLUE results across seeds and pretty print them
glue_results: Dict[str, List[float]] = defaultdict(list)
for job_name, result in all_results.items():
job_values = get_values_from_path(job_name, separator='_')
for _, eval_results in result['result']['metrics'].items():
for _, metric in eval_results.items():
glue_results[job_values['task']].append(metric * 100)
glue_results_mean: Dict[str, float] = {
key: float(np.mean(values)) for key, values in glue_results.items()
}
overall_glue = []
for _, average_metric in glue_results_mean.items():
overall_glue.append(average_metric)
glue_results_mean['glue'] = float(np.mean(overall_glue))
_print_averaged_glue_results([
(key, value) for key, value in glue_results_mean.items()
])
if __name__ == '__main__':
yaml_path, args_list = sys.argv[1], sys.argv[2:]
with open(yaml_path) as f:
yaml_cfg = om.OmegaConf.load(f)
cli_cfg = om.OmegaConf.from_cli(args_list)
cfg = om.OmegaConf.merge(yaml_cfg, cli_cfg)
assert isinstance(cfg, om.DictConfig)
train(cfg)
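# Usage sketch (hypothetical paths, not part of the original script): the script
# expects a YAML config path followed by optional dotted CLI overrides that are
# merged on top of it, e.g.
#     python glue.py yamls/glue.yaml parallel=false default_seed=17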
|
m2-main
|
bert/glue.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
import os
import sys
from typing import Optional, cast
# Add folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import src.hf_bert as hf_bert_module
import src.create_bert as bert_module
import src.text_data as text_data_module
from src.optim.create_param_groups import create_param_groups
from composer import Trainer, algorithms
from composer.callbacks import (HealthChecker, LRMonitor, MemoryMonitor,
OptimizerMonitor, RuntimeEstimator,
SpeedMonitor)
from composer.loggers import WandBLogger
from composer.optim import DecoupledAdamW
from composer.optim.scheduler import (ConstantWithWarmupScheduler,
CosineAnnealingWithWarmupScheduler,
LinearWithWarmupScheduler)
from composer.utils import dist, reproducibility
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
def update_batch_size_info(cfg: DictConfig):
global_batch_size, device_microbatch_size = cfg.global_train_batch_size, cfg.device_train_microbatch_size
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
            f'Global batch size {global_batch_size} is not divisible by {dist.get_world_size()}; '
            'as a result, the batch size would be truncated. Please adjust `global_train_batch_size` '
            f'to be divisible by the world size, {dist.get_world_size()}.')
device_train_batch_size = global_batch_size // dist.get_world_size()
if isinstance(device_microbatch_size, int):
if device_microbatch_size > device_train_batch_size:
print(
f'WARNING: device_train_microbatch_size > device_train_batch_size, '
f'will be reduced from {device_microbatch_size} -> {device_train_batch_size}.'
)
device_microbatch_size = device_train_batch_size
cfg.n_gpus = dist.get_world_size()
cfg.device_train_batch_size = device_train_batch_size
cfg.device_train_microbatch_size = device_microbatch_size
# Safely set `device_eval_batch_size` if not provided by user
if 'device_eval_batch_size' not in cfg:
if cfg.device_train_microbatch_size == 'auto':
cfg.device_eval_batch_size = 1
else:
cfg.device_eval_batch_size = cfg.device_train_microbatch_size
return cfg
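# Worked example (hypothetical numbers, not part of the original script): with
# global_train_batch_size=4096, a world size of 8, and
# device_train_microbatch_size=128, `update_batch_size_info` sets
#     device_train_batch_size      = 4096 // 8 = 512   # per-rank batch
#     device_train_microbatch_size = 128               # i.e. 4 gradient-accumulation steps
#     device_eval_batch_size       = 128               # defaults to the microbatch size
# A microbatch size larger than 512 would be clamped down to 512, and with
# device_train_microbatch_size='auto' the eval batch size falls back to 1.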
def log_config(cfg: DictConfig):
print(om.to_yaml(cfg))
if 'wandb' in cfg.get('loggers', {}):
try:
import wandb
except ImportError as e:
raise e
if wandb.run:
wandb.config.update(om.to_container(cfg, resolve=True))
def build_algorithm(name, kwargs):
if name == 'gradient_clipping':
return algorithms.GradientClipping(**kwargs)
elif name == 'alibi':
return algorithms.Alibi(**kwargs)
elif name == 'fused_layernorm':
return algorithms.FusedLayerNorm(**kwargs)
elif name == 'gated_linear_units':
return algorithms.GatedLinearUnits(**kwargs)
elif name == 'low_precision_layernorm':
return algorithms.LowPrecisionLayerNorm(**kwargs)
else:
raise ValueError(f'Not sure how to build algorithm: {name}')
def build_callback(name, kwargs):
if name == 'lr_monitor':
return LRMonitor()
elif name == 'memory_monitor':
return MemoryMonitor()
elif name == 'speed_monitor':
return SpeedMonitor(window_size=kwargs.get('window_size', 1),
gpu_flops_available=kwargs.get(
'gpu_flops_available', None))
elif name == 'runtime_estimator':
return RuntimeEstimator()
elif name == 'optimizer_monitor':
return OptimizerMonitor(log_optimizer_metrics=kwargs.get(
'log_optimizer_metrics', True),)
elif name == 'health_checker':
return HealthChecker(**kwargs)
else:
raise ValueError(f'Not sure how to build callback: {name}')
def build_logger(name, kwargs):
if name == 'wandb':
return WandBLogger(**kwargs)
else:
raise ValueError(f'Not sure how to build logger: {name}')
def build_scheduler(cfg):
if cfg.name == 'constant_with_warmup':
return ConstantWithWarmupScheduler(t_warmup=cfg.t_warmup)
elif cfg.name == 'cosine_with_warmup':
return CosineAnnealingWithWarmupScheduler(t_warmup=cfg.t_warmup,
alpha_f=cfg.alpha_f)
elif cfg.name == 'linear_decay_with_warmup':
return LinearWithWarmupScheduler(t_warmup=cfg.t_warmup,
alpha_f=cfg.alpha_f)
else:
raise ValueError(f'Not sure how to build scheduler: {cfg.name}')
def build_optimizer(cfg, model):
if cfg.name == 'decoupled_adamw':
return DecoupledAdamW(create_param_groups(cfg, model),
lr=cfg.lr,
betas=cfg.betas,
eps=cfg.eps,
weight_decay=cfg.weight_decay)
elif cfg.name == 'adamw':
from torch.optim import AdamW
return AdamW(create_param_groups(None, model),
lr=cfg.lr,
betas=cfg.betas,
eps=cfg.eps,
weight_decay=cfg.weight_decay)
else:
raise ValueError(f'Not sure how to build optimizer: {cfg.name}')
def build_dataloader(cfg, tokenizer, device_batch_size):
if cfg.name == 'text':
return text_data_module.build_text_dataloader(cfg, tokenizer,
device_batch_size)
else:
raise ValueError(f'Not sure how to build dataloader with config: {cfg}')
def build_model(cfg: DictConfig):
if cfg.name == 'hf_bert':
return hf_bert_module.create_hf_bert_mlm(
pretrained_model_name=cfg.pretrained_model_name,
use_pretrained=cfg.get('use_pretrained', None),
model_config=cfg.get('model_config', None),
tokenizer_name=cfg.get('tokenizer_name', None),
gradient_checkpointing=cfg.get('gradient_checkpointing', None))
elif cfg.name == 'bert':
return bert_module.create_bert_mlm(
pretrained_model_name=cfg.pretrained_model_name,
pretrained_checkpoint=cfg.get('pretrained_checkpoint', None),
model_config=cfg.get('model_config', None),
tokenizer_name=cfg.get('tokenizer_name', None),
gradient_checkpointing=cfg.get('gradient_checkpointing', None))
else:
raise ValueError(f'Not sure how to build model with name={cfg.name}')
def main(cfg: DictConfig,
return_trainer: bool = False,
do_train: bool = True) -> Optional[Trainer]:
print('Training using config: ')
print(om.to_yaml(cfg))
reproducibility.seed_all(cfg.seed)
# Get batch size info
cfg = update_batch_size_info(cfg)
# Build Model
print('Initializing model...')
model = build_model(cfg.model)
n_params = sum(p.numel() for p in model.parameters())
print(f'{n_params=:.4e}')
# Dataloaders
print('Building train loader...')
train_loader = build_dataloader(
cfg.train_loader,
model.tokenizer,
cfg.global_train_batch_size // dist.get_world_size(),
)
print('Building eval loader...')
global_eval_batch_size = cfg.get('global_eval_batch_size',
cfg.global_train_batch_size)
eval_loader = build_dataloader(
cfg.eval_loader,
model.tokenizer,
global_eval_batch_size // dist.get_world_size(),
)
# Optimizer
optimizer = build_optimizer(cfg.optimizer, model)
# Scheduler
scheduler = build_scheduler(cfg.scheduler)
# Loggers
loggers = [
build_logger(name, logger_cfg)
for name, logger_cfg in cfg.get('loggers', {}).items()
]
# Callbacks
callbacks = [
build_callback(name, callback_cfg)
for name, callback_cfg in cfg.get('callbacks', {}).items()
]
# Algorithms
algorithms = [
build_algorithm(name, algorithm_cfg)
for name, algorithm_cfg in cfg.get('algorithms', {}).items()
]
if cfg.get('run_name') is None:
cfg.run_name = os.environ.get('COMPOSER_RUN_NAME', 'bert')
# Build the Trainer
trainer = Trainer(
run_name=cfg.run_name,
seed=cfg.seed,
model=model,
algorithms=algorithms,
train_dataloader=train_loader,
eval_dataloader=eval_loader,
train_subset_num_batches=cfg.get('train_subset_num_batches', -1),
eval_subset_num_batches=cfg.get('eval_subset_num_batches', -1),
optimizers=optimizer,
schedulers=scheduler,
max_duration=cfg.max_duration,
eval_interval=cfg.eval_interval,
progress_bar=cfg.progress_bar,
log_to_console=cfg.log_to_console,
console_log_interval=cfg.console_log_interval,
loggers=loggers,
callbacks=callbacks,
precision=cfg.precision,
device=cfg.get('device', None),
device_train_microbatch_size=cfg.get('device_train_microbatch_size',
'auto'),
save_folder=cfg.get('save_folder', None),
save_interval=cfg.get('save_interval', '1000ba'),
save_num_checkpoints_to_keep=cfg.get('save_num_checkpoints_to_keep',
-1),
save_overwrite=cfg.get('save_overwrite', False),
load_path=cfg.get('load_path', None),
load_weights_only=cfg.get('load_weights_only', False),
python_log_level=cfg.get('python_log_level', None),
autoresume=True,
)
print('Logging config...')
log_config(cfg)
if do_train:
print('Starting training...')
trainer.fit()
if return_trainer:
return trainer
if __name__ == '__main__':
yaml_path, args_list = sys.argv[1], sys.argv[2:]
with open(yaml_path) as f:
yaml_cfg = om.load(f)
cli_cfg = om.from_cli(args_list)
cfg = om.merge(yaml_cfg, cli_cfg)
cfg = cast(DictConfig, cfg) # for type checking
main(cfg)
|
m2-main
|
bert/main.py
|
from transformers import BertConfig as TransformersBertConfig
class BertConfig(TransformersBertConfig):
def __init__(
self,
alibi_starting_size: int = 512,
attention_probs_dropout_prob: float = 0.0,
# mlp
use_glu_mlp: bool = True,
use_monarch_mlp: bool = False,
monarch_mlp_nblocks: int = 4,
# position
use_positional_encodings: bool = False,
max_position_embeddings: int = 512,
# architecture selection
monarch_mixer_sequence_mixing: bool = False,
residual_long_conv: bool = False,
# hyena and long conv hyperparameters
bidirectional: bool = True,
hyena_w_mod: int = 1,
hyena_filter_dropout: float = 0.2,
hyena_filter_order: int = 64,
hyena_training_additions: bool = False,
# efficiency
use_flash_mm: bool = False,
# average pooling instead of CLS token
pool_all: bool = False,
**kwargs,
):
"""Configuration class for MosaicBert.
Args:
alibi_starting_size (int): Use `alibi_starting_size` to determine how large of an alibi tensor to
create when initializing the model. You should be able to ignore this parameter in most cases.
Defaults to 512.
        attention_probs_dropout_prob (float): Attention dropout is turned off by default because the
            Flash Attention kernel does not support it; raising it above 0.0 falls back to the slower
            PyTorch attention path. Defaults to 0.0.
"""
super().__init__(
attention_probs_dropout_prob=attention_probs_dropout_prob, **kwargs)
self.alibi_starting_size = alibi_starting_size
# mlp
self.use_glu_mlp = use_glu_mlp
self.use_monarch_mlp = use_monarch_mlp
self.monarch_mlp_nblocks = monarch_mlp_nblocks
# positional encodings
self.use_positional_encodings = use_positional_encodings
self.max_position_embeddings = max_position_embeddings
# architecture
self.monarch_mixer_sequence_mixing = monarch_mixer_sequence_mixing
self.residual_long_conv = residual_long_conv
# hyena and long conv hyperparameters
self.bidirectional = bidirectional
self.hyena_w_mod = hyena_w_mod
self.hyena_filter_dropout = hyena_filter_dropout
self.hyena_filter_order = hyena_filter_order
self.hyena_training_additions = hyena_training_additions
# efficiency
self.use_flash_mm = use_flash_mm
# average pooling instead of CLS token
self.pool_all = pool_all
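# Minimal usage sketch (illustrative values, not from the original file). The
# Monarch Mixer path is selected entirely through this config; any extra
# hyperparameters the sequence mixer reads (e.g. long_conv_l_max) are passed
# through **kwargs onto the underlying Hugging Face config:
#     config = BertConfig(
#         monarch_mixer_sequence_mixing=True,  # M2 sequence mixing instead of attention
#         use_monarch_mlp=True,                # block-diagonal (Monarch) MLP weights
#         monarch_mlp_nblocks=4,
#         use_glu_mlp=True,                    # gated-linear-unit MLP variant
#         pool_all=False,                      # CLS pooling rather than mean pooling
#     )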
|
m2-main
|
bert/src/configuration_bert.py
|
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2022, Tri Dao.
# Copyright (c) 2023, MosaicML.
# Copyright (c) 2023, Dan Fu and Simran Arora.
import copy
import logging
import math
import os
import sys
import warnings
from typing import List, Optional, Tuple, Union
from functools import partial
# Add folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import bert_padding as bert_padding_module
import torch
import torch.nn as nn
from einops import rearrange
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (MaskedLMOutput,
SequenceClassifierOutput)
from transformers.models.bert.modeling_bert import BertPreTrainedModel
try:
import flash_attn_triton as flash_attn_triton
flash_attn_qkvpacked_func = flash_attn_triton.flash_attn_qkvpacked_func
except ImportError as e:
flash_attn_qkvpacked_func = None
from src.mm.blockdiag_linear import BlockdiagLinear
from src.mm.monarch_mixer_sequence_mixer import MonarchMixerSequenceMixing
logger = logging.getLogger(__name__)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class BertEmbeddings(nn.Module):
"""Construct the embeddings for words, ignoring position.
There are no positional embeddings since we use ALiBi and token_type
embeddings.
This module is modeled after the Hugging Face BERT's
:class:`~transformers.model.bert.modeling_bert.BertEmbeddings`, but is
modified as part of Mosaic BERT's ALiBi implementation. The key change is
that position embeddings are removed. Position information instead comes
from attention biases that scale linearly with the position distance
between query and key tokens.
This module ignores the `position_ids` input to the `forward` method.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size,
config.hidden_size,
padding_idx=config.pad_token_id)
# ALiBi doesn't use position embeddings
if config.use_positional_encodings:
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.use_positional_encodings = config.use_positional_encodings
self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model
# variable name and be able to load any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if config.use_positional_encodings:
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.register_buffer('token_type_ids',
torch.zeros(config.max_position_embeddings,
dtype=torch.long),
persistent=False)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
return_position_encodings: bool = False,
) -> torch.Tensor:
if (input_ids is not None) == (inputs_embeds is not None):
            raise ValueError('Must specify either input_ids or inputs_embeds!')
if input_ids is not None:
input_shape = input_ids.size()
else:
assert inputs_embeds is not None # just for type checking
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
if self.use_positional_encodings:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# Setting the token_type_ids to the registered buffer in constructor
# where it is all zeros, which usually occurs when it's auto-generated;
# registered buffer helps users when tracing the model without passing
# token_type_ids, solves issue #5664
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
assert isinstance(self.token_type_ids, torch.LongTensor)
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded # type: ignore
else:
token_type_ids = torch.zeros(input_shape, # type: ignore
dtype=torch.long,
device=self.word_embeddings.device) # type: ignore # yapf: disable
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.use_positional_encodings:
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
if return_position_encodings:
return embeddings, position_embeddings
else:
return embeddings
class BertUnpadSelfAttention(nn.Module):
"""Performs multi-headed self attention on a batch of unpadded sequences.
If Triton is installed, this module uses Flash Attention to greatly improve throughput.
The Flash Attention implementation used is an adaptation from Mosaic, which supports arbitrary attention biases (
used to implement ALiBi), but does not support attention dropout. If either Triton is not installed
or `config.attention_probs_dropout_prob > 0`, the implementation will default to a
math-equivalent pytorch version, which is much slower.
See `forward` method for additional detail.
"""
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
config, 'embedding_size'):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention '
f'heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size /
config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.p_dropout = config.attention_probs_dropout_prob
self.Wqkv = nn.Linear(self.all_head_size, 3 * config.hidden_size)
# Warn if defaulting to pytorch because of import issues
if flash_attn_qkvpacked_func is None:
warnings.warn(
'Unable to import Triton; defaulting attention implementation to pytorch (this will reduce throughput when using this model).'
)
def forward(self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor,
max_seqlen_in_batch: int, indices: torch.Tensor,
attn_mask: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
"""Perform self-attention.
If dropout is zero, then we can use the Triton kernel, so we do that. However, if not, we send through a standard PyTorch
implementation of self-attention.
The arguments are unpadded, and our implementations of attention require padded arguments,
so we first call `pad_input`. Once we compute attention, we re-unpad our outputs for the other layers.
        The pad/unpad operations add overhead, but not sending pad tokens through the feed-forward layers saves compute.
It is possible to write an unpadded implementation of attention (in Triton and PyTorch), which we will eventually do.
Args:
hidden_states: (total_nnz, dim)
cu_seqlens: (batch + 1,)
max_seqlen_in_batch: int
indices: (total_nnz,)
attn_mask: (batch, max_seqlen_in_batch)
bias: (batch, heads, max_seqlen_in_batch, max_seqlen_in_batch)
Returns:
attention: (total_nnz, dim)
"""
qkv = self.Wqkv(hidden_states)
qkv = bert_padding_module.pad_input(
qkv, indices, cu_seqlens.shape[0] - 1,
max_seqlen_in_batch) # batch, max_seqlen_in_batch, thd
qkv = rearrange(qkv,
'b s (t h d) -> b s t h d',
t=3,
h=self.num_attention_heads)
if self.p_dropout or flash_attn_qkvpacked_func is None:
# if we have nonzero attention dropout (e.g. during fine-tuning) or no Triton, compute attention in PyTorch
q = qkv[:, :, 0, :, :].permute(0, 2, 1, 3) # b h s d
k = qkv[:, :, 1, :, :].permute(0, 2, 3, 1) # b h d s
v = qkv[:, :, 2, :, :].permute(0, 2, 1, 3) # b h s d
attention_scores = torch.matmul(q, k) / math.sqrt(
self.attention_head_size)
attention_scores = attention_scores + bias
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
attention = torch.matmul(attention_probs, v).permute(0, 2, 1,
3) # b s h d
else:
# Triton implementation only supports 0 attention dropout
convert_dtype = qkv.dtype not in [torch.float16, torch.bfloat16]
if convert_dtype:
# Triton implementation only supports fp16 and bf16
orig_dtype = qkv.dtype
qkv = qkv.to(torch.float16)
bias_dtype = bias.dtype
bias = bias.to(torch.float16)
attention = flash_attn_qkvpacked_func(qkv, bias)
attention = attention.to(orig_dtype)
bias = bias.to(bias_dtype)
else:
attention = flash_attn_qkvpacked_func(qkv, bias)
# attn_mask is 1 for attend and 0 for don't
attention = bert_padding_module.unpad_input_only(
attention,
torch.squeeze(attn_mask) == 1)
return rearrange(attention, 'nnz h d -> nnz (h d)')
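# Shape sketch (illustrative, not part of the original module): with two
# sequences of lengths 3 and 5 in a batch, the unpadded ("nnz") layout this
# module consumes and produces looks like
#     hidden_states: (total_nnz, dim) = (8, dim)
#     cu_seqlens:    tensor([0, 3, 8])   # prefix sums of the sequence lengths
#     indices:       positions of the 8 real tokens within the (2, max_seqlen) grid
# `pad_input` scatters the 8 rows back into a (2, max_seqlen, ...) tensor before
# attention, and `unpad_input_only` drops the padding rows again afterwards.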
class BertSelfOutput(nn.Module):
"""Computes the output of the attention layer."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor,
input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertUnpadAttention(nn.Module):
"""Chains attention, Dropout, and LayerNorm for BERT."""
def __init__(self, config):
super().__init__()
self.self = BertUnpadSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(
self,
input_tensor: torch.Tensor,
cu_seqlens: torch.Tensor,
max_s: int,
subset_idx: Optional[torch.Tensor] = None,
indices: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Forward pass for scaled self-attention without padding.
Arguments:
input_tensor: (total_nnz, dim)
cu_seqlens: (batch + 1,)
max_s: int
subset_idx: () set of indices whose values we care about at the end of the layer
(e.g., the masked tokens, if this is the final layer).
indices: None or (total_nnz,)
attn_mask: None or (batch, max_seqlen_in_batch)
bias: None or (batch, heads, max_seqlen_in_batch, max_seqlen_in_batch)
"""
self_output = self.self(input_tensor, cu_seqlens, max_s, indices,
attn_mask, bias)
if subset_idx is not None:
return self.output(
bert_padding_module.index_first_axis(self_output, subset_idx),
bert_padding_module.index_first_axis(input_tensor, subset_idx))
else:
return self.output(self_output, input_tensor)
class BertMLP(nn.Module):
"""Applies the FFN at the end of each BERT layer."""
def __init__(self, config):
super().__init__()
self.config = config
if self.config.use_monarch_mlp:
linear_cls = partial(BlockdiagLinear, nblocks=self.config.monarch_mlp_nblocks)
else:
linear_cls = nn.Linear
self.gated_layers = linear_cls(config.hidden_size,
config.intermediate_size,
bias=False)
self.act = nn.GELU(approximate='none')
self.wo = linear_cls(config.intermediate_size, config.hidden_size)
self.layernorm = nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""Compute new hidden states from current hidden states.
Args:
hidden_states (torch.Tensor): The (unpadded) hidden states from
the attention layer [nnz, dim].
"""
residual_connection = hidden_states
hidden_states = self.gated_layers(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
hidden_states = self.layernorm(hidden_states + residual_connection)
return hidden_states
class BertGatedLinearUnitMLP(nn.Module):
"""Applies the FFN at the end of each BERT layer with a Gated Linear Unit"""
def __init__(self, config):
super().__init__()
self.config = config
self.is_padded = config.monarch_mixer_sequence_mixing
if self.config.use_monarch_mlp:
linear_cls = partial(BlockdiagLinear, nblocks=self.config.monarch_mlp_nblocks)
else:
linear_cls = nn.Linear
self.gated_layers = linear_cls(
config.hidden_size,
config.intermediate_size * 2,
bias=False
)
self.act = nn.GELU(approximate='none')
self.wo = linear_cls(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.layernorm = nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""Compute new hidden states from current hidden states.
Args:
hidden_states (torch.Tensor): The (unpadded) hidden states from
the attention layer [nnz, dim].
"""
residual_connection = hidden_states
# compute the activation
hidden_states = self.gated_layers(hidden_states)
if self.is_padded:
gated = hidden_states[:, :, :self.config.intermediate_size]
non_gated = hidden_states[:, :, self.config.intermediate_size:]
else:
gated = hidden_states[:, :self.config.intermediate_size]
non_gated = hidden_states[:, self.config.intermediate_size:]
hidden_states = self.act(gated) * non_gated
hidden_states = self.dropout(hidden_states)
# multiply by the second matrix
hidden_states = self.wo(hidden_states)
# add the residual connection and post-LN
hidden_states = self.layernorm(hidden_states + residual_connection)
return hidden_states
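# Minimal GLU sketch (illustrative only; the 768/3072 sizes are made up). The
# single `gated_layers` projection produces 2 * intermediate_size features that
# are split into a gated half and a non-gated half:
#     import torch
#     x = torch.randn(4, 768)                        # (nnz, hidden_size)
#     w_in = torch.nn.Linear(768, 2 * 3072, bias=False)
#     w_out = torch.nn.Linear(3072, 768)
#     h = w_in(x)
#     gated, non_gated = h[:, :3072], h[:, 3072:]
#     y = w_out(torch.nn.functional.gelu(gated) * non_gated)  # (4, 768)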
class BertLayer(nn.Module):
"""BERT layer, which includes Sequence Mixing (e.g. Attention or Hyena) and State Mixing (e.g. MLP)."""
def __init__(self, config):
super(BertLayer, self).__init__()
self.monarch_mixer_sequence_mixing = config.monarch_mixer_sequence_mixing
print(f"Using Monarch Mixer for Sequence Mixing: {config.monarch_mixer_sequence_mixing}")
if config.monarch_mixer_sequence_mixing:
if config.use_flash_mm:
from src.mm.flash_mm import FlashMMSequenceMixing
mm_cls = FlashMMSequenceMixing
else:
mm_cls = MonarchMixerSequenceMixing
self.attention = mm_cls(
config.hidden_size,
l_max=config.long_conv_l_max,
hyena_kernel_lr=config.long_conv_kernel_learning_rate,
bidirectional=config.bidirectional,
hyena_lr_pos_emb=config.hyena_lr_pos_emb,
hyena_w=config.hyena_w,
hyena_w_mod=config.hyena_w_mod,
hyena_wd=config.hyena_wd,
hyena_emb_dim=config.hyena_emb_dim,
hyena_filter_dropout=config.hyena_filter_dropout,
hyena_filter_order=config.hyena_filter_order,
residual_long_conv=config.residual_long_conv,
hyena_training_additions=config.hyena_training_additions,
)
else:
self.attention = BertUnpadAttention(config)
if config.use_glu_mlp:
self.mlp = BertGatedLinearUnitMLP(config)
else:
self.mlp = BertMLP(config)
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
seqlen: int,
subset_idx: Optional[torch.Tensor] = None,
indices: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Forward pass for a BERT layer, including both attention and MLP.
Args:
hidden_states: (total_nnz, dim)
cu_seqlens: (batch + 1,)
seqlen: int
subset_idx: () set of indices whose values we care about at the end of the layer
(e.g., the masked tokens, if this is the final layer).
indices: None or (total_nnz,)
attn_mask: None or (batch, max_seqlen_in_batch)
bias: None or (batch, heads, max_seqlen_in_batch, max_seqlen_in_batch)
"""
if self.monarch_mixer_sequence_mixing:
attention_output = self.attention(hidden_states)
if type(attention_output) == tuple:
attention_output, _ = attention_output
else:
attention_output = self.attention(hidden_states, cu_seqlens, seqlen,
subset_idx, indices, attn_mask, bias)
layer_output = self.mlp(attention_output)
return layer_output
class BertEncoder(nn.Module):
"""A stack of BERT layers providing the backbone of BERT.
Compared to the analogous Hugging Face BERT module, this module handles unpadding to reduce unnecessary computation
at padded tokens, and pre-computes attention biases to implement ALiBi.
"""
def __init__(self, config):
super().__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList(
[copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
self.monarch_mixer_sequence_mixing = config.monarch_mixer_sequence_mixing
self.num_attention_heads = config.num_attention_heads
if not self.monarch_mixer_sequence_mixing:
# The alibi mask will be dynamically expanded if it is too small for
# the input the model receives. But it generally helps to initialize it
# to a reasonably large size to help pre-allocate CUDA memory.
# The default `alibi_starting_size` is 512.
self._current_alibi_size = int(config.alibi_starting_size)
self.alibi = torch.zeros(
(1, self.num_attention_heads, self._current_alibi_size,
self._current_alibi_size))
self.rebuild_alibi_tensor(size=config.alibi_starting_size)
def rebuild_alibi_tensor(self,
size: int,
device: Optional[Union[torch.device, str]] = None):
# Alibi
# Following https://github.com/ofirpress/attention_with_linear_biases/issues/5 (Implementation 1)
# In the causal case, you can exploit the fact that softmax is invariant to a uniform translation
# of the logits, which makes the math work out *after* applying causal masking. If no causal masking
# will be applied, it is necessary to construct the diagonal mask.
n_heads = self.num_attention_heads
def _get_alibi_head_slopes(n_heads: int) -> List[float]:
def get_slopes_power_of_2(n_heads: int) -> List[float]:
start = (2**(-2**-(math.log2(n_heads) - 3)))
ratio = start
return [start * ratio**i for i in range(n_heads)]
# In the paper, they only train models that have 2^a heads for some a. This function
# has some good properties that only occur when the input is a power of 2. To
# maintain that even when the number of heads is not a power of 2, we use a
# workaround.
if math.log2(n_heads).is_integer():
return get_slopes_power_of_2(n_heads)
closest_power_of_2 = 2**math.floor(math.log2(n_heads))
slopes_a = get_slopes_power_of_2(closest_power_of_2)
slopes_b = _get_alibi_head_slopes(2 * closest_power_of_2)
slopes_b = slopes_b[0::2][:n_heads - closest_power_of_2]
return slopes_a + slopes_b
context_position = torch.arange(size, device=device)[:, None]
memory_position = torch.arange(size, device=device)[None, :]
relative_position = torch.abs(memory_position - context_position)
# [n_heads, max_token_length, max_token_length]
relative_position = relative_position.unsqueeze(0).expand(
n_heads, -1, -1)
slopes = torch.Tensor(_get_alibi_head_slopes(n_heads)).to(device)
alibi = slopes.unsqueeze(1).unsqueeze(1) * -relative_position
# [1, n_heads, max_token_length, max_token_length]
alibi = alibi.unsqueeze(0)
assert alibi.shape == torch.Size([1, n_heads, size, size])
self._current_alibi_size = size
self.alibi = alibi
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_all_encoded_layers: Optional[bool] = True,
subset_mask: Optional[torch.Tensor] = None,
position_encodings: Optional[torch.Tensor] = None,
) -> List[torch.Tensor]:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
attention_mask_bool = attention_mask.bool()
batch, seqlen = hidden_states.shape[:2]
# Unpad inputs and mask. It will remove tokens that are padded.
# Assume ntokens is total number of tokens (padded and non-padded)
# and ntokens_unpad is total number of non-padded tokens.
# Then unpadding performs the following compression of the inputs:
# hidden_states[ntokens,hidden] -> hidden_states[ntokens_unpad,hidden]
if not self.monarch_mixer_sequence_mixing:
hidden_states, indices, cu_seqlens, _ = bert_padding_module.unpad_input(
hidden_states, attention_mask_bool)
else:
cu_seqlens = None
indices = None
# Add alibi matrix to extended_attention_mask
if not self.monarch_mixer_sequence_mixing:
if self._current_alibi_size < seqlen:
# Rebuild the alibi tensor when needed
warnings.warn(
f'Increasing alibi size from {self._current_alibi_size} to {seqlen}'
)
self.rebuild_alibi_tensor(size=seqlen, device=hidden_states.device)
elif self.alibi.device != hidden_states.device:
# Device catch-up
self.alibi = self.alibi.to(hidden_states.device)
alibi_bias = self.alibi[:, :, :seqlen, :seqlen]
attn_bias = extended_attention_mask[:, :, :seqlen, :seqlen]
alibi_attn_mask = attn_bias + alibi_bias
else:
alibi_attn_mask = None
all_encoder_layers = []
if self.monarch_mixer_sequence_mixing:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states,
cu_seqlens,
seqlen,
None,
indices,
attn_mask=attention_mask,
bias=alibi_attn_mask
)
if position_encodings is not None:
hidden_states = hidden_states + position_encodings
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if subset_mask is not None:
hidden_states = hidden_states[subset_mask]
else:
if subset_mask is None:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states,
cu_seqlens,
seqlen,
None,
indices,
attn_mask=attention_mask,
bias=alibi_attn_mask
)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
# Pad inputs and mask. It will insert back zero-padded tokens.
# Assume ntokens is total number of tokens (padded and non-padded)
# and ntokens_unpad is total number of non-padded tokens.
# Then padding performs the following de-compression:
# hidden_states[ntokens_unpad,hidden] -> hidden_states[ntokens,hidden]
hidden_states = bert_padding_module.pad_input(
hidden_states, indices, batch, seqlen
)
else:
for i in range(len(self.layer) - 1):
layer_module = self.layer[i]
hidden_states = layer_module(hidden_states,
cu_seqlens,
seqlen,
None,
indices,
attn_mask=attention_mask,
bias=alibi_attn_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
subset_idx = torch.nonzero(subset_mask[attention_mask_bool],
as_tuple=False).flatten()
hidden_states = self.layer[-1](hidden_states,
cu_seqlens,
seqlen,
subset_idx=subset_idx,
indices=indices,
attn_mask=attention_mask,
bias=alibi_attn_mask)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
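# ALiBi sketch (illustrative, not part of the original module): for n_heads = 8,
# `_get_alibi_head_slopes` returns the geometric sequence
#     [1/2, 1/4, 1/8, 1/16, 1/32, 1/64, 1/128, 1/256]
# and `rebuild_alibi_tensor` turns it into a (1, 8, size, size) bias whose entry
# for head h at query position i and key position j is -slope[h] * |i - j|,
# which is added to the additive attention mask before the softmax.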
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.pool_all = config.pool_all
def forward(self,
hidden_states: torch.Tensor,
pool: Optional[bool] = True,
mask= None) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
if not self.pool_all:
first_token_tensor = hidden_states[:, 0] if pool else hidden_states
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
else:
# mean pool everything that isn't masked out
denom = torch.sum(mask, dim=1, keepdim=True)
mean_tensor = torch.sum((hidden_states) * mask.unsqueeze(-1), dim = 1) / denom
pooled_output = self.dense(mean_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
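# Pooling sketch (illustrative, not part of the original module): with
# pool_all=True the pooler mean-pools the non-masked positions instead of taking
# the first ([CLS]) token:
#     import torch
#     hidden = torch.randn(2, 4, 8)                      # (batch, seq, hidden)
#     mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 1 = real token
#     denom = mask.sum(dim=1, keepdim=True)              # (2, 1)
#     mean = (hidden * mask.unsqueeze(-1)).sum(dim=1) / denom  # (2, 8)
# Either way, the result then goes through the same dense + tanh head.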
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertModel(BertPreTrainedModel):
"""Overall BERT model.
Args:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, add_pooling_layer=True):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def forward(
self,
input_ids: torch.Tensor,
token_type_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
output_all_encoded_layers: Optional[bool] = False,
masked_tokens_mask: Optional[torch.Tensor] = None,
**kwargs
) -> Tuple[Union[List[torch.Tensor], torch.Tensor], Optional[torch.Tensor]]:
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
embedding_output = self.embeddings(
input_ids,
token_type_ids,
position_ids
)
position_encodings = None
subset_mask = []
first_col_mask = []
if masked_tokens_mask is None:
subset_mask = None
else:
first_col_mask = torch.zeros_like(masked_tokens_mask)
first_col_mask[:, 0] = True
subset_mask = masked_tokens_mask | first_col_mask
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
subset_mask=subset_mask,
position_encodings=position_encodings)
if masked_tokens_mask is None:
sequence_output = encoder_outputs[-1]
pooled_output = self.pooler(
sequence_output, mask = attention_mask) if self.pooler is not None else None
else:
# TD [2022-03-01]: the indexing here is very tricky.
attention_mask_bool = attention_mask.bool()
subset_idx = subset_mask[attention_mask_bool] # type: ignore
sequence_output = encoder_outputs[-1][
masked_tokens_mask[attention_mask_bool][subset_idx]]
if self.pooler is not None:
pool_input = encoder_outputs[-1][
first_col_mask[attention_mask_bool][subset_idx]]
pooled_output = self.pooler(pool_input, pool=False, mask = attention_mask)
else:
pooled_output = None
if not output_all_encoded_layers:
encoder_outputs = sequence_output
if self.pooler is not None:
return encoder_outputs, pooled_output
return encoder_outputs, None
###################
# Bert Heads
###################
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0))
self.decoder.weight = bert_model_embedding_weights
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super().__init__()
self.predictions = BertLMPredictionHead(config,
bert_model_embedding_weights)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
#######################
# Construct Bert model
#######################
class BertForMaskedLM(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
warnings.warn(
'If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for '
'bi-directional self-attention.')
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config,
self.bert.embeddings.word_embeddings.weight)
# Initialize weights and apply final processing
self.post_init()
@classmethod
def from_composer(cls,
pretrained_checkpoint,
state_dict=None,
cache_dir=None,
from_tf=False,
config=None,
*inputs,
**kwargs):
"""Load from pre-trained."""
model = cls(config, *inputs, **kwargs)
if from_tf:
raise ValueError(
'TensorFlow is not supported.')
state_dict = torch.load(pretrained_checkpoint)
# If the state_dict was saved after wrapping with `composer.HuggingFaceModel`, it takes on the `model` prefix
consume_prefix_in_state_dict_if_present(state_dict, prefix='model.')
missing_keys, unexpected_keys = model.load_state_dict(state_dict,
strict=False)
if len(missing_keys) > 0:
logger.warning(
f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}"
)
if len(unexpected_keys) > 0:
logger.warning(
f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}"
)
return model
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
# labels should be a `torch.LongTensor` of shape
# `(batch_size, sequence_length)`. These are used for computing the
# masked language modeling loss.
#
# Indices should be in `[-100, 0, ..., config.vocab_size]` (see
# `input_ids` docstring) Tokens with indices set to `-100` are ignored
# (masked), the loss is only computed for the tokens with labels in `[0,
# ..., config.vocab_size]`
#
# Prediction scores are only computed for masked tokens and the (bs,
# seqlen) dimensions are flattened
if (input_ids is not None) == (inputs_embeds is not None):
            raise ValueError('Must specify either input_ids or inputs_embeds!')
if labels is None:
masked_tokens_mask = None
else:
masked_tokens_mask = labels > 0
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
masked_tokens_mask=masked_tokens_mask,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
loss = None
if labels is not None:
# Compute loss
loss_fct = nn.CrossEntropyLoss()
masked_token_idx = torch.nonzero(labels.flatten() > 0,
as_tuple=False).flatten()
loss = loss_fct(prediction_scores,
labels.flatten()[masked_token_idx])
assert input_ids is not None, 'Coding error; please open an issue'
batch, seqlen = input_ids.shape[:2]
prediction_scores = rearrange(
bert_padding_module.index_put_first_axis(
prediction_scores, masked_token_idx, batch * seqlen),
'(b s) d -> b s d',
b=batch)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=None,
attentions=None,
)
def prepare_inputs_for_generation(self, input_ids: torch.Tensor,
attention_mask: torch.Tensor,
**model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
if self.config.pad_token_id is None:
raise ValueError('The PAD token should be defined for generation')
attention_mask = torch.cat([
attention_mask,
attention_mask.new_zeros((attention_mask.shape[0], 1))
], dim=-1)
dummy_token = torch.full((effective_batch_size, 1),
self.config.pad_token_id,
dtype=torch.long,
device=input_ids.device)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {'input_ids': input_ids, 'attention_mask': attention_mask}
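# Label convention sketch (illustrative, not part of the original module): in
# `forward` above, positions with label > 0 are treated as masked-LM targets and
# everything else is ignored, so for
#     labels = [[-100, -100, 2045, -100],
#               [-100, 7592, -100, -100]]
# `masked_tokens_mask` has exactly two True entries, the MLM head is evaluated
# only on those two rows, and `index_put_first_axis` scatters the two score rows
# back into a (batch * seqlen, vocab) tensor before the '(b s) d -> b s d'
# reshape.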
class BertForSequenceClassification(BertPreTrainedModel):
"""Bert Model transformer with a sequence classification/regression head.
This head is just a linear layer on top of the pooled output. Used for,
e.g., GLUE tasks.
"""
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
classifier_dropout = (config.classifier_dropout
if config.classifier_dropout is not None else
config.hidden_dropout_prob)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@classmethod
def from_composer(cls,
pretrained_checkpoint,
state_dict=None,
cache_dir=None,
from_tf=False,
config=None,
*inputs,
**kwargs):
"""Load from pre-trained."""
model = cls(config, *inputs, **kwargs)
if from_tf:
raise ValueError(
'TensorFlow is not supported.')
state_dict = torch.load(pretrained_checkpoint)
# If the state_dict was saved after wrapping with `composer.HuggingFaceModel`, it takes on the `model` prefix
consume_prefix_in_state_dict_if_present(state_dict, prefix='model.')
missing_keys, unexpected_keys = model.load_state_dict(state_dict,
strict=False)
if len(missing_keys) > 0:
logger.warning(
f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}"
)
if len(unexpected_keys) > 0:
logger.warning(
f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}"
)
return model
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
# labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
# Labels for computing the sequence classification/regression loss.
# Indices should be in `[0, ..., config.num_labels - 1]`.
# If `config.num_labels == 1` a regression loss is computed
# (mean-square loss). If `config.num_labels > 1` a classification loss
# is computed (cross-entropy).
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
# Compute loss
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or
labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = nn.MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels),
labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=None,
attentions=None,
)
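

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original classes above): the masked-LM
# forward only scores masked positions and then scatters those scores back
# into a dense (batch, seqlen, vocab) tensor. The helper below mirrors that
# gather/scatter pattern with plain torch ops so the shapes are easy to
# follow; the name and toy sizes are assumptions for demonstration only.
def _demo_masked_token_gather_scatter():
    batch, seqlen, vocab = 2, 4, 7
    labels = torch.full((batch, seqlen), -100)  # -100 marks unmasked positions
    labels[0, 1] = 5
    labels[1, 3] = 2
    # Flatten (batch, seqlen) and keep only the masked positions, as in forward().
    masked_token_idx = torch.nonzero(labels.flatten() > 0, as_tuple=False).flatten()
    scores_at_masked = torch.randn(masked_token_idx.numel(), vocab)
    # Scatter the per-masked-token scores back into a dense (batch * seqlen, vocab)
    # buffer; this mirrors what bert_padding_module.index_put_first_axis does.
    dense = torch.zeros(batch * seqlen, vocab)
    dense[masked_token_idx] = scores_at_masked
    return rearrange(dense, '(b s) d -> b s d', b=batch)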
|
m2-main
|
bert/src/bert_layers.py
|
# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/bert_padding.py
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
"""
Functions for FlashAttention padding and unpadding
"""
from typing import Tuple, cast
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor,
indices: torch.Tensor) -> torch.Tensor:
"""Get just the values of `input` which are at `indices`.
Arguments:
ctx: the autograd context object
input: (b, ...) 2+ dimensional tensor
indices: (num_idx) 1D tensor
"""
ctx.save_for_backward(indices)
assert input.ndim >= 2
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        second_dim = other_shape.numel()  # product of sizes of all but first dimension
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
return torch.gather(
rearrange(input, 'b ... -> b (...)'), # (b, ...) -> (b, second_dim)
0,
repeat(indices, 'z -> z d',
d=second_dim) # (indices,) -> (indices, second_dim)
).reshape(-1, *other_shape) # (num_idx, ...)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
indices, = ctx.saved_tensors
assert grad_output.ndim >= 2
other_shape = grad_output.shape[1:]
grad_output = rearrange(grad_output, 'b ... -> b (...)')
grad_input = torch.zeros([ctx.first_axis_dim, grad_output.shape[1]],
device=grad_output.device,
dtype=grad_output.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# grad_input[indices] = grad_output
grad_input.scatter_(0,
repeat(indices, 'z -> z d', d=grad_output.shape[1]),
grad_output)
return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
index_first_axis = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, values: torch.Tensor, indices: torch.Tensor,
first_axis_dim) -> torch.Tensor:
ctx.save_for_backward(indices)
assert indices.ndim == 1
assert values.ndim >= 2
output = torch.zeros(first_axis_dim,
*values.shape[1:],
device=values.device,
dtype=values.dtype)
output[indices] = values
return output
@staticmethod
def backward(ctx,
grad_output: torch.Tensor) -> Tuple[torch.Tensor, None, None]:
indices, = ctx.saved_tensors
grad_values = grad_output[indices]
return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
def unpad_input(
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
"""Remove padding from input sequences.
Arguments:
hidden_states: (batch, seqlen, ...)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
Returns:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
        indices: (total_nnz), indices of the non-padding tokens in the flattened (batch * seqlen) input.
cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
max_seqlen_in_batch: int
"""
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = int(seqlens_in_batch.max().item())
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32),
(1, 0))
# TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The resulting
    # indices tensor is @dim times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
# index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
# so we write custom forward and backward to make it a bit faster.
hidden_states = cast(
torch.Tensor,
index_first_axis(rearrange(hidden_states, 'b s ... -> (b s) ...'),
indices))
return hidden_states, indices, cu_seqlens, max_seqlen_in_batch
def unpad_input_only(
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
) -> torch.Tensor:
"""Like unpad_input, but only return the unpadded first tensor.
Save a small amount of overhead.
Arguments:
hidden_states: (batch, seqlen, ...)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
Returns:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
"""
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
rearranged = rearrange(hidden_states, 'b s ... -> (b s) ...')
return index_first_axis(rearranged, indices) # type: ignore
def pad_input(hidden_states: torch.Tensor, indices: torch.Tensor, batch: int,
seqlen: int) -> torch.Tensor:
"""Add padding to sequences.
Arguments:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
        indices: (total_nnz), indices of the non-padding tokens in the flattened (batch * seqlen) input.
        batch: int, the batch size.
        seqlen: int, the padded sequence length.
Returns:
hidden_states: (batch, seqlen, ...)
"""
output = index_put_first_axis(hidden_states, indices, batch * seqlen)
return rearrange(output, '(b s) ... -> b s ...', b=batch) # type: ignore
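

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a round trip through
# unpad_input / pad_input. The sizes below are arbitrary; the point is that
# padded positions are dropped before attention and restored (as zeros)
# afterwards.
def _demo_unpad_pad_roundtrip() -> torch.Tensor:
    batch, seqlen, hidden = 2, 5, 8
    hidden_states = torch.randn(batch, seqlen, hidden)
    # 1 marks a real token, 0 marks padding.
    attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                                   [1, 1, 1, 1, 1]])
    unpadded, indices, cu_seqlens, max_seqlen = unpad_input(
        hidden_states, attention_mask)
    # unpadded has shape (total_nnz, hidden) with total_nnz = 3 + 5 = 8 here,
    # cu_seqlens = [0, 3, 8] and max_seqlen = 5.
    repadded = pad_input(unpadded, indices, batch, seqlen)
    # Real tokens are unchanged; padded positions come back as zeros.
    assert torch.equal(repadded[attention_mask.bool()],
                       hidden_states[attention_mask.bool()])
    return repadded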
|
m2-main
|
bert/src/bert_padding.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
"""Implements a Hugging Face BERT wrapped inside a :class:`.ComposerModel`."""
from __future__ import annotations
from typing import Optional
from composer.metrics.nlp import (BinaryF1Score, LanguageCrossEntropy,
MaskedAccuracy)
from composer.models.huggingface import HuggingFaceModel
from composer.utils.import_helpers import MissingConditionalImportError
from torchmetrics import MeanSquaredError
from torchmetrics.classification.accuracy import MulticlassAccuracy
from torchmetrics.classification.matthews_corrcoef import MatthewsCorrCoef
from torchmetrics.regression.spearman import SpearmanCorrCoef
__all__ = ['create_hf_bert_mlm', 'create_hf_bert_classification']
def create_hf_bert_mlm(pretrained_model_name: str = 'bert-base-uncased',
use_pretrained: Optional[bool] = False,
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: Optional[bool] = False):
"""BERT model based on |:hugging_face:| Transformers.
For more information, see `Transformers <https://huggingface.co/transformers/>`_.
Args:
pretrained_model_name (str): Name of the Hugging Face model to instantiate. Default: ``'bert-base-uncased'``.
use_pretrained (bool, optional): Whether to initialize the model with the pretrained weights. Default: ``False``.
model_config (dict): The settings used to create a Hugging Face BertConfig. BertConfig is used to specify the
architecture of a Hugging Face model.
        tokenizer_name (str, optional): Tokenizer name used to preprocess the dataset and validate the model's inputs.
gradient_checkpointing (bool, optional): Use gradient checkpointing. Default: ``False``.
.. code-block::
{
"_name_or_path": "bert-base-uncased",
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.16.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 30522
}
To create a |:hugging_face:| BERT model for Masked Language Model pretraining:
.. testcode::
from src.hf_bert import create_hf_bert_mlm
model = create_hf_bert_mlm()
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers') from e
if not model_config:
model_config = {}
if not pretrained_model_name:
pretrained_model_name = 'bert-base-uncased'
if use_pretrained:
assert transformers.AutoModelForMaskedLM.from_pretrained is not None, 'AutoModelForMaskedLM has from_pretrained method'
model = transformers.AutoModelForMaskedLM.from_pretrained(
pretrained_model_name_or_path=pretrained_model_name, **model_config)
else:
config = transformers.AutoConfig.from_pretrained(
pretrained_model_name, **model_config)
assert transformers.AutoModelForMaskedLM.from_config is not None, 'AutoModelForMaskedLM has from_config method'
model = transformers.AutoModelForMaskedLM.from_config(config)
if gradient_checkpointing:
model.gradient_checkpointing_enable() # type: ignore
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = None
metrics = [
LanguageCrossEntropy(ignore_index=-100,
vocab_size=model.config.vocab_size),
MaskedAccuracy(ignore_index=-100)
]
return HuggingFaceModel(model=model,
tokenizer=tokenizer,
use_logits=True,
metrics=metrics)
def create_hf_bert_classification(
num_labels: int,
pretrained_model_name: str = 'bert-base-uncased',
use_pretrained: Optional[bool] = False,
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: Optional[bool] = False):
"""BERT model based on |:hugging_face:| Transformers.
For more information, see `Transformers <https://huggingface.co/transformers/>`_.
Args:
        num_labels (int): The number of classes in the task (``1`` indicates regression).
pretrained_model_name (str): Name of the Hugging Face model to instantiate. Default: ``'bert-base-uncased'``.
use_pretrained (bool, optional): Whether to initialize the model with the pretrained weights. Default: ``False``.
model_config (dict, optional): The settings used to create a Hugging Face BertConfig. BertConfig is used to specify the
architecture of a Hugging Face model.
        tokenizer_name (str, optional): Tokenizer name used to preprocess the dataset and validate the model's inputs.
gradient_checkpointing (bool, optional): Use gradient checkpointing. Default: ``False``.
.. code-block::
{
"_name_or_path": "bert-base-uncased",
"architectures": [
"BertForSequenceClassification
],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1",
"2": "LABEL_2"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1,
"LABEL_2": 2
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.16.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 30522
}
Note:
This function can be used to construct a BERT model for regression by setting ``num_labels == 1``.
This will have two noteworthy effects. First, it will switch the training loss to :class:`~torch.nn.MSELoss`.
Second, the returned :class:`.ComposerModel`'s train/validation metrics will be :class:`~torchmetrics.MeanSquaredError` and :class:`~torchmetrics.SpearmanCorrCoef`.
        For the classification case (when ``num_labels > 1``), the training loss is :class:`~torch.nn.CrossEntropyLoss`, and the train/validation
metrics are :class:`~torchmetrics.MulticlassAccuracy` and :class:`~torchmetrics.MatthewsCorrCoef`, as well as :class:`.BinaryF1Score` if ``num_labels == 2``.
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers') from e
if not model_config:
model_config = {}
model_config['num_labels'] = num_labels
if not pretrained_model_name:
pretrained_model_name = 'bert-base-uncased'
if use_pretrained:
assert transformers.AutoModelForSequenceClassification.from_pretrained is not None, 'AutoModelForSequenceClassification has from_pretrained method'
model = transformers.AutoModelForSequenceClassification.from_pretrained(
pretrained_model_name_or_path=pretrained_model_name, **model_config)
else:
config = transformers.AutoConfig.from_pretrained(
pretrained_model_name, **model_config)
assert transformers.AutoModelForSequenceClassification.from_config is not None, 'AutoModelForSequenceClassification has from_config method'
model = transformers.AutoModelForSequenceClassification.from_config(
config)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = None
if num_labels == 1:
# Metrics for a regression model
metrics = [MeanSquaredError(), SpearmanCorrCoef()]
else:
# Metrics for a classification model
metrics = [
MulticlassAccuracy(num_classes=num_labels, average='micro'),
MatthewsCorrCoef(task='multiclass',
num_classes=model.config.num_labels)
]
if num_labels == 2:
metrics.append(BinaryF1Score())
return HuggingFaceModel(model=model,
tokenizer=tokenizer,
use_logits=True,
metrics=metrics)
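

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical calls to the
# two factory functions above. Running this downloads the `bert-base-uncased`
# config and tokenizer from the Hugging Face Hub, so it is wrapped in a
# function instead of being executed at import time.
def _demo_create_models():
    # Randomly initialized masked-LM model with the default config.
    mlm_model = create_hf_bert_mlm(tokenizer_name='bert-base-uncased')
    # Three-way classifier; with num_labels == 1 this would instead build a
    # regression model whose loss is MSE and whose metrics are
    # MeanSquaredError and SpearmanCorrCoef, per the Note above.
    clf_model = create_hf_bert_classification(
        num_labels=3, tokenizer_name='bert-base-uncased')
    return mlm_model, clf_model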
|
m2-main
|
bert/src/hf_bert.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import torch
# Add src folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# yapf: disable
from bert_layers import (BertEmbeddings, BertEncoder, BertForMaskedLM,
BertForSequenceClassification, BertGatedLinearUnitMLP,
BertLayer, BertLMPredictionHead, BertModel,
BertOnlyMLMHead, BertOnlyNSPHead, BertPooler,
BertPredictionHeadTransform, BertSelfOutput,
BertUnpadAttention, BertUnpadSelfAttention)
# yapf: enable
from bert_padding import (IndexFirstAxis, IndexPutFirstAxis, index_first_axis,
index_put_first_axis, pad_input, unpad_input,
unpad_input_only)
from configuration_bert import BertConfig
if torch.cuda.is_available():
from flash_attn_triton import \
flash_attn_func as flash_attn_func_bert # type: ignore
from flash_attn_triton import \
flash_attn_qkvpacked_func as flash_attn_qkvpacked_func_bert # type: ignore
from create_bert import (create_bert_classification,
create_bert_mlm)
__all__ = [
'BertConfig',
'BertEmbeddings',
'BertEncoder',
'BertForMaskedLM',
'BertForSequenceClassification',
'BertGatedLinearUnitMLP',
'BertLayer',
'BertLMPredictionHead',
'BertModel',
'BertOnlyMLMHead',
'BertOnlyNSPHead',
'BertPooler',
'BertPredictionHeadTransform',
'BertSelfOutput',
'BertUnpadAttention',
'BertUnpadSelfAttention',
'IndexFirstAxis',
'IndexPutFirstAxis',
'index_first_axis',
'index_put_first_axis',
'pad_input',
'unpad_input',
'unpad_input_only',
'create_bert_classification',
'create_bert_mlm',
'create_hf_bert_mlm',
'create_hf_bert_classification',
# These are commented out because they only exist if CUDA is available
# 'flash_attn_func_bert',
# 'flash_attn_qkvpacked_func_bert'
]
|
m2-main
|
bert/src/__init__.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
"""Triton implementation of Flash Attention.
# Copyright (c) 2022, Tri Dao.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*Experimental* implementation of FlashAttention in Triton.
We use the FlashAttention implementation from Phil Tillet as a starting point.
https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
Changes:
- Implement both causal and non-causal attention.
- Implement both self-attention and cross-attention.
- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
- Support attention bias.
- Speed up the forward pass a bit, and only store the LSE instead of m and l.
- Make the backward for d=128 much faster by reducing register spilling.
- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
small batch size * nheads.
Caution:
- If you plan to use headdim other than 64 and 128, you should test for race conditions
(due to the Triton compiler), as done in tests/test_flash_attn.py
"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
that there are none left for other head dimensions.
Differences between this Triton version and the CUDA version:
- Triton version doesn't support dropout.
- Triton forward is generally faster than CUDA forward.
- Triton backward is faster than CUDA backward when batch * nheads is small, and when headdim=64.
It is slightly slower when headdim=128 and batch * nheads is large.
- Triton version doesn't yet support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
"""
import math
import torch
import triton # type: ignore (reportMissingImports)
import triton.language as tl # type: ignore (reportMissingImports)
from einops import repeat
@triton.autotune(
configs=[
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 128
},
num_warps=8,
num_stages=1),
# This config has a race condition when EVEN_M == False, disabling it for now.
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64}, num_warps=4, num_stages=1),
],
key=[
'CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL',
'BLOCK_HEADDIM'
])
@triton.heuristics({
'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0,
'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0,
'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM'],
})
@triton.jit
def _fwd_kernel(
Q,
K,
V,
Bias,
Out,
Lse,
TMP, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_ob,
stride_oh,
stride_om,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# off_b = tl.program_id(1)
# off_h = tl.program_id(2)
# off_hb = off_b * nheads + off_h
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# Initialize pointers to Q, K, V
    # Adding parentheses around indexing might use int32 math instead of int64 math?
# https://github.com/openai/triton/issues/741
# I'm seeing a tiny bit of difference (5-7us)
q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (
offs_m[:, None] * stride_qm + offs_d[None, :])
k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (
offs_n[:, None] * stride_kn + offs_d[None, :])
v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (
offs_n[:, None] * stride_vn + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (
offs_m[:, None] * stride_bm + offs_n[None, :])
else:
raise ValueError("BIAS_TYPE must be one of {'vector', 'matrix'}")
# initialize pointer to m and l
t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
# load q: it will stay in SRAM throughout
# [2022-10-30] TD: Triton bug - in the case of EVEN_M=True and EVEN_N=False, if we just call
# tl.load(q_ptrs), we get the wrong output!
if EVEN_M & EVEN_N:
if EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
else:
q = tl.load(q_ptrs,
mask=(offs_m[:, None] < seqlen_q) &
(offs_d[None, :] < headdim),
other=0.0)
# loop over k, v and update accumulator
end_n = seqlen_k if not IS_CAUSAL else tl.minimum(
(start_m + 1) * BLOCK_M, seqlen_k)
for start_n in range(0, end_n, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn)
else:
k = tl.load(k_ptrs + start_n * stride_kn,
mask=offs_d[None, :] < headdim,
other=0.0)
else:
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn,
mask=(start_n + offs_n)[:, None] < seqlen_k,
other=0.0)
else:
k = tl.load(k_ptrs + start_n * stride_kn,
mask=((start_n + offs_n)[:, None] < seqlen_k) &
(offs_d[None, :] < headdim),
other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
        # Trying to combine the two masks seems to make the result wrong
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0,
float('-inf'))
if IS_CAUSAL:
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0,
float('-inf'))
if BIAS_TYPE != 'none':
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n,
mask=(start_n + offs_n) < seqlen_k,
other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n,
mask=(offs_m[:, None] < seqlen_q) &
((start_n + offs_n)[None, :] < seqlen_k),
other=0.0).to(tl.float32)
else:
raise ValueError(
"BIAS_TYPE must be one of {'vector', 'matrix'}")
# Slightly faster to multiply the softmax_scale in the tl.exp below since the compiler
            # can then fuse the mult and add into an fma instruction. But if we have bias we need
            # to multiply with softmax_scale here.
qk = qk * softmax_scale + bias
m_ij = tl.maximum(tl.max(qk, 1), lse_i)
p = tl.exp(qk - m_ij[:, None])
else:
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
p = tl.exp(qk * softmax_scale - m_ij[:, None])
l_ij = tl.sum(p, 1)
# scale acc_o
acc_o_scale = tl.exp(m_i - m_ij)
# # -- update output accumulator --
# BUG: have to store and immediately load
tl.store(t_ptrs, acc_o_scale)
acc_o_scale = tl.load(t_ptrs)
acc_o = acc_o * acc_o_scale[:, None]
# update acc_o
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn)
else:
v = tl.load(v_ptrs + start_n * stride_vn,
mask=offs_d[None, :] < headdim,
other=0.0)
else:
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn,
mask=(start_n + offs_n)[:, None] < seqlen_k,
other=0.0)
else:
v = tl.load(v_ptrs + start_n * stride_vn,
mask=((start_n + offs_n)[:, None] < seqlen_k) &
(offs_d[None, :] < headdim),
other=0.0)
p = p.to(v.dtype)
acc_o += tl.dot(p, v)
# -- update statistics
m_i = m_ij
l_i_new = tl.exp(lse_i - m_ij) + l_ij
lse_i = m_ij + tl.log(l_i_new)
o_scale = tl.exp(m_i - lse_i)
# BUG: have to store and immediately load
tl.store(t_ptrs, o_scale)
o_scale = tl.load(t_ptrs)
acc_o = acc_o * o_scale[:, None]
# rematerialize offsets to save registers
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
# write back l and m
lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
tl.store(lse_ptrs, lse_i)
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (
offs_m[:, None] * stride_om + offs_n[None, :])
if EVEN_M:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o)
else:
tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
else:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
else:
tl.store(out_ptrs,
acc_o,
mask=(offs_m[:, None] < seqlen_q) &
(offs_d[None, :] < headdim))
@triton.jit
def _bwd_preprocess_do_o_dot(
Out,
DO,
Delta,
stride_ob,
stride_oh,
stride_om,
stride_dob,
stride_doh,
stride_dom,
nheads,
seqlen_q,
seqlen_q_rounded,
headdim,
BLOCK_M: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# load
o = tl.load(Out + off_b * stride_ob + off_h * stride_oh +
offs_m[:, None] * stride_om + offs_d[None, :],
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
other=0.0).to(tl.float32)
do = tl.load(DO + off_b * stride_dob + off_h * stride_doh +
offs_m[:, None] * stride_dom + offs_d[None, :],
mask=(offs_m[:, None] < seqlen_q) &
(offs_d[None, :] < headdim),
other=0.0).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
@triton.jit
def _bwd_kernel_one_col_block(
start_n,
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qm,
stride_kn,
stride_vn,
stride_bm,
stride_dom,
stride_dqm,
stride_dkn,
stride_dvn,
seqlen_q,
seqlen_k,
headdim,
ATOMIC_ADD: tl.constexpr,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
# We need to make sure begin_m is a multiple of BLOCK_M (not BLOCK_N)
begin_m = 0 if not IS_CAUSAL else ((start_n * BLOCK_N) // BLOCK_M) * BLOCK_M
# initialize row/col offsets
offs_qm = begin_m + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
offs_m = tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
else:
raise ValueError("BIAS_TYPE must be one of {'vector', 'matrix'}")
# initialize dv and dk
dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
# k and v stay in SRAM throughout
# [2022-10-30] TD: Same bug as the fwd. In the case of EVEN_N=True and EVEN_M=False,
# if we just call tl.load(k_ptrs), we get the wrong output!
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
else:
k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
else:
k = tl.load(k_ptrs,
mask=(offs_n[:, None] < seqlen_k) &
(offs_d[None, :] < headdim),
other=0.0)
v = tl.load(v_ptrs,
mask=(offs_n[:, None] < seqlen_k) &
(offs_d[None, :] < headdim),
other=0.0)
# loop over rows
num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
start_m = tl.multiple_of(start_m, BLOCK_M)
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
# Same bug as below. Otherwise gives wrong result for headdim=40, seqlen=(128, 117)
if EVEN_M & EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
if EVEN_HEADDIM:
q = tl.load(q_ptrs,
mask=offs_m_curr[:, None] < seqlen_q,
other=0.0)
else:
q = tl.load(q_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q) &
(offs_d[None, :] < headdim),
other=0.0)
# recompute p = softmax(qk, dim=-1).T
qk = tl.dot(q, k, trans_b=True)
        # Trying to combine the two masks seems to make the result wrong
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
qk = tl.where(offs_n[None, :] < seqlen_k, qk, float('-inf'))
if IS_CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk,
float('-inf'))
if BIAS_TYPE != 'none':
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs).to(tl.float32)
else:
bias = tl.load(b_ptrs, mask=offs_n < seqlen_k,
other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs).to(tl.float32)
else:
bias = tl.load(b_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q) &
(offs_n[None, :] < seqlen_k),
other=0.0).to(tl.float32)
else:
raise ValueError(
"BIAS_TYPE must be one of {'vector', 'matrix'}")
qk = qk * softmax_scale + bias
# There seems to be a race condition when headdim=48/96, and dq, dk, dv are wrong.
# Also wrong for headdim=64.
if not (EVEN_M & EVEN_HEADDIM):
tl.debug_barrier()
lse_i = tl.load(LSE + offs_m_curr)
if BIAS_TYPE == 'none':
p = tl.exp(qk * softmax_scale - lse_i[:, None])
else:
p = tl.exp(qk - lse_i[:, None])
# compute dv
# [2022-10-30] TD: A Triton bug: if EVEN_M=True and EVEN_HEADDIM=False, if we call
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0), we get wrong outputs
# in the case of headdim=48/96, seqlen_q & seqlen_k >= 512. If headdim=40 or seqlen < 512,
# the output is correct.
if EVEN_M & EVEN_HEADDIM:
do = tl.load(do_ptrs)
else:
# [2022-11-01] TD: Triton bug, there's a race condition if we just use m_mask and not d_mask.
do = tl.load(do_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q) &
(offs_d[None, :] < headdim),
other=0.0)
# if EVEN_M:
# if EVEN_HEADDIM:
# do = tl.load(do_ptrs)
# else:
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
# else:
# if EVEN_HEADDIM:
# do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
# else:
# do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
# & (offs_d[None, :] < headdim), other=0.0)
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
# compute dp = dot(v, do)
# There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
# Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
# Also wrong for headdim=64, seqlen=(1023, 1024), and ATOMIC_ADD=False
if not (EVEN_M & EVEN_HEADDIM):
tl.debug_barrier()
dp = tl.dot(do, v, trans_b=True)
# There's a race condition for headdim=48
if not EVEN_HEADDIM:
tl.debug_barrier()
# compute ds = p * (dp - delta[:, None])
# Putting the subtraction after the dp matmul (instead of before) is slightly faster
Di = tl.load(D + offs_m_curr)
# Converting ds to q.dtype here reduces register pressure and makes it much faster
# for BLOCK_HEADDIM=128
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
# compute dk = dot(ds.T, q)
dk += tl.dot(ds, q, trans_a=True)
# compute dq
if not ATOMIC_ADD:
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
dq = tl.load(dq_ptrs, eviction_policy='evict_last')
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq, eviction_policy='evict_last')
else:
if EVEN_HEADDIM:
dq = tl.load(dq_ptrs,
mask=offs_m_curr[:, None] < seqlen_q,
other=0.0,
eviction_policy='evict_last')
dq += tl.dot(ds, k)
tl.store(dq_ptrs,
dq,
mask=offs_m_curr[:, None] < seqlen_q,
eviction_policy='evict_last')
else:
dq = tl.load(dq_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q) &
(offs_d[None, :] < headdim),
other=0.0,
eviction_policy='evict_last')
dq += tl.dot(ds, k)
tl.store(dq_ptrs,
dq,
mask=(offs_m_curr[:, None] < seqlen_q) &
(offs_d[None, :] < headdim),
eviction_policy='evict_last')
else: # If we're parallelizing across the seqlen_k dimension
dq = tl.dot(ds, k)
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
tl.atomic_add(dq_ptrs, dq)
else:
if EVEN_HEADDIM:
tl.atomic_add(dq_ptrs,
dq,
mask=offs_m_curr[:, None] < seqlen_q)
else:
tl.atomic_add(dq_ptrs,
dq,
mask=(offs_m_curr[:, None] < seqlen_q) &
(offs_d[None, :] < headdim))
# increment pointers
dq_ptrs += BLOCK_M * stride_dqm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_dom
if BIAS_TYPE == 'matrix':
b_ptrs += BLOCK_M * stride_bm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
# [2022-11-01] TD: Same bug. In the case of EVEN_N=True and EVEN_M=False,
# if we just call tl.store(dv_ptrs), there's a race condition
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
else:
tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
else:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
else:
tl.store(dv_ptrs,
dv,
mask=(offs_n[:, None] < seqlen_k) &
(offs_d[None, :] < headdim))
tl.store(dk_ptrs,
dk,
mask=(offs_n[:, None] < seqlen_k) &
(offs_d[None, :] < headdim))
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
@triton.autotune(
configs=[
triton.Config(
{
'BLOCK_M': 128,
'BLOCK_N': 128,
'SEQUENCE_PARALLEL': False
},
num_warps=8,
num_stages=1,
pre_hook=init_to_zero('DQ')),
triton.Config(
{
'BLOCK_M': 128,
'BLOCK_N': 128,
'SEQUENCE_PARALLEL': True
},
num_warps=8,
num_stages=1,
pre_hook=init_to_zero('DQ')),
# Other configs seem to give wrong results when seqlen_q % 128 != 0, disabling them for now
# # Kernel is buggy (give wrong result) if we set BLOCK_m=128, BLOCK_n=64, num_warps=*4*
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
],
key=[
'CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL',
'BLOCK_HEADDIM'
],
)
@triton.heuristics({
'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0,
'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0,
'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM'],
})
@triton.jit
def _bwd_kernel(
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_dob,
stride_doh,
stride_dom,
stride_dqb,
stride_dqh,
stride_dqm,
stride_dkb,
stride_dkh,
stride_dkn,
stride_dvb,
stride_dvh,
stride_dvn,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
SEQUENCE_PARALLEL: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# offset pointers for batch/head
Q += off_b * stride_qb + off_h * stride_qh
K += off_b * stride_kb + off_h * stride_kh
V += off_b * stride_vb + off_h * stride_vh
DO += off_b * stride_dob + off_h * stride_doh
DQ += off_b * stride_dqb + off_h * stride_dqh
DK += off_b * stride_dkb + off_h * stride_dkh
DV += off_b * stride_dvb + off_h * stride_dvh
if BIAS_TYPE != 'none':
Bias += off_b * stride_bb + off_h * stride_bh
# pointer to row-wise quantities in value-like data
D += off_hb * seqlen_q_rounded
LSE += off_hb * seqlen_q_rounded
if not SEQUENCE_PARALLEL:
num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
for start_n in range(0, num_block_n):
_bwd_kernel_one_col_block(start_n,
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qm,
stride_kn,
stride_vn,
stride_bm,
stride_dom,
stride_dqm,
stride_dkn,
stride_dvn,
seqlen_q,
seqlen_k,
headdim,
ATOMIC_ADD=False,
BIAS_TYPE=BIAS_TYPE,
IS_CAUSAL=IS_CAUSAL,
BLOCK_HEADDIM=BLOCK_HEADDIM,
EVEN_M=EVEN_M,
EVEN_N=EVEN_N,
EVEN_HEADDIM=EVEN_HEADDIM,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N)
else:
start_n = tl.program_id(0)
_bwd_kernel_one_col_block(start_n,
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qm,
stride_kn,
stride_vn,
stride_bm,
stride_dom,
stride_dqm,
stride_dkn,
stride_dvn,
seqlen_q,
seqlen_k,
headdim,
ATOMIC_ADD=True,
BIAS_TYPE=BIAS_TYPE,
IS_CAUSAL=IS_CAUSAL,
BLOCK_HEADDIM=BLOCK_HEADDIM,
EVEN_M=EVEN_M,
EVEN_N=EVEN_N,
EVEN_HEADDIM=EVEN_HEADDIM,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N)
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
# shape constraints
batch, seqlen_q, nheads, d = q.shape
_, seqlen_k, _, _ = k.shape
assert k.shape == (batch, seqlen_k, nheads, d)
assert v.shape == (batch, seqlen_k, nheads, d)
    assert d <= 128, 'FlashAttention only supports head dimensions up to 128'
    assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type'
    assert q.dtype in [torch.float16,
                       torch.bfloat16], 'Only fp16 and bf16 are supported'
assert q.is_cuda and k.is_cuda and v.is_cuda
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
has_bias = bias is not None
bias_type = 'none'
if has_bias:
assert bias.dtype in [q.dtype, torch.float]
assert bias.is_cuda
assert bias.dim() == 4
if bias.stride(-1) != 1:
bias = bias.contiguous()
if bias.shape[2:] == (1, seqlen_k):
bias_type = 'vector'
elif bias.shape[2:] == (seqlen_q, seqlen_k):
bias_type = 'matrix'
else:
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)'
' or (seqlen_q, seqlen_k)')
if bias.shape[:2] == (1, nheads):
bias = repeat(bias, '1 h ... -> b h ...', b=batch)
elif bias.shape[:2] == (batch, 1):
bias = repeat(bias, 'b 1 ... -> b h ...', h=nheads)
elif bias.shape[:2] == (1, 1):
bias = repeat(bias, '1 h ... -> b h ...', b=batch)
bias = repeat(bias, 'b 1 ... -> b h ...', h=nheads)
assert bias.shape[:2] == (
batch, nheads
        ), f'First 2 dimensions of bias must be broadcastable to (batch, nheads) = ({batch, nheads}). Bias has shape: {bias.shape}'
assert bias is not None # for type checking
bias_strides = (bias.stride(0), bias.stride(1),
bias.stride(2)) if has_bias else (0, 0, 0)
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
lse = torch.empty((batch, nheads, seqlen_q_rounded),
device=q.device,
dtype=torch.float32)
tmp = torch.empty((batch, nheads, seqlen_q_rounded),
device=q.device,
dtype=torch.float32)
o = torch.empty_like(q)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
# BLOCK = 128
# num_warps = 4 if d <= 64 else 8
grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
_fwd_kernel[grid]( # type: ignore
q,
k,
v,
bias,
o,
lse,
tmp,
softmax_scale,
q.stride(0),
q.stride(2),
q.stride(1),
k.stride(0),
k.stride(2),
k.stride(1),
v.stride(0),
v.stride(2),
v.stride(1),
*bias_strides,
o.stride(0),
o.stride(2),
o.stride(1),
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
d,
seqlen_q // 32,
seqlen_k // 32, # key for triton cache (limit number of compilations)
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
bias_type,
causal,
BLOCK_HEADDIM,
# BLOCK_M=BLOCK, BLOCK_N=BLOCK,
# num_warps=num_warps,
# num_stages=1,
)
return o, lse, softmax_scale # softmax_scale could have been updated
def _flash_attn_backward(do,
q,
k,
v,
o,
lse,
dq,
dk,
dv,
bias=None,
causal=False,
softmax_scale=None):
# Make sure that the last dimension is contiguous
if do.stride(-1) != 1:
do = do.contiguous()
batch, seqlen_q, nheads, d = q.shape
_, seqlen_k, _, _ = k.shape
# assert d in {16, 32, 64, 128}
assert d <= 128
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
assert lse.shape == (batch, nheads, seqlen_q_rounded)
assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
# dq_accum = torch.zeros_like(q, dtype=torch.float32)
dq_accum = torch.empty_like(q, dtype=torch.float32)
delta = torch.empty_like(lse)
# delta = torch.zeros_like(lse)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
_bwd_preprocess_do_o_dot[grid]( # type: ignore
o,
do,
delta,
o.stride(0),
o.stride(2),
o.stride(1),
do.stride(0),
do.stride(2),
do.stride(1),
nheads,
seqlen_q,
seqlen_q_rounded,
d,
BLOCK_M=128,
BLOCK_HEADDIM=BLOCK_HEADDIM,
)
has_bias = bias is not None
bias_type = 'none'
if has_bias:
assert bias.dtype in [q.dtype, torch.float]
assert bias.is_cuda
assert bias.dim() == 4
assert bias.stride(-1) == 1
if bias.shape[2:] == (1, seqlen_k):
bias_type = 'vector'
elif bias.shape[2:] == (seqlen_q, seqlen_k):
bias_type = 'matrix'
else:
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)'
' or (seqlen_q, seqlen_k)')
if bias.shape[:2] == (1, nheads):
bias = repeat(bias, '1 h ... -> b h ...', b=batch)
elif bias.shape[:2] == (batch, 1):
bias = repeat(bias, 'b 1 ... -> b h ...', h=nheads)
elif bias.shape[:2] == (1, 1):
bias = repeat(bias, '1 h ... -> b h ...', b=batch)
bias = repeat(bias, 'b 1 ... -> b h ...', h=nheads)
assert bias.shape[:2] == (
batch, nheads
        ), f'First 2 dimensions of bias must be broadcastable to (batch, nheads) = ({batch, nheads}). Bias has shape: {bias.shape}'
assert bias is not None # type checking
bias_strides = (bias.stride(0), bias.stride(1),
bias.stride(2)) if has_bias else (0, 0, 0)
# BLOCK_M = 128
# BLOCK_N = 64
# num_warps = 4
grid = lambda META: (triton.cdiv(seqlen_k, META['BLOCK_N'])
if META['SEQUENCE_PARALLEL'] else 1, batch * nheads)
_bwd_kernel[grid]( # type: ignore
q,
k,
v,
bias,
do,
dq_accum,
dk,
dv,
lse,
delta,
softmax_scale,
q.stride(0),
q.stride(2),
q.stride(1),
k.stride(0),
k.stride(2),
k.stride(1),
v.stride(0),
v.stride(2),
v.stride(1),
*bias_strides,
do.stride(0),
do.stride(2),
do.stride(1),
dq_accum.stride(0),
dq_accum.stride(2),
dq_accum.stride(1),
dk.stride(0),
dk.stride(2),
dk.stride(1),
dv.stride(0),
dv.stride(2),
dv.stride(1),
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
d,
seqlen_q // 32,
seqlen_k // 32, # key for triton cache (limit number of compilations)
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
bias_type,
causal,
BLOCK_HEADDIM,
# SEQUENCE_PARALLEL=False,
# BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N,
# num_warps=num_warps,
# num_stages=1,
)
dq.copy_(dq_accum)
class _FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
"""Forward pass for packed FlashAttention.
Args:
ctx: autograd context
qkv: (batch, seqlen, 3, nheads, headdim)
            bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
causal (bool): whether to incorporate causal attention masking
softmax_scale (float, optional): scale factor for softmax
"""
# Make sure that the last dimension is contiguous
if qkv.stride(-1) != 1:
qkv = qkv.contiguous()
o, lse, ctx.softmax_scale = _flash_attn_forward(
qkv[:, :, 0],
qkv[:, :, 1],
qkv[:, :, 2],
bias=bias,
causal=causal,
softmax_scale=softmax_scale)
ctx.save_for_backward(qkv, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
qkv, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[
1], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dqkv = torch.empty_like(qkv)
_flash_attn_backward(do,
qkv[:, :, 0],
qkv[:, :, 1],
qkv[:, :, 2],
o,
lse,
dqkv[:, :, 0],
dqkv[:, :, 1],
dqkv[:, :, 2],
bias=bias,
causal=ctx.causal,
softmax_scale=ctx.softmax_scale)
return dqkv, None, None, None
flash_attn_qkvpacked_func = _FlashAttnQKVPackedFunc.apply
class _FlashAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
"""Forward pass for FlashAttention.
Args:
ctx: autograd context
q: (batch_size, seqlen_q, nheads, headdim)
k: (batch_size, seqlen_k, nheads, headdim)
v: (batch_size, seqlen_k, nheads, headdim)
            bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
causal (bool): whether to incorporate causal attention masking
softmax_scale (float, optional): scale factor for softmax
"""
# Make sure that the last dimension is contiguous
q, k, v = [
x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]
]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale)
ctx.save_for_backward(q, k, v, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[
3], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
_flash_attn_backward(do,
q,
k,
v,
o,
lse,
dq,
dk,
dv,
bias=bias,
causal=ctx.causal,
softmax_scale=ctx.softmax_scale)
return dq, dk, dv, None, None, None
flash_attn_func = _FlashAttnFunc.apply
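

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): expected tensor
# shapes for the exported autograd functions. Per the asserts in
# _flash_attn_forward this needs a CUDA device and fp16/bf16 inputs; the
# sizes below are arbitrary demonstration values.
def _demo_flash_attn_shapes():
    batch, seqlen, nheads, headdim = 2, 128, 4, 64
    q = torch.randn(batch, seqlen, nheads, headdim,
                    device='cuda', dtype=torch.float16)
    k = torch.randn_like(q)
    v = torch.randn_like(q)
    # Unpacked interface: positional args are (q, k, v, bias, causal, softmax_scale).
    out = flash_attn_func(q, k, v, None, True)
    # Packed interface: qkv has shape (batch, seqlen, 3, nheads, headdim).
    qkv = torch.stack([q, k, v], dim=2)
    out_packed = flash_attn_qkvpacked_func(qkv, None, True)
    # Both outputs have the same shape as q: (batch, seqlen, nheads, headdim).
    return out, out_packed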
|
m2-main
|
bert/src/flash_attn_triton.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import sys
from typing import Optional
# Add src folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import bert_layers as bert_layers_module
import configuration_bert as configuration_bert_module
import transformers
from composer.metrics.nlp import (BinaryF1Score, LanguageCrossEntropy,
MaskedAccuracy)
from composer.models.huggingface import HuggingFaceModel
from torchmetrics import MeanSquaredError
from torchmetrics.classification.accuracy import MulticlassAccuracy
from torchmetrics.classification.matthews_corrcoef import MatthewsCorrCoef
from torchmetrics.regression.spearman import SpearmanCorrCoef
__all__ = ['create_bert_mlm', 'create_bert_classification']
def create_bert_mlm(pretrained_model_name: str = 'bert-base-uncased',
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: Optional[bool] = False,
pretrained_checkpoint: Optional[str] = None):
"""BERT masked language model based on |:hugging_face:| Transformers.
For more information, see
    `Transformers <https://huggingface.co/transformers/>`_ and `Mosaic's BERT repo <https://github.com/mosaicml/examples/tree/main/examples/benchmarks/bert>`_.
Args:
pretrained_model_name (str): Name of the Hugging Face model to
instantiate. This will determine the default model configuration.
Default: ``bert-base-uncased``.
model_config (dict): A dictionary of user-specified configurations to
update/add to the default model configuration.
tokenizer_name (str, optional): Tokenizer name used to preprocess the
            dataset and validate the model's inputs.
gradient_checkpointing (bool, optional): Use gradient checkpointing.
Default: ``False``.
pretrained_checkpoint (str, optional): The pretrained checkpoint to
initialize the model weights. If provided, the state dictionary
stored at `pretrained_checkpoint` will be loaded into the model
after initialization. Default: ``None``.
.. code-block::
{
"_name_or_path": "bert-base-uncased",
"alibi_starting_size": 512,
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": 0.0,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.16.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 30522
}
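    To create a BERT model for Masked Language Model pretraining:
    .. testcode::
        from create_bert import create_bert_mlm
        model = create_bert_mlm()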
"""
if not model_config:
model_config = {}
if not pretrained_model_name:
pretrained_model_name = 'bert-base-uncased'
config = configuration_bert_module.BertConfig.from_pretrained(
pretrained_model_name, **model_config)
for key, value in model_config.items():
config.update({f'{key}': value})
# Padding for divisibility by 8
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
if pretrained_checkpoint is not None:
model = bert_layers_module.BertForMaskedLM.from_composer(
pretrained_checkpoint=pretrained_checkpoint, config=config)
else:
model = bert_layers_module.BertForMaskedLM(config)
if gradient_checkpointing:
model.gradient_checkpointing_enable() # type: ignore
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = transformers.AutoTokenizer.from_pretrained(
pretrained_model_name)
metrics = [
LanguageCrossEntropy(ignore_index=-100,
vocab_size=model.config.vocab_size),
MaskedAccuracy(ignore_index=-100)
]
hf_model = HuggingFaceModel(model=model,
tokenizer=tokenizer,
use_logits=True,
metrics=metrics)
# Padding for divisibility by 8
# We have to do it again here because wrapping by HuggingFaceModel changes it
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
hf_model.model.resize_token_embeddings(config.vocab_size)
return hf_model
def create_bert_classification(
num_labels: int,
pretrained_model_name: str = 'bert-base-uncased',
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: Optional[bool] = False,
pretrained_checkpoint: Optional[str] = None):
"""BERT classification model based on |:hugging_face:| Transformers.
    For more information, see `Transformers <https://huggingface.co/transformers/>`_ and `Mosaic's BERT repo <https://github.com/mosaicml/examples/tree/main/examples/benchmarks/bert>`_.
Args:
num_labels (int): The number of classes in the classification task.
pretrained_model_name (str): Name of the Hugging Face model to
instantiate. This will determine the default model configuration.
Default: ``bert-base-uncased``.
model_config (dict): A dictionary of user-specified configurations to
update/add to the default model configuration.
tokenizer_name (str, optional): Tokenizer name used to preprocess the
            dataset and validate the model's inputs.
gradient_checkpointing (bool, optional): Use gradient checkpointing.
Default: ``False``.
pretrained_checkpoint (str, optional): The pretrained checkpoint to
initialize the model weights. If provided,
the state dictionary stored at `pretrained_checkpoint` will be
loaded into the model after initialization. Default: ``None``.
.. code-block::
{
"_name_or_path": "bert-base-uncased",
"alibi_starting_size": 512,
"architectures": [
"BertForSequenceClassification
],
"attention_probs_dropout_prob": 0.0,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1",
"2": "LABEL_2"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1,
"LABEL_2": 2
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.16.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 30522
}
To create a BERT model for classification:
.. testcode::
from create_bert import create_bert_classification
model = create_bert_classification(num_labels=3) # if the task has three classes.
Note:
This function can be used to construct a BERT model for regression by
setting ``num_labels == 1``. This will have two noteworthy effects.
First, it will switch the training loss to :class:`~torch.nn.MSELoss`.
Second, the returned :class:`.ComposerModel`'s train/validation metrics
will be :class:`~torchmetrics.MeanSquaredError` and
        :class:`~torchmetrics.SpearmanCorrCoef`. For the classification case
(when ``num_labels > 1``), the training loss is
:class:`~torch.nn.CrossEntropyLoss`, and the train/validation
metrics are :class:`~torchmetrics.MulticlassAccuracy` and
:class:`~torchmetrics.MatthewsCorrCoef`, as well as
:class:`.BinaryF1Score` if ``num_labels == 2``.
"""
if not model_config:
model_config = {}
    # By default, turn off attention dropout for the Transformer baseline;
    # otherwise, Flash Attention would be disabled by default (it does not support dropout)
if 'attention_probs_dropout_prob' not in model_config:
model_config['attention_probs_dropout_prob'] = 0.0
# Use `alibi_starting_size` to determine how large of an alibi tensor to
# create when initializing the model. You should be able to ignore
# this parameter in most cases.
if 'alibi_starting_size' not in model_config:
model_config['alibi_starting_size'] = 512
model_config['num_labels'] = num_labels
if not pretrained_model_name:
pretrained_model_name = 'bert-base-uncased'
config, unused_kwargs = configuration_bert_module.BertConfig.from_pretrained(
pretrained_model_name, return_unused_kwargs=True, **model_config)
    # This lets us use non-standard config fields (e.g. `alibi_starting_size`)
for key, value in model_config.items():
config.update({f'{key}': value})
config.update(unused_kwargs)
# Padding for divisibility by 8
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
if pretrained_checkpoint is not None:
model = bert_layers_module.BertForSequenceClassification.from_composer(
pretrained_checkpoint=pretrained_checkpoint, config=config)
else:
model = bert_layers_module.BertForSequenceClassification(config)
if gradient_checkpointing:
model.gradient_checkpointing_enable() # type: ignore
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = transformers.AutoTokenizer.from_pretrained(
pretrained_model_name)
if num_labels == 1:
# Metrics for a regression model
metrics = [MeanSquaredError(), SpearmanCorrCoef()]
else:
# Metrics for a classification model
metrics = [
MulticlassAccuracy(num_classes=num_labels, average='micro'),
MatthewsCorrCoef(task='multiclass',
num_classes=model.config.num_labels)
]
if num_labels == 2:
metrics.append(BinaryF1Score())
hf_model = HuggingFaceModel(model=model,
tokenizer=tokenizer,
use_logits=True,
metrics=metrics)
# Padding for divisibility by 8
# We have to do it again here because wrapping by HuggingFaceModel changes it
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
hf_model.model.resize_token_embeddings(config.vocab_size)
return hf_model
|
m2-main
|
bert/src/create_bert.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
"""Build a StreamingTextDataset dataset and dataloader for training."""
import os
from itertools import islice
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
import transformers
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
from streaming import Stream, StreamingDataset
from torch.utils.data import DataLoader
from transformers import (AutoTokenizer, PreTrainedTokenizer,
PreTrainedTokenizerFast)
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
def build_tokenizer(om_tokenizer_config: DictConfig,) -> Tokenizer:
os.environ['TRANSFORMERS_NO_ADVISORY_WARNINGS'] = '1'
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
resolved_om_tokenizer_config = om.to_container(om_tokenizer_config,
resolve=True)
tokenizer_kwargs = resolved_om_tokenizer_config.get( # type: ignore
'kwargs', {})
tokenizer_name = resolved_om_tokenizer_config['name'] # type: ignore
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name,
**tokenizer_kwargs)
# HuggingFace does not respect the model_max_length kwarg, and overrides it with
# min(kwargs['model_max_length'], original_config['model_max_length']), so we
# explicitly set it here
tokenizer.model_max_length = tokenizer_kwargs.get(
'model_max_length',
int(1e30),
)
return tokenizer
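# Illustrative usage sketch (not part of the original file); the tokenizer
# name and max length below are arbitrary assumptions chosen for the example,
# and ``om`` is the OmegaConf alias imported above:
#
#   example_cfg = om.create({'name': 'bert-base-uncased',
#                            'kwargs': {'model_max_length': 128}})
#   example_tokenizer = build_tokenizer(example_cfg)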
class StreamingTextDataset(StreamingDataset):
"""Generic text dataset using MosaicML's StreamingDataset.
Args:
tokenizer (Tokenizer): HuggingFace tokenizer to
tokenize samples.
max_seq_len (int): The max sequence length of each sample.
streams (Sequence[Stream], optional): One or more Streams to stream/cache samples from,
which may be upsampled or downsampled. StreamingDataset uses either ``streams`` or
``remote``/``local``. Defaults to ``None``.
remote (str, optional): Remote path or directory to download the dataset from. If ``None``,
its data must exist locally. StreamingDataset uses either ``streams`` or
``remote``/``local``. Defaults to ``None``.
local (str, optional): Local working directory to download shards to. This is where shards
are cached while they are being used. Uses a temp directory if not set.
StreamingDataset uses either ``streams`` or ``remote``/``local``. Defaults to ``None``.
split (str, optional): Which dataset split to use, if any. If provided, we stream from/to
the ``split`` subdirs of ``remote`` and ``local``. Defaults to ``None``.
download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``.
download_timeout (float): Number of seconds to wait for a shard to download before raising
an exception. Defaults to ``60``.
validate_hash (str, optional): Optional hash or checksum algorithm to use to validate
shards. Defaults to ``None``.
keep_zip (bool): Whether to keep or delete the compressed form when decompressing
downloaded shards. If ``False``, keep iff remote is local or no remote. Defaults to
            ``False``.
keep_raw (bool): Whether to keep or delete the decompressed form (or only form)
of shards after all their samples have been yielded this epoch. If ``False``, keep iff
remote is local or no remote and no compression. Defaults to ``True``.
samples_per_epoch (int, optional): Provide this field iff you are weighting sub-datasets
proportionally. Defaults to ``None``.
predownload (int, optional): Target number of samples ahead to download the shards of while
iterating. Defaults to ``100_000``.
partition_algo (str): Which partitioning algorithm to use. Defaults to ``orig``.
num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with
resumption. Defaults to ``None``, which is interpreted as the number of nodes of the
initial run.
batch_size (int, optional): Batch size of its DataLoader, which affects how the dataset is
partitioned over the workers. Defaults to ``None``.
shuffle (bool): Whether to iterate over the samples in randomized order. Defaults to
``False``.
shuffle_algo (str): Which shuffling algorithm to use. Defaults to ``py1s``.
        shuffle_seed (int): Seed for deterministic data shuffling. Defaults to ``9176``.
"""
def __init__(self,
tokenizer: Tokenizer,
max_seq_len: int,
streams: Optional[Sequence[Stream]] = None,
remote: Optional[str] = None,
local: Optional[str] = None,
split: Optional[str] = None,
download_retry: int = 2,
download_timeout: float = 60,
validate_hash: Optional[str] = None,
keep_zip: bool = False,
keep_raw: bool = True,
samples_per_epoch: Optional[int] = None,
predownload: int = 100_000,
partition_algo: str = 'orig',
num_canonical_nodes: Optional[int] = None,
batch_size: Optional[int] = None,
shuffle: bool = False,
shuffle_algo: str = 'py1s',
shuffle_seed: int = 9176,
**kwargs: Dict[str, Any]):
group_method = kwargs.pop('group_method', None)
if group_method is not None:
raise NotImplementedError(
'group_method is deprecated and has been removed.\nTo ' +
'concatenate, use the --concat_tokens ' +
'argument when creating your MDS dataset with concat_c4.py')
if kwargs is not None and len(kwargs) > 0:
raise ValueError(
f'StreamingTextDataset() got an unexpected keyword argument: {kwargs}'
)
if local is not None and (remote is None or (local == remote)):
if os.path.isdir(local):
contents = set(os.listdir(local))
if split not in contents:
raise ValueError(
f'local directory {local} does not contain split {split}'
)
# Build Dataset
super().__init__(
streams=streams,
remote=remote,
local=local,
split=split,
download_retry=download_retry,
download_timeout=download_timeout,
validate_hash=validate_hash,
keep_zip=keep_zip,
keep_raw=keep_raw,
samples_per_epoch=samples_per_epoch,
predownload=predownload,
partition_algo=partition_algo,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size,
shuffle=shuffle,
shuffle_algo=shuffle_algo,
shuffle_seed=shuffle_seed,
)
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
# How to tokenize a text sample to a token sample
def _tokenize(self, text_sample):
if self.tokenizer._pad_token is None:
# Some tokenizers (e.g. GPT2 tokenizer) have no padding token which causes bugs
raise RuntimeError(
'If tokenizing on-the-fly, tokenizer must have a pad_token_id')
return self.tokenizer(text_sample['text'],
truncation=True,
padding='max_length',
max_length=self.max_seq_len)
def _read_binary_tokenized_sample(self, sample):
return torch.from_numpy(
np.frombuffer(sample['tokens'],
dtype=np.int64)[:self.max_seq_len].copy())
# How to process a sample
def __getitem__(self, idx: int) -> Union[Dict[str, Any], torch.Tensor]:
sample = super().__getitem__(idx)
if 'text' in sample:
token_sample = self._tokenize(sample)
elif 'tokens' in sample:
token_sample = self._read_binary_tokenized_sample(sample)
else:
raise RuntimeError(
'StreamingTextDataset needs samples to have a `text` or `tokens` column'
)
return token_sample
class ConcatenatedSequenceCollatorWrapper:
"""Collator wrapper to add sequence_id to batch."""
def __init__(self,
base_collator: Callable,
eos_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None):
self.base_collator = base_collator
if (eos_token_id is None) and (bos_token_id is None):
raise ValueError(
'Must supply a value for either eos_token_id or bos_token_id, but got None for both.'
)
if (eos_token_id is not None) and (bos_token_id is not None):
raise ValueError(
'Cannot use *both* EOS and BOS tokens for detecting sequence boundaries. ' +\
'Please supply `eos_token_id` if sequences end with an EOS token, or use ' +\
'`bos_token_id` if sequences start with a BOS token.'
)
if eos_token_id is None:
self.split_token_id = bos_token_id
self.bos_mode = True
else:
self.split_token_id = eos_token_id
self.bos_mode = False
def __call__(self, examples: List[Any]) -> Dict[str, torch.Tensor]:
batch = self.base_collator(examples)
batch['sequence_id'] = self.get_sequence_id_from_batch(batch)
return batch
def get_sequence_id_from_batch(
self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
assert self.split_token_id is not None
is_separator = torch.eq(batch['input_ids'], self.split_token_id)
cumulative_sep = torch.cumsum(is_separator,
dim=1).to(batch['input_ids'].dtype)
# If separator token is bos, we're already done
if self.bos_mode:
return cumulative_sep
# If separator token is eos, right shift 1 space
left_zeros = cumulative_sep.new_zeros((cumulative_sep.shape[0], 1))
return torch.cat([left_zeros, cumulative_sep[:, :-1]], dim=1)
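# Worked example (illustrative) for get_sequence_id_from_batch with an EOS
# separator token "E":
#   input_ids      : [a, b, E, c, d, E]
#   is_separator   : [0, 0, 1, 0, 0, 1]
#   cumulative_sep : [0, 0, 1, 1, 1, 2]
#   right-shifted  : [0, 0, 0, 1, 1, 1]   <- per-token sequence_id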
def build_text_dataloader(
cfg: DictConfig,
tokenizer: Tokenizer,
device_batch_size: int,
):
assert cfg.name == 'text', f'Tried to build text dataloader with cfg.name={cfg.name}'
if cfg.dataset.get('group_method', None) is not None:
raise NotImplementedError(
'group_method is deprecated and has been removed.\nTo ' +
'concatenate, use the --concat_tokens ' +
'argument when creating your MDS dataset with convert_dataset.py')
# build streams
streams_dict = cfg.dataset.get('streams', None)
streams = None
if streams_dict is not None:
streams = []
for _, stream in streams_dict.items():
streams.append(
Stream(
remote=stream.get('remote', None) or
cfg.dataset.get('remote', None),
local=stream.get('local', None) or
cfg.dataset.get('local', None),
split=stream.get('split', None) or
cfg.dataset.get('split', None),
proportion=stream.get('proportion', None),
repeat=stream.get('repeat', None),
samples=stream.get('samples', None),
download_retry=stream.get('download_retry', None) or
cfg.dataset.get('download_retry', 2),
download_timeout=stream.get('download_timeout', None) or
cfg.dataset.get('download_timeout', 60),
validate_hash=stream.get('validate_hash', None) or
cfg.dataset.get('validate_hash', None),
keep_zip=stream.get('keep_zip', None) or
cfg.dataset.get('keep_zip', False),
keep_raw=stream.get('keep_raw', None) or
cfg.dataset.get('keep_raw', True),
))
# build dataset potentially with streams
dataset = StreamingTextDataset(
tokenizer=tokenizer,
max_seq_len=cfg.dataset.max_seq_len,
streams=streams,
remote=cfg.dataset.get('remote', None),
local=cfg.dataset.get('local', None),
split=cfg.dataset.get('split', None),
download_retry=cfg.dataset.get('download_retry', 2),
download_timeout=cfg.dataset.get('download_timeout', 60),
validate_hash=cfg.dataset.get('validate_hash', None),
keep_zip=cfg.dataset.get('keep_zip', False),
keep_raw=cfg.dataset.get('keep_raw', True),
samples_per_epoch=cfg.dataset.get('samples_per_epoch', None),
predownload=cfg.dataset.get('predownload', 100_000),
partition_algo=cfg.dataset.get('partition_algo', 'orig'),
num_canonical_nodes=cfg.dataset.get('num_canonical_nodes', 128),
batch_size=device_batch_size,
shuffle=cfg.dataset.get('shuffle', False),
shuffle_algo=cfg.dataset.get('shuffle_algo', 'py1s'),
shuffle_seed=cfg.dataset.get('shuffle_seed', 9176),
)
mlm_probability = cfg.dataset.get('mlm_probability', None)
collate_fn = transformers.DataCollatorForLanguageModeling(
tokenizer=dataset.tokenizer,
mlm=mlm_probability is not None,
mlm_probability=mlm_probability)
eos_token_id = cfg.dataset.get('eos_token_id')
bos_token_id = cfg.dataset.get('bos_token_id')
if (eos_token_id is not None) or (bos_token_id is not None):
# Note: Will raise an error if both are non-None
collate_fn = ConcatenatedSequenceCollatorWrapper(
base_collator=collate_fn,
eos_token_id=eos_token_id,
bos_token_id=bos_token_id)
return DataLoader(
dataset,
collate_fn=collate_fn,
batch_size=device_batch_size,
drop_last=cfg.drop_last,
num_workers=cfg.num_workers,
pin_memory=cfg.get('pin_memory', True),
prefetch_factor=cfg.get('prefetch_factor', 2),
persistent_workers=cfg.get('persistent_workers', True),
timeout=cfg.get('timeout', 0),
)
def build_synthetic_dataloader(
cfg: DictConfig,
device_batch_size: int,
):
assert cfg.name == 'synthetic', f'Tried to build synthetic dataloader with cfg.name={cfg.name}'
# build dataset potentially with streams
from src.synthetics.two_sentence import TwoSentenceDataset
dataset = TwoSentenceDataset(
max_seq_len=cfg.dataset.max_seq_len,
        vocab_size=cfg.dataset.num_vocab,
        num_samples=cfg.dataset.num_examples,
)
mlm_probability = cfg.dataset.get('mlm_probability', None)
collate_fn = transformers.DataCollatorForLanguageModeling(
tokenizer=dataset.tokenizer,
mlm=mlm_probability is not None,
mlm_probability=mlm_probability)
eos_token_id = cfg.dataset.get('eos_token_id')
bos_token_id = cfg.dataset.get('bos_token_id')
if (eos_token_id is not None) or (bos_token_id is not None):
# Note: Will raise an error if both are non-None
collate_fn = ConcatenatedSequenceCollatorWrapper(
base_collator=collate_fn,
eos_token_id=eos_token_id,
bos_token_id=bos_token_id)
return DataLoader(
dataset,
collate_fn=collate_fn,
batch_size=device_batch_size,
drop_last=True,
# num_workers=cfg.num_workers,
pin_memory=cfg.get('pin_memory', True),
prefetch_factor=cfg.get('prefetch_factor', 2),
# persistent_workers=cfg.get('persistent_workers', True),
timeout=cfg.get('timeout', 0),
)
# Helpful to test if your dataloader is working locally
# Run `python text_data.py --local_path [local] [--remote_path remote, optional]` and verify that batches are printed out
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--tokenizer',
type=str,
default='gpt2',
help='the name of the tokenizer to use')
parser.add_argument('--local_path',
type=str,
required=True,
help='the path to the local copy of the dataset')
parser.add_argument(
'--remote_path',
type=str,
default=None,
help='the path to the remote copy to stream from (optional)')
parser.add_argument('--split',
type=str,
default='val',
help='which split of the dataset to use')
parser.add_argument('--max_seq_len',
type=int,
default=32,
help='max sequence length to test')
args = parser.parse_args()
if args.remote_path is not None:
print(
f'Reading {args.split} split from {args.local_path} <- streamed from <- {args.remote_path}'
)
else:
print(f'Reading {args.split} split from {args.local_path}')
cfg = {
'name': 'text',
'dataset': {
'local': args.local_path,
'remote': args.remote_path,
'split': args.split,
'shuffle': False,
'max_seq_len': args.max_seq_len,
'keep_zip': True, # in case we need compressed files after testing
},
'drop_last': False,
'num_workers': 4,
}
cfg = om.create(cfg)
device_batch_size = 2
tokenizer_cfg = {'name': args.tokenizer, 'kwargs': {}}
tokenizer_cfg['kwargs'] = {'model_max_length': args.max_seq_len}
tokenizer_cfg = om.create(tokenizer_cfg)
tokenizer = build_tokenizer(tokenizer_cfg)
loader = build_text_dataloader(cfg, tokenizer, device_batch_size)
tokenizer = loader.dataset.tokenizer # type: ignore
for batch_ix, batch in enumerate(islice(loader, 5)):
print('\n')
print('#' * 20, f'Batch {batch_ix}', '#' * 20)
for k, v in batch.items():
print(k, v.shape, v.dtype)
for sample_ix, token_sample in enumerate(batch['input_ids']):
print('-' * 20, f' Sample {sample_ix} ', '-' * 20)
print(tokenizer.decode(token_sample))
|
m2-main
|
bert/src/text_data.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
"""Streaming dataset conversion scripts for C4 and The Pile."""
import os
import platform
import warnings
from argparse import ArgumentParser, Namespace
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Iterable, Optional, Union
import datasets as hf_datasets
import numpy as np
from streaming import MDSWriter
from torch.utils.data import DataLoader, IterableDataset
from tqdm import tqdm
from transformers import AutoTokenizer, PreTrainedTokenizerBase
class ConcatMode(Enum):
NO_CONCAT = 'NO_CONCAT'
CONCAT_TOKENS = 'CONCAT_TOKENS'
def parse_args() -> Namespace:
"""Parse commandline arguments."""
parser = ArgumentParser(
description=
'Convert dataset into MDS format, optionally concatenating and tokenizing'
)
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--data_subset',
type=str,
default=None,
help='E.g. "all" or "en"')
parser.add_argument('--splits',
nargs='+',
default=['train', 'train_small', 'val'])
parser.add_argument('--out_root', type=str, required=True)
parser.add_argument('--compression', type=str, default=None)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'--concat_tokens',
type=int,
help='Convert text to tokens and concatenate up to this many tokens')
parser.add_argument('--tokenizer', type=str, required=False, default=None)
parser.add_argument('--bos_text', type=str, required=False, default=None)
parser.add_argument('--eos_text', type=str, required=False, default=None)
parser.add_argument('--no_wrap', default=False, action='store_true')
parsed = parser.parse_args()
if os.path.isdir(parsed.out_root) and len(
set(os.listdir(parsed.out_root)).intersection(set(
parsed.splits))) > 0:
raise ValueError(
f'--out_root={parsed.out_root} contains {os.listdir(parsed.out_root)} which cannot overlap with the requested splits {parsed.splits}.'
)
# Make sure we have needed concat options
if (parsed.concat_tokens is not None and
isinstance(parsed.concat_tokens, int) and parsed.tokenizer is None):
parser.error(
'When setting --concat_tokens, you must specify a --tokenizer')
# now that we have validated them, change BOS/EOS to strings
if parsed.bos_text is None:
parsed.bos_text = ''
if parsed.eos_text is None:
parsed.eos_text = ''
return parsed
@dataclass
class DataSplitConstants:
hf_split: str
folder_split: str
raw_samples: int
truncated_samples: Union[int, None]
@dataclass
class DatasetConstants:
chars_per_sample: int
chars_per_token: int
    # use a default_factory so each DatasetConstants instance gets its own dict
    splits: Dict[str, DataSplitConstants] = field(default_factory=dict)
def __iter__(self):
for _, v in self.splits.items():
yield v
class TrainSmallConstants(DataSplitConstants):
def __init__(self,
hf_split: str = 'train',
folder_split: str = 'train_small',
raw_samples: int = 1000000,
truncated_samples: int = 100000):
super().__init__(hf_split, folder_split, raw_samples, truncated_samples)
class ValSmallConstants(DataSplitConstants):
def __init__(self,
hf_split: str = 'validation',
folder_split: str = 'val_small',
raw_samples: int = 10000,
truncated_samples: int = 10000):
super().__init__(hf_split, folder_split, raw_samples, truncated_samples)
pileconstants = DatasetConstants(
chars_per_sample=6212, # Computed over validation set
chars_per_token=4 # OpenAI estimate
)
pileconstants.splits['train'] = DataSplitConstants(hf_split='train',
folder_split='train',
raw_samples=210607728,
truncated_samples=None)
pileconstants.splits['train_small'] = DataSplitConstants(
hf_split='train',
folder_split='train_small',
raw_samples=1000000,
truncated_samples=100000)
pileconstants.splits['val'] = DataSplitConstants(hf_split='validation',
folder_split='val',
raw_samples=214670,
truncated_samples=None)
pileconstants.splits['val_small'] = DataSplitConstants(hf_split='validation',
folder_split='val_small',
raw_samples=10000,
truncated_samples=10000)
c4constants = DatasetConstants(
chars_per_sample=2163, # Computed over validation set
chars_per_token=4 # OpenAI estimate
)
c4constants.splits['train'] = DataSplitConstants(hf_split='train',
folder_split='train',
raw_samples=364868892,
truncated_samples=None)
c4constants.splits['train_small'] = DataSplitConstants(
hf_split='train',
folder_split='train_small',
raw_samples=1000000,
truncated_samples=100000)
c4constants.splits['val'] = DataSplitConstants(hf_split='validation',
folder_split='val',
raw_samples=364608,
truncated_samples=None)
c4constants.splits['val_small'] = DataSplitConstants(hf_split='validation',
folder_split='val_small',
raw_samples=10000,
truncated_samples=10000)
redpajamaconstants = DatasetConstants(
chars_per_sample=505927, # GPT-NeoX tokenized size * 4 over book subset
chars_per_token=4 # OpenAI estimate
)
redpajamaconstants.splits['train'] = DataSplitConstants(
hf_split='train',
folder_split='train',
raw_samples=205744,
truncated_samples=None
)
wikiconstants = DatasetConstants(
chars_per_sample=2163, # Computed over validation set
chars_per_token=4 # OpenAI estimate
)
wikiconstants.splits['train'] = DataSplitConstants(hf_split='train',
folder_split='train',
raw_samples=6452100,
truncated_samples=None)
wikiconstants.splits['val'] = DataSplitConstants(hf_split='validation',
folder_split='val',
raw_samples=6570,
truncated_samples=None)
booksconstants = DatasetConstants(
chars_per_sample=2163, # Computed over validation set
chars_per_token=4 # OpenAI estimate
)
booksconstants.splits['train'] = DataSplitConstants(hf_split='train',
folder_split='train',
raw_samples=6452100,
truncated_samples=None)
booksconstants.splits['val'] = DataSplitConstants(hf_split='validation',
folder_split='val',
raw_samples=6570,
truncated_samples=None)
CONSTS = {
'c4': c4constants,
'the_pile': pileconstants,
'togethercomputer/RedPajama-Data-1T': redpajamaconstants,
'wikipedia': wikiconstants,
'bookcorpus': booksconstants
}
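# Illustrative lookup into the constants above: CONSTS['c4'].splits['val'] has
# hf_split='validation' and folder_split='val'.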
class NoConcatDataset(IterableDataset):
"""An IterableDataset that returns text samples for MDSWriter.
Returns dicts of {'text': bytes}
"""
def __init__(self, dataset_name: str, data_subset: Union[str, None],
split: str):
self.hf_dataset = hf_datasets.load_dataset(path=dataset_name,
name=data_subset,
split=split,
streaming=True)
def __iter__(self) -> Iterable[Dict[str, bytes]]:
for sample in self.hf_dataset:
            # truncate to the first 4000 characters and convert to bytes to store in MDS binary format
            yield {'text': sample['text'][:4000].encode('utf-8')}
class ConcatTokensDataset(IterableDataset):
"""An IterableDataset that returns token samples for MDSWriter.
Returns dicts of {'tokens': bytes}
To use data created by this class and written to MDS format:
```python
import torch
from streaming.base import StreamingDataset
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('your/tokenizer')
ds = StreamingDataset(local='mds-data-folder', split='val')
# note, you need to copy the numpy array because the original is non-writeable
# and torch does not support non-writeable tensors, so you get a scary warning and
# if you do try to write to the tensor you get undefined behavior
tokens = torch.from_numpy(np.frombuffer(ds[0]['tokens'], dtype=np.int64).copy())
print(tokenizer.decode(tokens))
```
"""
def __init__(self,
dataset_name: str,
split: str,
tokenizer: PreTrainedTokenizerBase,
max_length: int,
bos_text: str,
eos_text: str,
no_wrap: bool,
data_subset: Union[str, None] = None):
self.tokenizer = tokenizer
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
self.max_length = max_length
self.bos_text = bos_text
self.eos_text = eos_text
self.should_wrap = not no_wrap
self.hf_dataset = hf_datasets.load_dataset(path=dataset_name,
name=data_subset,
split=split,
streaming=True)
self.bos_tokens = self.tokenizer(self.bos_text,
truncation=False,
padding=False,
add_special_tokens=False)['input_ids']
if len(self.bos_tokens) > 1:
warnings.warn(
f'You specified --concat_tokens with --bos_text, but your BOS text is not tokenizing to one token\
, instead we got {self.bos_tokens}. Quit if this was in error.')
self.eos_tokens = self.tokenizer(self.eos_text,
truncation=False,
padding=False,
add_special_tokens=False)['input_ids']
if len(self.eos_tokens) > 1:
warnings.warn(
f'You specified --concat_tokens with --eos_text, but your EOS text is not tokenizing to one token\
, instead we got {self.eos_tokens}. Quit if this was in error.')
eos_text_provided = self.eos_text != ''
bos_text_provided = self.bos_text != ''
test_text = self.tokenizer('')
if len(test_text['input_ids']) > 0 and (eos_text_provided or
bos_text_provided):
message = 'both eos and bos' if eos_text_provided and bos_text_provided else (
'eos_text' if eos_text_provided else 'bos_text')
warnings.warn(
f'The provided tokenizer adds special tokens, but you also specified {message}. This may result '
'in duplicated special tokens. Please be sure this is what you intend.'
)
def __iter__(self) -> Iterable[Dict[str, bytes]]:
buffer = []
for sample in self.hf_dataset:
            # cap each raw text sample at 8000 characters before tokenizing
            encoded = self.tokenizer(sample['text'][:8000],
truncation=False,
padding=False,
add_special_tokens=False
)
iids = encoded['input_ids']
buffer = buffer + iids
while len(buffer) >= self.max_length:
                use_for_sample = self.max_length - 2  # reserve room for the single bos and eos tokens appended below
concat_sample = buffer[:use_for_sample]
concat_sample = self.bos_tokens + concat_sample + self.eos_tokens
buffer = buffer[use_for_sample:] if self.should_wrap else []
yield {
# convert to bytes to store in MDS binary format
'tokens': np.asarray(concat_sample).tobytes()
}
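# Illustrative trace of ConcatTokensDataset.__iter__ (hypothetical numbers):
# with max_length=8 and single-token bos/eos text, each pass takes
# max_length - 2 = 6 tokens from the buffer, yields [bos, t1..t6, eos], and
# (when wrapping) carries the remaining buffered tokens into the next sample.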
def build_hf_dataset(dataset_name: str,
split: str,
mode: ConcatMode,
max_length: int,
bos_text: Optional[str],
eos_text: Optional[str],
no_wrap: bool,
tokenizer: Optional[PreTrainedTokenizerBase],
data_subset: Union[str, None] = None) -> IterableDataset:
"""Build an IterableDataset over the HF C4 or pile source data.
Args:
dataset_name (str): Dataset name
split (str): Split name.
        mode (ConcatMode): NO_CONCAT, or CONCAT_TOKENS
        max_length (int): if mode is CONCAT_TOKENS, the number of tokens per concatenated sample
bos_text (str): text to insert at the beginning of each sequence
eos_text (str): text to insert at the end of each sequence
no_wrap (bool): if concatenating, whether to wrap text across `max_length` boundaries
tokenizer (PreTrainedTokenizerBase): if mode is CONCAT_TOKENS, the tokenizer to use
data_subset (str): Referred to as "name" in HuggingFace datasets.load_dataset.
Typically "all" (The Pile) or "en" (c4).
Returns:
An IterableDataset.
"""
if mode == ConcatMode.NO_CONCAT:
dataset = NoConcatDataset(dataset_name=dataset_name,
data_subset=data_subset,
split=split)
else:
assert bos_text is not None
assert eos_text is not None
assert tokenizer is not None
if bos_text + eos_text == '':
test_tokens = tokenizer('test')
if test_tokens['input_ids'][
0] != tokenizer.bos_token_id and test_tokens['input_ids'][
-1] != tokenizer.eos_token_id:
tok_error_msg = 'This tokenizer does not insert an EOS nor BOS token. '
tok_error_msg += 'Concatenating with this tokenizer will result in sequences being '
tok_error_msg += 'attached without a separating token. Please use another tokenizer, '
tok_error_msg += 'such as facebook/opt-125m, or specify EOS/BOS text with e.g. '
tok_error_msg += '--bos_text=<|endoftext|>.'
raise ValueError(tok_error_msg)
dataset = ConcatTokensDataset(dataset_name=dataset_name,
data_subset=data_subset,
split=split,
tokenizer=tokenizer,
max_length=max_length,
bos_text=bos_text,
eos_text=eos_text,
no_wrap=no_wrap)
return dataset
def _est_progress_denominator(total_samples: int, chars_per_sample: int,
chars_per_token: int, mode: ConcatMode,
max_length: int):
est_tokens_per_sample = chars_per_sample // chars_per_token
if mode == ConcatMode.NO_CONCAT:
return total_samples
elif mode == ConcatMode.CONCAT_TOKENS:
return total_samples * est_tokens_per_sample // max_length
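# Illustrative arithmetic (hypothetical values): with chars_per_sample=2048,
# chars_per_token=4, and max_length=128, est_tokens_per_sample = 512, so
# 1000 raw samples map to roughly 1000 * 512 // 128 = 4000 concatenated samples.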
def build_dataloader(dataset, batch_size) -> DataLoader:
# Multiple workers is only supported on linux machines
if 'linux' in platform.platform().lower():
num_workers = min(64, dataset.hf_dataset.n_shards) # type: ignore
else:
num_workers = 0
# If using multiple workers, configure each worker to prefetch as many samples as it can, up to
# the aggregate device batch size
# If not using workers, the torch DataLoader expects the default value for prefetch_factor,
# which non-intuitively must be 2.
prefetch_factor = max(1, 2 * batch_size //
num_workers) if num_workers > 0 else 2
return DataLoader(
dataset=dataset,
sampler=None,
batch_size=batch_size,
num_workers=num_workers,
prefetch_factor=prefetch_factor,
)
def generate_samples(
loader: DataLoader,
truncate_num_samples: Optional[int] = None
) -> Iterable[Dict[str, bytes]]:
"""Generator over samples of a dataloader.
Args:
loader (DataLoader): A dataloader emitting batches like {key: [sample0_bytes, sample1_bytes, sample2_bytes, ...]}
truncate_num_samples (Optional[int]): An optional # of samples to stop at.
Yields:
Sample dicts.
"""
n_samples = 0
for batch in loader:
keys = list(batch.keys())
current_bs = len(batch[keys[0]])
for idx in range(current_bs):
if truncate_num_samples is not None and n_samples == truncate_num_samples:
return
n_samples += 1
yield {k: v[idx] for k, v in batch.items()}
def main(args: Namespace) -> None:
"""Main: create C4/pile streaming dataset.
Args:
args (Namespace): Commandline arguments.
"""
try:
dataset_constants = CONSTS[args.dataset]
except KeyError:
raise ValueError(
            f'Constants for dataset "{args.dataset}" not found. Supported datasets: {list(CONSTS.keys())}.'
)
if args.concat_tokens is not None:
mode = ConcatMode.CONCAT_TOKENS
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
# we will enforce length, so suppress warnings about sequences too long for the model
tokenizer.model_max_length = int(1e30)
columns = {'tokens': 'bytes'}
else:
mode = ConcatMode.NO_CONCAT
tokenizer = None
columns = {'text': 'str'}
for split_name in args.splits:
try:
split = dataset_constants.splits[split_name]
except KeyError:
raise KeyError(f'Constants not defined for split {split_name}.')
hf_split = split.hf_split
folder_split = split.folder_split
expected_num_samples = split.raw_samples
truncate_num_samples = split.truncated_samples
# Only generate the splits requested
if folder_split not in args.splits:
continue
# Get samples
dataset = build_hf_dataset(dataset_name=args.dataset,
data_subset=args.data_subset,
split=hf_split,
mode=mode,
max_length=args.concat_tokens,
bos_text=args.bos_text,
eos_text=args.eos_text,
no_wrap=args.no_wrap,
tokenizer=tokenizer)
loader = build_dataloader(dataset=dataset, batch_size=512)
        if 'wiki' in args.dataset or 'bookcorpus' in args.dataset:
truncate_num_samples = None
samples = generate_samples(loader,
truncate_num_samples=truncate_num_samples)
if expected_num_samples is not None:
            if 'wiki' in args.dataset or 'bookcorpus' in args.dataset:
                denominator = expected_num_samples
            else:
                denominator = truncate_num_samples if truncate_num_samples is not None else _est_progress_denominator(
total_samples=expected_num_samples,
chars_per_sample=dataset_constants.chars_per_sample,
chars_per_token=dataset_constants.chars_per_token,
mode=mode,
max_length=args.concat_tokens,
)
else:
denominator = None
# Write samples
print(f'Converting {folder_split} to MDS format...')
with MDSWriter(columns=columns,
out=os.path.join(args.out_root, folder_split),
compression=args.compression) as out:
if denominator is not None:
for sample in tqdm(samples,
desc=folder_split,
total=denominator):
out.write(sample)
else:
for sample in tqdm(samples, desc=folder_split):
out.write(sample)
if __name__ == '__main__':
main(parse_args())
|
m2-main
|
bert/src/convert_dataset.py
|
# Adapted from https://github.com/HazyResearch/hippo/blob/datasets/benchmark/utils.py
""" Useful functions for writing test code. """
import torch
import torch.utils.benchmark as benchmark
def benchmark_forward(fn, *inputs, repeats = 10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forward pass of an arbitrary function. """
if verbose:
print(desc, '- Forward pass')
def amp_wrapper(*inputs, **kwinputs):
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
fn(*inputs, **kwinputs)
t = benchmark.Timer(
stmt='fn_amp(*inputs, **kwinputs)',
globals={'fn_amp': amp_wrapper, 'inputs': inputs, 'kwinputs': kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_backward(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the backward pass of an arbitrary function. """
if verbose:
print(desc, '- Backward pass')
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
y = fn(*inputs, **kwinputs)
if type(y) is tuple:
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError('Grad shape does not match output shape')
t = benchmark.Timer(
stmt='y.backward(grad, retain_graph=True)',
globals={'y': y, 'grad': grad},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_combined(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forward+backward pass of an arbitrary function. """
if verbose:
print(desc, '- Forward + Backward pass')
# y = fn(*inputs, **kwinputs)
# if grad is None:
# grad = torch.randn_like(y)
# else:
# if grad.shape != y.shape:
# raise RuntimeError('Grad shape does not match output shape')
# del y
def f(grad, *inputs, **kwinputs):
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
y = fn(*inputs, **kwinputs)
if type(y) is tuple:
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError('Grad shape does not match output shape')
y.backward(grad, retain_graph=True)
t = benchmark.Timer(
stmt='f(grad, *inputs, **kwinputs)',
globals={'f': f, 'fn': fn, 'inputs': inputs, 'grad': grad, 'kwinputs': kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_all(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forward+backward pass of an arbitrary function. """
return (
benchmark_forward(fn, *inputs, repeats=repeats, desc=desc, verbose=verbose,
amp=amp, amp_dtype=amp_dtype, **kwinputs),
benchmark_backward(fn, *inputs, grad=grad, repeats=repeats, desc=desc, verbose=verbose,
amp=amp, amp_dtype=amp_dtype, **kwinputs),
benchmark_combined(fn, *inputs, grad=grad, repeats=repeats, desc=desc, verbose=verbose,
amp=amp, amp_dtype=amp_dtype, **kwinputs),
)
def pytorch_profiler(fn, *inputs, trace_filename=None, backward=False, amp=False,
amp_dtype=torch.float16, cpu=False, verbose=True, **kwinputs):
""" Wrap benchmark functions in Pytorch profiler to see CUDA information. """
if backward:
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
g = torch.randn_like(fn(*inputs, **kwinputs))
for _ in range(30): # Warm up
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
if backward:
for x in inputs:
if isinstance(x, torch.Tensor):
x.grad = None
# fn(*inputs, **kwinputs) if not backward else fn(*inputs, **kwinputs).backward(g)
out = fn(*inputs, **kwinputs)
# Backward should be done outside autocast
if backward:
out.backward(g)
activities = ([torch.profiler.ProfilerActivity.CPU] if cpu else []) + [torch.profiler.ProfilerActivity.CUDA]
with torch.profiler.profile(
activities=activities,
record_shapes=True,
# profile_memory=True,
with_stack=True,
) as prof:
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
if backward:
for x in inputs:
if isinstance(x, torch.Tensor):
x.grad = None
out = fn(*inputs, **kwinputs)
if backward: out.backward(g)
if verbose:
# print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=50))
print(prof.key_averages().table(row_limit=50))
if trace_filename is not None:
prof.export_chrome_trace(trace_filename)
def benchmark_memory(fn, *inputs, desc='', verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
fn(*inputs, **kwinputs)
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2 ** 20) * 1000)
if verbose:
print(f'{desc} max memory: {mem}GB')
torch.cuda.empty_cache()
return mem
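# Illustrative usage sketch (not part of the original file); the layer and
# input shapes below are arbitrary assumptions and require a CUDA device:
#
#   layer = torch.nn.Linear(1024, 1024).cuda()
#   x = torch.randn(8, 1024, device='cuda', requires_grad=True)
#   benchmark_all(layer, x, desc='linear', amp=True)
#   benchmark_memory(layer, x, desc='linear')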
|
m2-main
|
bert/src/benchmark/benchmark.py
|
m2-main
|
bert/src/benchmark/__init__.py
|
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
# """Contains GLUE job objects for the simple_glue_trainer."""
import atexit
import copy
import gc
import multiprocessing as mp
import os
import sys
from multiprocessing import managers
from typing import Any, Dict, List, Optional, Union, cast
# Add glue folder root to path to allow us to use relative imports regardless of what directory the script is run from
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
print(sys.path)
import torch
from composer import ComposerModel
from composer.core import Callback
from composer.core.evaluator import Evaluator
from composer.core.types import Dataset
from composer.devices import Device, DeviceGPU
from composer.loggers import LoggerDestination
from composer.optim import ComposerScheduler, DecoupledAdamW
from src.optim.create_param_groups import create_param_groups
from composer.trainer.trainer import Trainer
from composer.utils import dist, reproducibility
from data import create_glue_dataset
from torch.utils.data import DataLoader
def _build_dataloader(dataset, **kwargs):
import transformers
dataset = cast(Dataset, dataset)
return DataLoader(
dataset=dataset,
sampler=dist.get_sampler(dataset, drop_last=False, shuffle=False),
collate_fn=transformers.default_data_collator,
**kwargs,
)
Metrics = Dict[str, Dict[str, Any]]
TASK_NAME_TO_NUM_LABELS = {
'mnli': 3,
'rte': 2,
'mrpc': 2,
'qnli': 2,
'qqp': 2,
'sst2': 2,
'stsb': 1,
'cola': 2,
}
def reset_trainer(trainer: Trainer, garbage_collect: bool = False):
"""Cleans up memory usage left by trainer."""
trainer.close()
# Unregister engine from atexit to remove ref
atexit.unregister(trainer.engine._close)
# Close potentially persistent dataloader workers
loader = trainer.state.train_dataloader
if loader and loader._iterator is not None: # type: ignore
loader._iterator._shutdown_workers() # type: ignore
# Explicitly delete attributes of state as otherwise gc.collect() doesn't free memory
for key in list(trainer.state.__dict__.keys()):
delattr(trainer.state, key)
# Delete the rest of trainer attributes
for key in list(trainer.__dict__.keys()):
delattr(trainer, key)
if garbage_collect:
gc.collect()
torch.cuda.empty_cache()
class FineTuneJob:
"""Encapsulates a fine-tuning job.
Tasks should subclass FineTuneJob and implement the
get_trainer() method.
Args:
name (str, optional): job name. Defaults to the class name.
load_path (str, optional): path to load checkpoints. Default: None
save_folder (str, optional): path to save checkpoints. Default: None
        kwargs (dict, optional): additional keyword arguments passed to the Trainer.
"""
def __init__(
self,
job_name: Optional[str] = None,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
seed: int = 42,
**kwargs,
):
reproducibility.seed_all(seed)
self._job_name = job_name
self.seed = seed
self.load_path = load_path
self.save_folder = save_folder
self.kwargs = kwargs
def get_trainer(self, device: Optional[Union[str, Device]]) -> Trainer:
"""Returns the trainer for the job."""
raise NotImplementedError
def print_metrics(self, metrics: Metrics):
"""Prints fine-tuning results."""
job_name = self.job_name
print(f'Results for {job_name}:')
print('-' * (12 + len(job_name)))
for eval, metric in metrics.items():
for metric_name, value in metric.items():
print(f'{eval}: {metric_name}, {value*100:.2f}')
print('-' * (12 + len(job_name)))
@property
def job_name(self) -> str:
"""Job name, defaults to class name."""
if self._job_name is not None:
return self._job_name
return self.__class__.__name__
def run(self,
gpu_queue: Optional[mp.Queue] = None,
process_to_gpu: Optional[managers.DictProxy] = None
) -> Dict[str, Any]:
"""Trains the model, optionally pulling a GPU id from the queue.
Returns:
A dict with keys:
* 'checkpoints': list of saved_checkpoints, if any,
* 'metrics': nested dict of results, accessed by
dataset and metric name, e.g.
``metrics['glue_mnli']['MulticlassAccuracy']``.
"""
if gpu_queue is None:
if torch.cuda.device_count() > 0:
gpu_id = 0
device = DeviceGPU(gpu_id)
else:
gpu_id = None
device = 'cpu'
else:
current_pid = os.getpid()
assert process_to_gpu is not None
if current_pid in process_to_gpu:
gpu_id = process_to_gpu[current_pid]
else:
gpu_id = gpu_queue.get()
process_to_gpu[current_pid] = gpu_id
device = DeviceGPU(gpu_id)
print(f'Running {self.job_name} on GPU {gpu_id}')
trainer = self.get_trainer(device=device)
trainer.fit()
collected_metrics: Dict[str, Dict[str, Any]] = {}
for eval_name, metrics in trainer.state.eval_metrics.items():
collected_metrics[eval_name] = {
name: metric.compute().cpu().numpy()
for name, metric in metrics.items()
}
saved_checkpoints = copy.copy(trainer.saved_checkpoints)
reset_trainer(trainer, garbage_collect=True)
self.print_metrics(collected_metrics)
output = {
'checkpoints': saved_checkpoints,
'metrics': collected_metrics,
'job_name': self.job_name
}
return output
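# Illustrative usage sketch (hypothetical values; `model` and the checkpoint
# path are assumptions, not values from this repo):
#
#   job = MNLIJob(model=model, tokenizer_name='bert-base-uncased',
#                 load_path='/path/to/pretrained_checkpoint.pt')
#   results = job.run()
#   mnli_acc = results['metrics']['glue_mnli']['MulticlassAccuracy']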
class GlueClassificationJob(FineTuneJob):
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
task_name: Optional[str] = None,
num_labels: Optional[int] = -1,
eval_interval: str = '1000ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '3ep',
batch_size: Optional[int] = 32,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
**kwargs,
):
if task_name is None:
raise ValueError(
'GlueClassificationJob should not be instantiated directly. Please instantiate a specific glue job type instead (e.g. MNLIJob).'
)
super().__init__(job_name, load_path, save_folder, seed, **kwargs)
self.task_name = task_name
self.num_labels = num_labels
self.eval_interval = eval_interval
self.tokenizer_name = tokenizer_name
self.model = model
self.scheduler = scheduler
print('Max sequence length', max_sequence_length)
self.max_sequence_length = max_sequence_length
self.max_duration = max_duration
self.batch_size = batch_size
self.loggers = loggers
self.callbacks = callbacks
self.precision = precision
# These will be set by the subclasses for specific GLUE tasks
self.train_dataloader = None
self.evaluators = None
self.optimizer = None
def get_trainer(self, device: Optional[Union[Device, str]] = None):
return Trainer(model=self.model,
optimizers=self.optimizer,
schedulers=self.scheduler,
train_dataloader=self.train_dataloader,
eval_dataloader=self.evaluators,
eval_interval=self.eval_interval,
load_path=self.load_path,
save_folder=self.save_folder,
max_duration=self.max_duration,
seed=self.seed,
device_train_microbatch_size='auto'
if torch.cuda.device_count() > 0 else None,
load_weights_only=True,
load_strict_model_weights=False,
loggers=self.loggers,
callbacks=self.callbacks,
python_log_level='ERROR',
run_name=self.job_name,
load_ignore_keys=['state/model/model.classifier*'],
precision=self.precision,
device=device,
progress_bar=True,
log_to_console=False,
**self.kwargs)
class MNLIJob(GlueClassificationJob):
"""MNLI."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '2300ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '3ep',
batch_size: Optional[int] = 48,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 5.0e-05,
wd: Optional[float] = 5.0e-06,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='mnli',
num_labels=3,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
print(f"\nGLUE task {self.task_name} Details:")
print('-- lr:', lr)
print('-- wd:', wd)
print(f"-- seed: {seed}")
if optim_name == 'DecoupledAdamW':
print(f"-- using DecoupledAdamW optimizer")
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
print(f"-- using AdamW optimizer")
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
print('Max sequence length in MNLI', max_sequence_length, self.max_sequence_length)
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
mnli_eval_dataset = create_glue_dataset(split='validation_matched',
**dataset_kwargs)
mnli_eval_mismatched_dataset = create_glue_dataset(
split='validation_mismatched', **dataset_kwargs)
mnli_evaluator = Evaluator(label='glue_mnli',
dataloader=_build_dataloader(
mnli_eval_dataset, **dataloader_kwargs),
metric_names=['MulticlassAccuracy'])
mnli_evaluator_mismatched = Evaluator(
label='glue_mnli_mismatched',
dataloader=_build_dataloader(mnli_eval_mismatched_dataset,
**dataloader_kwargs),
metric_names=['MulticlassAccuracy'])
self.evaluators = [mnli_evaluator, mnli_evaluator_mismatched]
class RTEJob(GlueClassificationJob):
"""RTE."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '100ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '3ep',
batch_size: Optional[int] = 16,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 1.0e-5,
wd: Optional[float] = 1.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='rte',
num_labels=2,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
if optim_name == 'DecoupledAdamW':
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
rte_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
rte_evaluator = Evaluator(label='glue_rte',
dataloader=_build_dataloader(
rte_eval_dataset, **dataloader_kwargs),
metric_names=['MulticlassAccuracy'])
self.evaluators = [rte_evaluator]
class QQPJob(GlueClassificationJob):
"""QQP."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '2000ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '5ep',
batch_size: Optional[int] = 16,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 3.0e-5,
wd: Optional[float] = 3.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='qqp',
num_labels=2,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
print(f"QNLI:")
print('-- lr:', lr)
print('-- wd:', wd)
print(f"-- optim_name: {optim_name}")
if optim_name == 'DecoupledAdamW':
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
qqp_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
qqp_evaluator = Evaluator(
label='glue_qqp',
dataloader=_build_dataloader(qqp_eval_dataset, **dataloader_kwargs),
metric_names=['MulticlassAccuracy', 'BinaryF1Score'])
self.evaluators = [qqp_evaluator]
class COLAJob(GlueClassificationJob):
"""COLA."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '250ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '10ep',
batch_size: Optional[int] = 32,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 5.0e-5,
wd: Optional[float] = 5.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='cola',
num_labels=2,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
print('COLA LR', lr)
print('COLA WD', wd)
if optim_name == 'DecoupledAdamW':
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
cola_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
cola_evaluator = Evaluator(label='glue_cola',
dataloader=_build_dataloader(
cola_eval_dataset, **dataloader_kwargs),
# metric_names=['MatthewsCorrCoef', 'MulticlassAccuracy', 'BinaryF1Score']
metric_names=['MatthewsCorrCoef']
)
self.evaluators = [cola_evaluator]
class MRPCJob(GlueClassificationJob):
"""MRPC."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '100ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '10ep',
batch_size: Optional[int] = 32,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 8.0e-5,
wd: Optional[float] = 8.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='mrpc',
num_labels=2,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
if optim_name == 'DecoupledAdamW':
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
mrpc_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
mrpc_evaluator = Evaluator(
label='glue_mrpc',
dataloader=_build_dataloader(mrpc_eval_dataset,
**dataloader_kwargs),
# metric_names=['MulticlassAccuracy', 'BinaryF1Score']
metric_names=['BinaryF1Score']
)
self.evaluators = [mrpc_evaluator]
class QNLIJob(GlueClassificationJob):
"""QNLI."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '1000ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '10ep',
batch_size: Optional[int] = 16,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 1.0e-5,
wd: Optional[float] = 1.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='qnli',
num_labels=2,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
print(f"\nGLUE task {self.task_name} Details:")
print('-- lr:', lr)
print('-- wd:', wd)
print(f"-- seed: {seed}")
if optim_name == 'DecoupledAdamW':
print(f"-- using DecoupledAdamW optimizer")
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
print(f"-- using AdamW optimizer")
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
qnli_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
qnli_evaluator = Evaluator(label='glue_qnli',
dataloader=_build_dataloader(
qnli_eval_dataset, **dataloader_kwargs),
metric_names=['MulticlassAccuracy'])
self.evaluators = [qnli_evaluator]
class SST2Job(GlueClassificationJob):
"""SST2."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '500ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '3ep',
batch_size: Optional[int] = 16,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 3.0e-5,
wd: Optional[float] = 3.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='sst2',
num_labels=2,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
        print(f"\nGLUE task {self.task_name} Details:")
        print('-- lr:', lr)
        print('-- wd:', wd)
        print(f"-- seed: {seed}")
if optim_name == 'DecoupledAdamW':
print(f"-- using DecoupledAdamW optimizer")
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
print(f"-- using AdamW optimizer")
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
sst2_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
sst2_evaluator = Evaluator(label='glue_sst2',
dataloader=_build_dataloader(
sst2_eval_dataset, **dataloader_kwargs),
metric_names=['MulticlassAccuracy'])
self.evaluators = [sst2_evaluator]
class STSBJob(GlueClassificationJob):
"""STSB."""
def __init__(
self,
model: ComposerModel,
tokenizer_name: str,
job_name: Optional[str] = None,
seed: int = 42,
eval_interval: str = '200ba',
scheduler: Optional[ComposerScheduler] = None,
max_sequence_length: Optional[int] = 256,
max_duration: Optional[str] = '10ep',
batch_size: Optional[int] = 32,
load_path: Optional[str] = None,
save_folder: Optional[str] = None,
loggers: Optional[List[LoggerDestination]] = None,
callbacks: Optional[List[Callback]] = None,
precision: Optional[str] = None,
lr: Optional[float] = 3.0e-5,
wd: Optional[float] = 3.0e-6,
optim_name: Optional[str] = 'DecoupledAdamW',
**kwargs,
):
super().__init__(model=model,
tokenizer_name=tokenizer_name,
job_name=job_name,
seed=seed,
task_name='stsb',
num_labels=1,
eval_interval=eval_interval,
scheduler=scheduler,
max_sequence_length=max_sequence_length,
max_duration=max_duration,
batch_size=batch_size,
load_path=load_path,
save_folder=save_folder,
loggers=loggers,
callbacks=callbacks,
precision=precision,
**kwargs)
if optim_name == 'DecoupledAdamW':
self.optimizer = DecoupledAdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
else:
from torch.optim import AdamW
self.optimizer = AdamW(create_param_groups(None, self.model),
lr=lr,
betas=(0.9, 0.98),
eps=1.0e-06,
weight_decay=wd)
dataset_kwargs = {
'task': self.task_name,
'tokenizer_name': self.tokenizer_name,
'max_seq_length': self.max_sequence_length,
}
dataloader_kwargs = {
'batch_size': self.batch_size,
'num_workers': 0,
'shuffle': False,
'drop_last': False,
}
train_dataset = create_glue_dataset(split='train', **dataset_kwargs)
self.train_dataloader = _build_dataloader(train_dataset,
**dataloader_kwargs)
stsb_eval_dataset = create_glue_dataset(split='validation',
**dataset_kwargs)
stsb_evaluator = Evaluator(label='glue_stsb',
dataloader=_build_dataloader(
stsb_eval_dataset, **dataloader_kwargs),
metric_names=['SpearmanCorrCoef'])
self.evaluators = [stsb_evaluator]
# Hardcoded for STSB due to a bug (Can be removed once torchmetrics fixes https://github.com/Lightning-AI/metrics/issues/1294)
self.precision = 'fp32'
|
m2-main
|
bert/src/glue/finetuning_jobs.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
|
m2-main
|
bert/src/glue/__init__.py
|
# Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
import logging
from composer.utils import MissingConditionalImportError, dist
_task_column_names = {
'cola': ('sentence', None),
'mnli': ('premise', 'hypothesis'),
'mrpc': ('sentence1', 'sentence2'),
'qnli': ('question', 'sentence'),
'qqp': ('question1', 'question2'),
'rte': ('sentence1', 'sentence2'),
'sst2': ('sentence', None),
'stsb': ('sentence1', 'sentence2'),
}
log = logging.getLogger(__name__)
def create_glue_dataset(
task: str,
tokenizer_name: str,
split: str,
max_seq_length: int = 256,
max_retries: int = 10,
num_workers: int = 0,
**kwargs,
):
print(f"Max sequence length: {max_seq_length}")
try:
import datasets
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers') from e
if task not in _task_column_names:
raise ValueError(
f'task ({task}) must be one of {_task_column_names.keys()}')
if (max_seq_length % 8) != 0:
log.warning(
'For performance, a max_seq_length as a multiple of 8 is recommended.'
)
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
log.info(f'Loading {task.upper()} on rank {dist.get_global_rank()}')
download_config = datasets.DownloadConfig(max_retries=max_retries)
dataset = datasets.load_dataset(
'glue',
task,
split=split,
download_config=download_config,
)
    log.info(
        f'Starting tokenization by preprocessing over {num_workers} processes!')
text_column_names = _task_column_names[task]
def tokenize_function(inp):
# truncates sentences to max_length or pads them to max_length
first_half = inp[text_column_names[0]]
second_half = inp[
text_column_names[1]] if text_column_names[1] in inp else None
return tokenizer(
text=first_half,
text_pair=second_half,
padding='max_length',
max_length=max_seq_length,
truncation=True,
)
    columns_to_remove = ['idx'] + [i for i in text_column_names if i is not None]
assert isinstance(dataset, datasets.Dataset)
safe_name = tokenizer_name.replace('/', ',')
dataset = dataset.map(
tokenize_function,
batched=True,
num_proc=None if num_workers == 0 else num_workers,
batch_size=1000,
remove_columns=columns_to_remove,
new_fingerprint=f'{task}-{safe_name}-tok-4-{split}-{max_seq_length}',
load_from_cache_file=True,
)
return dataset
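# Minimal usage sketch (not part of the original module): builds a tokenized MRPC train split.
# Assumes the `datasets`/`transformers` extras are installed and the GLUE data and tokenizer can
# be downloaded; 'bert-base-uncased' and max_seq_length=128 are illustrative choices only.
if __name__ == '__main__':
    example_ds = create_glue_dataset(
        task='mrpc',
        tokenizer_name='bert-base-uncased',
        split='train',
        max_seq_length=128,
    )
    print(len(example_ds), example_ds.column_names)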
|
m2-main
|
bert/src/glue/data.py
|
def create_param_groups(cfg, model):
    '''Create parameter groups based on whether each parameter has an `_optim` attribute attached.'''
if not any(hasattr(p, '_optim') for p in model.parameters()):
return model.parameters()
special_params = set()
other_params = set()
param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
# In case of parameter sharing, some parameters show up here but are not in
# param_dict.keys()
if not p.requires_grad or fpn not in param_dict:
continue # frozen weights
if hasattr(p, '_optim'):
special_params.add(fpn)
else:
other_params.add(fpn)
param_groups = [
{"params": [param_dict[pn] for pn in other_params]}
]
# Add parameters with special hyperparameters
# Unique dicts
hps = [
dict(s)
for s in set(frozenset(param_dict[pn]._optim.items())
for pn in special_params)
]
for hp in hps:
params = [
param_dict[pn]
for pn in sorted(list(special_params)) if param_dict[pn]._optim == hp
]
param_groups.append({"params": params, **hp})
return param_groups
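# Minimal sketch (illustrative, not part of the original file): a toy module with one parameter
# tagged via an `_optim` dict ends up in its own group carrying the overridden hyperparameters.
if __name__ == '__main__':
    import torch
    import torch.nn as nn

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = nn.Linear(4, 4)
            self.special = nn.Parameter(torch.zeros(4))
            # Per-parameter optimizer hyperparameters, attached the same way OptimModule.register does.
            self.special._optim = {'lr': 1e-3, 'weight_decay': 0.0}

    groups = create_param_groups(None, Toy())
    print([sorted(g.keys()) for g in groups])  # [['params'], ['lr', 'params', 'weight_decay']]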
|
m2-main
|
bert/src/optim/create_param_groups.py
|
m2-main
|
bert/src/optim/__init__.py
|
|
import json
import math
import os
import shutil
from collections import defaultdict
from tqdm import tqdm
directory = "/path/to/data"  # TODO: enter the path to your data directory here (placeholder)
new_directory = "/path/to/output"  # TODO: enter the output path here (placeholder)
val_pct = 0.0005  # Fraction of shards to hold out for validation
index_path = f"{directory}/train/index.json"
with open(index_path, "r") as f:
    index = json.load(f)
train_index = {}
val_index = {}
# Version
train_index["version"] = index["version"]
val_index["version"] = index["version"]
# Shards
num_shards = len(index["shards"])
num_train_shards = math.floor((1 - val_pct) * num_shards)
train_index['shards'] = []
val_index['shards'] = []
train_basenames = []
val_basenames = []
print(f"Splitting into {num_train_shards} train shards and {num_shards - num_train_shards} val shards")
for item in tqdm(index['shards'], desc="Splitting shards"):
shard_basename = item['raw_data']['basename']
shard = shard_basename.split('.')[1]
shard = int(shard)
if shard < num_train_shards:
train_index['shards'].append(item)
train_basenames.append(shard_basename)
else:
val_index['shards'].append(item)
val_basenames.append(shard_basename)
# Save down the new indices
train_directory = f"{new_directory}/train/"
val_directory = f"{new_directory}/val/"
os.makedirs(train_directory, exist_ok=True)
os.makedirs(val_directory, exist_ok=True)
with open(f"{train_directory}/index.json", "w") as f:
json.dump(train_index, f)
with open(f"{val_directory}/index.json", "w") as f:
json.dump(val_index, f)
# Copy the shards from the old directory to the new directories
for basename in tqdm(train_basenames, desc="Copying train shards"):
shutil.copy(f"{directory}/train/{basename}", f"{train_directory}/{basename}")
for basename in tqdm(val_basenames, desc="Copying val shards"):
shutil.copy(f"{directory}/train/{basename}", f"{val_directory}/{basename}")
|
m2-main
|
bert/src/utils/create_val_split.py
|
m2-main
|
bert/src/utils/__init__.py
|
|
""" Utils for the training loop. Copied from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py """
import torch.nn as nn
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
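# Minimal sketch (illustrative, not part of the original file): a module registers a tensor with
# its own learning rate; downstream param-group construction reads the attached `_optim` dict.
if __name__ == '__main__':
    import torch

    class Decay(OptimModule):
        def __init__(self):
            super().__init__()
            self.register('deltas', torch.linspace(0.1, 1.0, 8), lr=1e-4)

    print(Decay().deltas._optim)  # {'lr': 0.0001, 'weight_decay': 0.0}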
|
m2-main
|
bert/src/utils/train.py
|
# Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
import math
import torch
import torch.nn as nn
from einops import rearrange
from src.mm.structured_linear import StructuredLinear
from src.mm.blockdiag_multiply import blockdiag_multiply
class BlockdiagLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, shuffle=False, **kwargs):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
super().__init__(*args, **kwargs)
in_blksz = int(math.ceil(self.in_features / nblocks))
out_blksz = int(math.ceil(self.out_features / nblocks))
self.in_features_extended = in_blksz * nblocks
self.out_features_extended = out_blksz * nblocks
self.shuffle = shuffle
self.weight = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
self.reset_parameters()
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features_extended, self.in_features_extended,
device=self.weight.device, dtype=self.weight.dtype)
dense_init_fn_(dense_weight)
# Scale by sqrt because the weight is sparse
scaling = math.sqrt(dense_weight.numel() / self.weight.numel())
dense_weight *= scaling
with torch.no_grad():
nblocks = self.weight.shape[0]
self.weight.copy_(rearrange(dense_weight, '(b o) (b1 i) -> b b1 o i',
b=nblocks, b1=nblocks)[0])
@property
def saving(self):
return self.weight.numel() / (self.in_features * self.out_features)
def forward_matmul(self, x):
x = self.preprocess(x)
if self.shuffle:
x = rearrange(x, '... (group c_per_group) -> ... (c_per_group group)',
group=self.weight.shape[0]) # group=nblocks
output = blockdiag_multiply(x, self.weight)
return self.postprocess(output)
class BlockdiagSparsityConfig:
def __init__(self, nblocks, block=32, global_size=0):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
self.nblocks = nblocks
self.block = block
self.global_size = global_size
def make_layout(self, out_features, in_features):
assert out_features % self.block == 0 and in_features % self.block == 0
assert out_features % self.nblocks == 0 and in_features % self.nblocks == 0
layout = torch.block_diag(*[torch.ones(out_features // self.nblocks,
in_features // self.nblocks,
dtype=torch.int32)] * self.nblocks)
if self.global_size > 0:
layout[:self.global_size] = 1
layout[:, :self.global_size] = 1
# Convert from (out_features, in_features) mask to
# (out_features // block, in_features // block) mask
layout = rearrange(layout, '(p blksz) (r blksz1) -> p r (blksz blksz1)',
blksz=self.block, blksz1=self.block)
return (layout > 0).any(dim=-1).int()
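# Usage sketch (illustrative, not part of the original file): BlockdiagLinear is a drop-in for
# nn.Linear; `saving` reports the parameter-count ratio relative to a dense layer.
if __name__ == '__main__':
    torch.manual_seed(0)
    layer = BlockdiagLinear(64, 64, nblocks=4)
    x = torch.randn(2, 64)
    print(layer(x).shape, layer.saving)  # torch.Size([2, 64]) 0.25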
|
m2-main
|
bert/src/mm/blockdiag_linear.py
|
# Copyright (c) 2023, Dan Fu and Simran Arora.
# Adapted from https://github.com/HazyResearch/safari/blob/main/src/models/sequence/hyena.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import opt_einsum as oe
contract = oe.contract
from src.utils.train import OptimModule
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
# u.shape: B H L
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3:
k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
out = y + u * D
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, "b H -> b H 1")).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class Sin(nn.Module):
def __init__(self, dim, w=10, w_mod=1, train_freq=True):
super().__init__()
init_tensor = torch.ones(1, dim)
self.freq = (
nn.Parameter(w * init_tensor)
if train_freq
else w * torch.ones(1, dim)
)
self.w_mod = w_mod
def forward(self, x):
return torch.sin(self.w_mod * self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float = 1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
        # The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
class ExponentialModulation(OptimModule):
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
shift: float = 0.0,
**kwargs,
):
super().__init__()
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
w_mod=1, # non-learnable modification of w
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
linear_mixer=False,
modulate: bool = True,
normalized=False,
bidirectional=False,
**kwargs,
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
emb_dim: dimension of the positional encoding (`emb_dim` - 1) // 2 is the number of bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
        self.d_model = d_model
        self.emb_dim = emb_dim
        self.seq_len = seq_len
        self.modulate = modulate
self.use_bias = bias
self.bidirectional = bidirectional
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w, w_mod=w_mod)
assert (
emb_dim % 2 != 0 and emb_dim >= 3
), "emb_dim must be odd and greater or equal to 3 (time, sine and cosine)"
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
# uses a variable number of inner linear layers
if linear_mixer is False:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
else:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, d_model, bias=False),
)
if self.bidirectional:
self.implicit_filter_rev = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter_rev.append(nn.Linear(order, order))
self.implicit_filter_rev.append(act)
self.implicit_filter_rev.append(nn.Linear(order, d_model, bias=False))
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
if self.modulate:
h = self.modulation(t, h)
if self.normalized:
h = h / torch.norm(h, dim=-1, p=1, keepdim=True)
return h
def filter_rev(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter_rev(z)
if self.modulate:
h = self.modulation(t, h)
if self.normalized:
h = h / torch.norm(h, dim=-1, p=1, keepdim=True)
return h
def forward(self, x, L, k_fwd=None, k_rev=None, bias=None, *args, **kwargs):
if k_fwd is None:
k_fwd = self.filter(L)
if self.bidirectional and k_rev is None:
k_rev = self.filter_rev(L)
# Ensure compatibility with filters that return a tuple
k_fwd = k_fwd[0] if type(k_fwd) is tuple else k_fwd
if bias is None:
bias = self.bias
bias = bias if self.use_bias else 0 * bias
if self.bidirectional:
k_rev = k_rev[0] if type(k_rev) is tuple else k_rev
k = F.pad(k_fwd, (0, L)) \
+ F.pad(k_rev.flip(-1), (L, 0))
else:
k = k_fwd
y = fftconv_ref(
x,
k,
bias,
dropout_mask=None,
gelu=False,
)
return y.to(dtype=x.dtype)
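# Smoke-test sketch (illustrative, not part of the original file): builds a small HyenaFilter,
# materializes its implicit kernel, and applies it to a random (batch, d_model, length) input.
if __name__ == '__main__':
    torch.manual_seed(0)
    B, H, L = 2, 8, 32
    filt = HyenaFilter(d_model=H, emb_dim=3, order=16, seq_len=L)
    k = filt.filter(L)[0].transpose(0, 1).contiguous()  # (d_model, L), the layout the sequence mixer uses
    x = torch.randn(B, H, L)
    y = filt(x, L, k_fwd=k, bias=filt.bias[None, :, None])
    print(y.shape)  # torch.Size([2, 8, 32])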
|
m2-main
|
bert/src/mm/hyena_utils.py
|
# Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
import math
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
def blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2):
"""
This implementation is slow but more likely to be correct.
There are 3 implementations, which should all yield the same answer
Arguments:
x: (batch, n)
w1_bfly: (k, q, p), where k = n / p
w2_bfly: (l, s, r), where l = k * q / r = n * q / (p * r)
Outputs:
out: (batch, m), where m = l * s = n * s * q / (p * r)
"""
if version not in [1, 2, 3]:
raise NotImplementedError('version must be either 1, 2, or 3')
batch, n = x.shape
k, q, p = w1_bfly.shape
l, s, r = w2_bfly.shape
assert k * p == n
assert l * r == k * q
x_reshaped = rearrange(x, 'b (k p) -> b k p', k=k)
if version == 1: # Implementation 1 (only works for when k = q = p = l = s = r = sqrt(n))
assert k == q == p == l == s == r == int(math.sqrt(n))
return torch.einsum('bkp,kqp,qlk->blq', x_reshaped, w1_bfly, w2_bfly).reshape(batch, n)
elif version == 2: # Implementation 2
out1 = torch.einsum('kqp,bkp->bkq', w1_bfly, x_reshaped)
out1 = rearrange(rearrange(out1, 'b k q -> b (k q)'), 'b (r l) -> b l r', l=l)
return torch.einsum('lsr,blr->bsl', w2_bfly, out1).reshape(batch, s * l)
# Implementation 3: most likely to be correct, but it's the slowest
elif version == 3:
w1_dense = torch.block_diag(*torch.unbind(w1_bfly, dim=0))
out1 = F.linear(x, w1_dense)
out1 = rearrange(out1, 'b (r l) -> b (l r)', l=l)
w2_dense = torch.block_diag(*torch.unbind(w2_bfly, dim=0))
out2 = F.linear(out1, w2_dense)
out2 = rearrange(out2, 'b (l s) -> b (s l)', l=l)
return out2
class BlockdiagButterflyMultiply(torch.autograd.Function):
"""This is a faster implementation, with careful memory copies for the fastest
bmm performance.
The backward pass is also written manually with careful memory copies.
Arguments:
x: (batch, n)
w1_bfly: (k, q, p), where k = n / p
w2_bfly: (l, s, r), where l = k * q / r = n * q / (p * r)
Outputs:
out: (batch, m), where m = l * s = n * s * q / (p * r)
"""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.bfloat16)
def forward(ctx, x, w1_bfly, w2_bfly):
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
k, q, p = w1_bfly.shape
l, s, r = w2_bfly.shape
assert k * p == n
assert l * r == k * q
x_reshaped = x.reshape(batch_dim, k, p).transpose(0, 1)
out1 = torch.empty(batch_dim, k, q, device=x.device, dtype=x.dtype).transpose(0, 1)
out1 = torch.bmm(x_reshaped, w1_bfly.transpose(-1, -2), out=out1)
out1 = out1.transpose(0, 1).reshape(batch_dim, r, l).transpose(-1, -2).contiguous().transpose(0, 1)
out2 = torch.empty(batch_dim, l, s, device=x.device, dtype=x.dtype).transpose(0, 1)
out2 = torch.bmm(out1, w2_bfly.transpose(-1, -2), out=out2)
out2 = out2.permute(1, 2, 0).reshape(*batch_shape, s * l)
ctx.save_for_backward(x, w1_bfly, w2_bfly, out1)
return out2
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, dout):
x, w1_bfly, w2_bfly, out1 = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
k, q, p = w1_bfly.shape
l, s, r = w2_bfly.shape
# assert k * p == n
# assert l * r == k * q
dx, dw1_bfly, dw2_bfly = None, None, None
# dout_reshaped = dout.reshape(batch_dim, sqrtn, sqrtn).permute(2, 1, 0).contiguous()
dout_reshaped = dout.reshape(batch_dim, s, l).transpose(-1, -2).contiguous()
dout_reshaped = dout_reshaped.transpose(0, 1)
if ctx.needs_input_grad[2]:
# dw2_bfly = torch.empty(l, s, r, device=w2_bfly.device, dtype=w2_bfly.dtype)
# dw2_bfly = torch.bmm(dout_reshaped.transpose(-1, -2), out1, out=dw2_bfly)
dw2_bfly = torch.bmm(dout_reshaped.transpose(-1, -2), out1.conj())
if ctx.needs_input_grad[1] or ctx.needs_input_grad[0]:
dout1 = torch.empty(batch_dim, l, r, device=x.device, dtype=x.dtype).transpose(0, 1)
dout1 = torch.bmm(dout_reshaped, w2_bfly.conj(), out=dout1)
dout1 = dout1.transpose(0, 1).transpose(-1, -2).contiguous().reshape(batch_dim, k, q).transpose(0, 1)
# dout1 = dout1.permute(1, 2, 0).contiguous().transpose(0, 1)
if ctx.needs_input_grad[0]:
dx = torch.empty(batch_dim, k, p, device=x.device, dtype=x.dtype)
dx = torch.bmm(dout1, w1_bfly.conj(), out=dx.transpose(0, 1)).transpose(0, 1).reshape(*batch_shape, n)
if ctx.needs_input_grad[1]:
x_reshaped = x.reshape(batch_dim, k, p).transpose(0, 1)
dw1_bfly = torch.bmm(dout1.transpose(-1, -2), x_reshaped.conj())
return dx, dw1_bfly, dw2_bfly
blockdiag_butterfly_multiply = BlockdiagButterflyMultiply.apply
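# Consistency-check sketch (illustrative, not part of the original file): the fused autograd
# implementation should agree with the slow reference on CPU for small butterfly factors.
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.randn(3, 8)       # (batch, n) with n = k * p
    w1 = torch.randn(4, 2, 2)   # (k, q, p)
    w2 = torch.randn(2, 3, 4)   # (l, s, r) with l * r == k * q
    out_ref = blockdiag_butterfly_multiply_reference(x, w1, w2, version=2)
    out_fast = blockdiag_butterfly_multiply(x, w1, w2)
    print(out_ref.shape, torch.allclose(out_ref, out_fast, atol=1e-5))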
|
m2-main
|
bert/src/mm/blockdiag_butterfly_multiply.py
|
# Copyright (c) 2023, Dan Fu and Simran Arora.
# Adapted from https://github.com/HazyResearch/safari/blob/main/src/models/sequence/hyena.py
import torch.nn as nn
from einops import rearrange
import opt_einsum as oe
contract = oe.contract
from src.mm.hyena_utils import HyenaFilter
class MonarchMixerSequenceMixing(nn.Module):
def __init__(
self,
d_model,
l_max=128,
dropout=0.0,
hyena_kernel_lr=None,
bidirectional=False,
hyena_lr_pos_emb=1e-5,
hyena_w=10,
hyena_w_mod=1,
hyena_wd=0.1,
hyena_emb_dim=3,
hyena_filter_dropout=0.0,
hyena_filter_order=16,
residual_long_conv=False,
hyena_training_additions=False,
):
super().__init__()
self.d_model = d_model
self.l_max = l_max
self.kernel_lr = hyena_kernel_lr
self.channels = 1
self.bidirectional = bidirectional
self.residual_long_conv = residual_long_conv
self.NUM_PROJECTIONS = 3
print('-- Bidirectional:', self.bidirectional)
print("-- Using Long Conv Residual:", self.residual_long_conv)
print('-- Hyena w:', hyena_w)
print('-- Hyena w mod:', hyena_w_mod)
print(f"-- Hyena filter order: {hyena_filter_order}")
print(f"-- Hyena filter dropout: {hyena_filter_dropout}")
print(f"-- Hyena filter wd: {hyena_wd}")
print(f"-- Hyena filter emb dim: {hyena_emb_dim}")
print(f"-- Hyena filter lr: {hyena_kernel_lr}")
print(f"-- Hyena filter lr pos emb: {hyena_lr_pos_emb}")
self.filter_fn = HyenaFilter(
self.d_model,
order=hyena_filter_order,
seq_len=self.l_max,
dropout=hyena_filter_dropout,
bidirectional=self.bidirectional,
lr=hyena_kernel_lr,
lr_pos_emb=hyena_lr_pos_emb,
w=hyena_w, # frequency of periodic activations
w_mod=hyena_w_mod,
wd=hyena_wd, # weight decay of kernel parameters
emb_dim=hyena_emb_dim,
)
if self.residual_long_conv:
self.filter_fn2 = HyenaFilter(
self.d_model,
order=hyena_filter_order,
seq_len=self.l_max,
dropout=hyena_filter_dropout,
bidirectional=self.bidirectional,
lr=hyena_kernel_lr,
lr_pos_emb=hyena_lr_pos_emb,
w=hyena_w, # frequency of periodic activations
w_mod=hyena_w_mod,
wd=hyena_wd, # weight decay of kernel parameters
emb_dim=hyena_emb_dim,
)
# setup projections
self.in_linear = nn.Linear(d_model, 3 * d_model)
self.out_linear = nn.Linear(d_model, d_model)
self.hyena_training_additions = hyena_training_additions
if self.hyena_training_additions:
self.act = nn.Identity()
self.drop = nn.Dropout(dropout)
self.layernorm = nn.LayerNorm(d_model)
# setup short conv
total_width = self.d_model * self.NUM_PROJECTIONS
self.short_filter = nn.Conv1d(
in_channels=total_width,
out_channels=total_width,
kernel_size=3,
groups=total_width,
padding=2,
)
def forward(self, u, **kwargs):
# u is B L H
if self.hyena_training_additions:
u = self.layernorm(u)
L = u.size(-2)
# in projection
u_orig = u
u = self.in_linear(u)
u = rearrange(u, "b l d -> b d l")
# short filter
uc = self.short_filter(u)[..., :L]
x1, x2, v = uc.split(self.d_model, dim=1)
v = v * x1
if self.hyena_training_additions:
v = self.drop(v)
k = self.filter_fn.filter(L, device=u.device)
k = rearrange(k, "c l d -> c d l")[0] # `c` is always 1 by default
if self.bidirectional:
k_rev = self.filter_fn.filter_rev(L, device=u.device)
k_rev = rearrange(k_rev, "c l d -> c d l")[0] # `c` is always 1 by default
else:
k_rev = None
y = self.filter_fn(v, L, k_fwd=k, k_rev=k_rev, bias= self.filter_fn.bias[None, :, None])
if self.residual_long_conv:
k2 = self.filter_fn2.filter(L, device=u.device)
k2 = rearrange(k2, "c l d -> c d l")[0]
if self.bidirectional:
k2_rev = self.filter_fn2.filter_rev(L, device=u.device)
k2_rev = rearrange(k2_rev, "c l d -> c d l")[0] # `c` is always 1 by default
else:
k2_rev = None
yu = self.filter_fn2(u_orig.transpose(-1, -2), L, k_fwd=k2, k_rev=k2_rev, bias= self.filter_fn2.bias[None, :, None])
# post gating
y = y * x2
if self.residual_long_conv:
y = y + yu
y = y.transpose(-1, -2)
if self.hyena_training_additions:
y = self.drop(self.act(y))
y = self.out_linear(y)
return y, None
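# Smoke-test sketch (illustrative, not part of the original file): runs the sequence mixer on a
# random (batch, length, d_model) input on CPU with the default unidirectional configuration.
if __name__ == '__main__':
    import torch
    torch.manual_seed(0)
    mixer = MonarchMixerSequenceMixing(d_model=16, l_max=32)
    u = torch.randn(2, 32, 16)
    y, _ = mixer(u)
    print(y.shape)  # torch.Size([2, 32, 16])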
|
m2-main
|
bert/src/mm/monarch_mixer_sequence_mixer.py
|
# Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
def blockdiag_weight_to_dense_weight(weight):
"""
    Arguments:
        weight: (nblocks, out / nblocks, in / nblocks)
    Return:
        dense_weight: (out, in)
"""
return torch.block_diag(*torch.unbind(weight, dim=0))
def blockdiag_multiply_reference(x, weight):
"""
This implementation is slow but more likely to be correct.
Arguments:
x: (..., n)
weight: (nblocks, q, n / nblocks)
Outputs:
out: (..., nblocks * q)
"""
n = x.shape[-1]
nblocks, q, p = weight.shape
assert nblocks * p == n
x_reshaped = rearrange(x, '... (nblocks p) -> ... nblocks p', nblocks=nblocks)
return rearrange(torch.einsum('...kp, kqp -> ...kq', x_reshaped, weight),
'... nblocks q -> ... (nblocks q)')
class BlockdiagMultiply(torch.autograd.Function):
"""This is a faster implementation, with careful memory copies for the fastest
bmm performance.
The backward pass is also written manually with careful memory copies.
Arguments:
x: (..., n)
weight: (nblocks, q, n / nblocks)
Outputs:
out: (..., nblocks * q)
"""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.bfloat16)
def forward(ctx, x, weight):
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
nblocks, q, p = weight.shape
assert nblocks * p == n
x_reshaped = x.reshape(batch_dim, nblocks, p).transpose(0, 1)
out = torch.empty(batch_dim, nblocks, q, device=x.device, dtype=x.dtype).transpose(0, 1)
out = torch.bmm(x_reshaped, weight.transpose(-1, -2), out=out).transpose(0, 1)
return out.reshape(*batch_shape, nblocks * q)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, dout):
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
nblocks, q, p = weight.shape
assert nblocks * p == n
dx, dweight = None, None
dout_reshaped = dout.reshape(batch_dim, nblocks, q).transpose(0, 1)
if ctx.needs_input_grad[0]:
dx = torch.empty(batch_dim, nblocks, p, device=x.device, dtype=x.dtype)
dx = torch.bmm(dout_reshaped, weight.conj(),
out=dx.transpose(0, 1)).transpose(0, 1).reshape(*batch_shape, n)
if ctx.needs_input_grad[1]:
x_reshaped = x.reshape(batch_dim, nblocks, p).transpose(0, 1)
dweight = torch.bmm(dout_reshaped.transpose(-1, -2), x_reshaped.conj())
return dx, dweight
blockdiag_multiply = BlockdiagMultiply.apply
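# Consistency-check sketch (illustrative, not part of the original file): the fast autograd
# implementation should match the reference on CPU for a small block-diagonal weight.
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.randn(3, 8)            # (..., n) with n = nblocks * p
    weight = torch.randn(4, 2, 2)    # (nblocks, q, p)
    out_ref = blockdiag_multiply_reference(x, weight)
    out_fast = blockdiag_multiply(x, weight)
    print(out_ref.shape, torch.allclose(out_ref, out_fast, atol=1e-5))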
|
m2-main
|
bert/src/mm/blockdiag_multiply.py
|
m2-main
|
bert/src/mm/__init__.py
|
|
# Adapted from https://github.com/HazyResearch/fly/tree/master/src/models/layers
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class StructuredLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
"""Subclasses should call reset_parameters
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
# Subclasses may override {in,out}_features_extended
if not hasattr(self, 'in_features_extended'):
self.in_features_extended = in_features
if not hasattr(self, 'out_features_extended'):
self.out_features_extended = out_features
if bias:
self.bias = nn.Parameter(torch.zeros(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
def reset_parameters(self) -> None:
self.set_weights_from_dense_init(dense_init_fn_=partial(init.kaiming_uniform_, a=math.sqrt(5)))
self.reset_parameters_bias()
def set_weights_from_dense_init(self, dense_init_fn_):
raise NotImplementedError
def reset_parameters_bias(self):
if self.bias is not None:
fan_in = self.bias.shape[-1]
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
@property
def saving(self):
raise NotImplementedError
def convert_to_dense_weight(self):
factory_kwargs = {'device': self.weight.device, 'dtype': self.weight.dtype}
dense_weight = self.forward_matmul(torch.eye(self.in_features, **factory_kwargs)).T
return dense_weight
def preprocess(self, x):
in_features = x.shape[-1]
if in_features < self.in_features_extended:
x = F.pad(x, (0, self.in_features_extended - in_features))
return x
def postprocess(self, output):
out_features_extended = output.shape[-1]
if out_features_extended > self.out_features:
output = output[..., :self.out_features]
return output
def forward_matmul(self, x):
raise NotImplementedError
def forward(self, x):
output = self.forward_matmul(x)
# Convert bias to output.dtype in case of AMP, otherwise bias and activation will be in FP32
return (output + self.bias.to(dtype=output.dtype)) if self.bias is not None else output
|
m2-main
|
bert/src/mm/structured_linear.py
|
# Copyright (c) 2023, Dan Fu and Simran Arora.
import torch
import torch.nn as nn
import math
from einops import rearrange
import opt_einsum as oe
contract = oe.contract
from flashmm import mm_block_fwd, hyena_filter_fwd, exp_mod_in_place_fwd
from src.utils.train import OptimModule
def fast_mm_block(
u,
linear, out_linear,
x1_s, x2_s, v_s,
x1_s_bias, x2_s_bias, v_s_bias,
k, k_resid, D, Du,
dropout_mask,
gelu, fft_size
):
# u.shape: B L H
x1x2v = linear(u)
H = x1x2v.shape[-1] // 3
x1, x2, v = x1x2v.split(H, dim=-1)
x1 = x1.transpose(1, 2).contiguous()
x2 = x2.transpose(1, 2).contiguous()
v = v.transpose(1, 2).contiguous()
k_f = torch.fft.rfft(k.to(torch.float32), n=fft_size)
k_residual_f = torch.fft.rfft(k_resid.to(torch.float32), n=fft_size)
out = mm_block_fwd(
x1, x2, v,
x1_s, x2_s, v_s,
x1_s_bias, x2_s_bias, v_s_bias,
k_f, None, u.transpose(1, 2).to(x1.dtype).contiguous(), k_residual_f, Du, D, dropout_mask, gelu, fft_size,
False, False
)
out = out.transpose(-1, -2)
return out_linear(out)
def pos_emb_init(seq_len, emb_dim):
t = torch.linspace(0, 1, seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
return z
class FastFilter(OptimModule):
def __init__(
self,
d_model,
channels,
bidirectional=True,
order=16,
seq_len=128,
lr=1e-3,
lr_pos_emb=1e-5,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
emb_dim=5,
):
# create positional embeddings
super().__init__()
self.bidirectional = bidirectional
if self.bidirectional:
channels *= 2
self.channels = channels
z = pos_emb_init(seq_len, emb_dim).repeat(self.channels, 1, 1)
sin_freq = w * torch.ones(self.channels, order)
# create parameters for eo_mat, eo_bias
eo_linears = [
nn.Linear(emb_dim, order)
for _ in range(self.channels)
]
eo_mat = torch.stack([l.weight for l in eo_linears], dim=0).transpose(-1, -2).contiguous()
eo_bias = torch.stack([l.bias for l in eo_linears], dim=0)
# create parameters for oo1_mat, oo1_bias
oo1_linears = [
nn.Linear(order, order)
for _ in range(self.channels)
]
oo1_mat = torch.stack([l.weight for l in oo1_linears], dim=0).transpose(-1, -2).contiguous()
oo1_bias = torch.stack([l.bias for l in oo1_linears], dim=0)
# create parameters for oo2_mat, oo2_bias
oo2_linears = [
nn.Linear(order, order)
for _ in range(self.channels)
]
oo2_mat = torch.stack([l.weight for l in oo2_linears], dim=0).transpose(-1, -2).contiguous()
oo2_bias = torch.stack([l.bias for l in oo2_linears], dim=0)
# create parameters for oh_mat
oh_linears = [
nn.Linear(order, d_model, bias=False)
for _ in range(self.channels)
]
oh_mat = torch.stack([l.weight for l in oh_linears], dim=0).transpose(-1, -2)
# create reverse parameter
if self.bidirectional:
reverse = torch.Tensor([
[0, 1] for _ in range(self.channels // 2)
]).flatten().int()
else:
reverse = torch.Tensor([0 for _ in range(self.channels)]).int()
self.register("z", z, lr=lr_pos_emb)
self.register('sin_freq', sin_freq, lr=lr, wd=wd)
self.register('eo_mat', eo_mat, lr=lr, wd=wd)
self.register('eo_bias', eo_bias, lr=lr, wd=wd)
self.register('oo1_mat', oo1_mat, lr=lr, wd=wd)
self.register('oo1_bias', oo1_bias, lr=lr, wd=wd)
self.register('oo2_mat', oo2_mat, lr=lr, wd=wd)
self.register('oo2_bias', oo2_bias, lr=lr, wd=wd)
self.register('oh_mat', oh_mat, lr=lr, wd=wd)
self.register('reverse', reverse, lr=0)
        target = 1e-2
        fast_decay_pct = 0.3
        slow_decay_pct = 1.5
        self.min_decay = math.log(target) / slow_decay_pct
        self.max_decay = math.log(target) / fast_decay_pct
        self.shift = 0.0
def forward(self):
k = hyena_filter_fwd(
self.z, self.sin_freq, self.eo_mat, self.eo_bias,
self.oo1_mat, self.oo1_bias, self.oo2_mat, self.oo2_bias,
self.reverse, None
)
k = torch.bmm(k, self.oh_mat)
k = exp_mod_in_place_fwd(k, self.reverse, self.min_decay, self.max_decay, self.shift)
return k
class FlashMMSequenceMixing(nn.Module):
def __init__(
self,
d_model,
l_max=128,
hyena_kernel_lr=None,
bidirectional=False,
hyena_lr_pos_emb=1e-5,
hyena_w=10,
hyena_w_mod=1,
hyena_wd=0.1,
hyena_emb_dim=5,
hyena_filter_order=128,
residual_long_conv=False,
**kwargs,
):
super().__init__()
self.d_model = d_model
self.l_max = l_max
self.kernel_lr = hyena_kernel_lr
self.channels = 1
self.bidirectional = bidirectional
self.residual_long_conv = residual_long_conv
print('Using Flash MM Sequence Mixing (no bwd pass!)')
print('-- Bidirectional:', self.bidirectional)
print("-- Using Long Conv Residual:", self.residual_long_conv)
print('-- Hyena w:', hyena_w)
print('-- Hyena w mod:', hyena_w_mod)
channels = 1
if self.residual_long_conv:
channels *= 2
self.fast_filter = FastFilter(
self.d_model,
channels=channels,
bidirectional=self.bidirectional,
order=hyena_filter_order,
seq_len=self.l_max,
lr=hyena_kernel_lr,
lr_pos_emb=hyena_lr_pos_emb,
w=hyena_w, # frequency of periodic activations
wd=hyena_wd, # weight decay of kernel parameters
emb_dim=hyena_emb_dim,
)
# setup projections
self.in_linear = nn.Linear(d_model, 3 * d_model)
self.out_linear = nn.Linear(d_model, d_model)
# to use inits from Conv1d
short_filter = nn.Conv1d(
in_channels=3 * d_model,
out_channels=3 * d_model,
kernel_size=4,
groups=3 * d_model,
padding=3
)
self.x1_s = nn.Parameter(short_filter.weight[:d_model, 0, :].clone())
self.x2_s = nn.Parameter(short_filter.weight[d_model:2 * d_model, 0, :].clone())
self.v_s = nn.Parameter(short_filter.weight[2 * d_model:, 0, :].clone())
self.x1_s_bias = nn.Parameter(short_filter.bias[:d_model].clone())
self.x2_s_bias = nn.Parameter(short_filter.bias[d_model:2 * d_model].clone())
self.v_s_bias = nn.Parameter(short_filter.bias[2 * d_model:].clone())
self.bias = nn.Parameter(torch.randn(self.d_model))
if self.residual_long_conv:
self.residual_bias = nn.Parameter(torch.randn(self.d_model))
else:
self.residual_bias = None
def forward(self, u, **kwargs):
fft_size = 2 * self.l_max
all_kernels = self.fast_filter() # C L H
C, L, H = all_kernels.shape
if self.residual_long_conv:
k = all_kernels[:C // 2].reshape((C // 2) * L, H).transpose(0, 1).contiguous()
k_resid = all_kernels[C // 2:].reshape((C // 2) * L, H).transpose(0, 1).contiguous()
else:
k = all_kernels.reshape(C * L, H).transpose(0, 1).contiguous()
k_resid = None
return fast_mm_block(
u,
self.in_linear, self.out_linear,
self.x1_s, self.x2_s, self.v_s,
self.x1_s_bias, self.x2_s_bias, self.v_s_bias,
k, k_resid, self.bias, self.residual_bias,
None,
False, fft_size
), None
|
m2-main
|
bert/src/mm/flash_mm.py
|
import torch
from einops import rearrange
def low_rank_project(M, rank):
"""Supports batches of matrices as well.
"""
U, S, Vt = torch.linalg.svd(M)
S_sqrt = S[..., :rank].sqrt()
U = U[..., :rank] * rearrange(S_sqrt, '... rank -> ... 1 rank')
Vt = rearrange(S_sqrt, '... rank -> ... rank 1') * Vt[..., :rank, :]
return U, Vt
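# Quick-check sketch (illustrative, not part of the original file): a rank-r factorization
# recovered from the SVD reproduces an exactly rank-r matrix up to numerical error.
if __name__ == '__main__':
    torch.manual_seed(0)
    rank = 3
    M = torch.randn(8, rank) @ torch.randn(rank, 6)  # exactly rank-3
    U, Vt = low_rank_project(M, rank)
    print(U.shape, Vt.shape, torch.allclose(U @ Vt, M, atol=1e-4))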
|
m2-main
|
bert/src/ops/low_rank.py
|
import math
import torch
from einops import rearrange
def butterfly_factor_to_matrix(twiddle: torch.Tensor, factor_index: int) -> torch.Tensor:
"""
Let b be the base (most commonly 2).
Parameters:
twiddle: (n // b, b, b)
factor_index: an int from 0 to log_b(n) - 1
"""
n_div_b, b, _ = twiddle.shape
n = b * n_div_b
log_b_n = int(math.log(n) / math.log(b))
assert n == b ** log_b_n, f'n must be a power of {b}'
assert twiddle.shape == (n // b, b, b)
assert 0 <= factor_index <= log_b_n
stride = b ** factor_index
x = rearrange(torch.eye(n), 'bs (diagblk j stride) -> bs diagblk j stride', stride=stride, j=b)
t = rearrange(twiddle, '(diagblk stride) i j -> diagblk stride i j', stride=stride)
out = torch.einsum('d s i j, b d j s -> b d i s', t, x)
out = rearrange(out, 'b diagblk i stride -> b (diagblk i stride)')
return out.t() # Transpose because we assume the 1st dimension of x is the batch dimension
if __name__ == '__main__':
b = 2
log_b_n = 3
n = b ** log_b_n
twiddle = torch.arange(1, n * b + 1, dtype=torch.float).reshape(n // b, b, b)
for factor_index in range(log_b_n):
print(butterfly_factor_to_matrix(twiddle, factor_index))
b = 3
log_b_n = 2
n = b ** log_b_n
twiddle = torch.arange(1, n * b + 1, dtype=torch.float).reshape(n // b, b, b)
for factor_index in range(log_b_n):
print(butterfly_factor_to_matrix(twiddle, factor_index))
|
m2-main
|
bert/src/ops/butterfly_factor.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import flash_attn_cuda
def _get_block_size(device, head_dim, is_dropout):
assert head_dim % 8 == 0 and head_dim <= 128
return 256 if head_dim <= 64 else 128
def _flash_attn_forward(q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal, return_softmax, num_splits=0,
generator=None):
"""
num_splits: how much to parallelize over the seqlen_q dimension. num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking.
Don't change it unless you know what you're doing.
"""
softmax_lse, rng_state, *rest = flash_attn_cuda.fwd(
q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, False, causal, return_softmax, num_splits, generator
)
# if out.isnan().any() or softmax_lse.isnan().any():
# breakpoint()
S_dmask = rest[0] if return_softmax else None
return out, softmax_lse, rng_state, S_dmask
def _flash_attn_backward(dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal,
rng_state=None, num_splits=0, generator=None):
"""
num_splits: whether to parallelize over the seqlen_k dimension (num_splits > 1) or
not (num_splits = 1). num_splits=0 means it will be set by an internal heuristic.
Any value above 1 will call the same kernel (i.e. num_splits=2 would call the same kernel
as num_splits=3), so effectively the choices are 0, 1, and 2.
This hyperparameter can be tuned for performance, but default value (heuristic) should work fine.
"""
dout = dout.contiguous() # CUDA code assumes that dout is contiguous
_, _, _, softmax_d = flash_attn_cuda.bwd(
dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, False, causal,
num_splits, generator, rng_state)
# if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
# breakpoint()
return dq, dk, dv, softmax_d
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale, causal,
return_softmax, deterministic):
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
out, softmax_lse, rng_state, S_dmask = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], torch.empty_like(qkv[:, 0]), cu_seqlens, cu_seqlens,
max_seqlen, max_seqlen, dropout_p, softmax_scale, causal=causal,
return_softmax=return_softmax
)
ctx.save_for_backward(qkv, out, softmax_lse, cu_seqlens, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen = max_seqlen
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.deterministic = deterministic
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
qkv, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
dqkv = torch.empty_like(qkv)
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens, cu_seqlens,
ctx.max_seqlen, ctx.max_seqlen, ctx.dropout_p, ctx.softmax_scale, ctx.causal,
rng_state=rng_state, num_splits=1 if ctx.deterministic else 0,
)
return dqkv, None, None, None, None, None, None, None
class FlashAttnKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, kv, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, causal, return_softmax, deterministic):
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
out, softmax_lse, rng_state, S_dmask = _flash_attn_forward(
q, kv[:, 0], kv[:, 1], torch.empty_like(q), cu_seqlens_q, cu_seqlens_k, max_seqlen_q,
max_seqlen_k, dropout_p, softmax_scale, causal=causal, return_softmax=return_softmax
)
ctx.save_for_backward(q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_k = max_seqlen_k
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.deterministic = deterministic
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
dq = torch.empty_like(q)
dkv = torch.empty_like(kv)
_flash_attn_backward(
dout, q, kv[:, 0], kv[:, 1], out, softmax_lse,
dq, dkv[:, 0], dkv[:, 1], cu_seqlens_q, cu_seqlens_k,
ctx.max_seqlen_q, ctx.max_seqlen_k, ctx.dropout_p, ctx.softmax_scale, ctx.causal,
rng_state=rng_state, num_splits=1 if ctx.deterministic else 0,
)
return dq, dkv, None, None, None, None, None, None, None, None, None
class FlashAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, causal, return_softmax, deterministic):
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
out, softmax_lse, rng_state, S_dmask = _flash_attn_forward(
q, k, v, torch.empty_like(q), cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal=causal, return_softmax=return_softmax
)
ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_k = max_seqlen_k
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.deterministic = deterministic
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
_flash_attn_backward(
dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
ctx.max_seqlen_q, ctx.max_seqlen_k, ctx.dropout_p, ctx.softmax_scale, ctx.causal,
rng_state=rng_state, num_splits=1 if ctx.deterministic else 0,
)
return dq, dk, dv, None, None, None, None, None, None, None, None, None
class FlashAttnQKVPackedSplitFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0, dropout_p,
softmax_scale, causal, return_softmax, deterministic):
# Save rng_state because the backward pass will regenerate the dropout mask
if dropout_p > 0:
rng_state0 = torch.cuda.get_rng_state()
generator1 = torch.Generator(device='cuda')
rng_state1 = generator1.get_state()
else:
rng_state0, generator1, rng_state1 = None, None, None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
out = torch.empty_like(qkv[:, 0])
        _, softmax_lse0, _, S_dmask0 = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], out, cu_seqlens[:batch_size0 + 1],
cu_seqlens[:batch_size0 + 1], max_seqlen0, max_seqlen0, dropout_p, softmax_scale,
causal=causal, return_softmax=return_softmax
)
s = torch.cuda.Stream()
with torch.cuda.stream(s):
            _, softmax_lse1, _, S_dmask1 = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], out, cu_seqlens[batch_size0:],
cu_seqlens[batch_size0:], max_seqlen1, max_seqlen1, dropout_p, softmax_scale,
causal=causal, return_softmax=return_softmax, generator=generator1
)
torch.cuda.current_stream().wait_stream(s)
ctx.save_for_backward(qkv, out, softmax_lse0, softmax_lse1, cu_seqlens,
rng_state0, rng_state1)
ctx.dropout_p = dropout_p
ctx.max_seqlen0 = max_seqlen0
ctx.max_seqlen1 = max_seqlen1
ctx.batch_size0 = batch_size0
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.deterministic = deterministic
if not return_softmax:
return out
else:
max_seqlen_q = max(softmax_lse0.shape[2], softmax_lse1.shape[2])
max_seqlen_k = max(S_dmask0.shape[3], S_dmask1.shape[3])
softmax_lse = torch.cat([F.pad(softmax_lse0, (0, max_seqlen_q - softmax_lse0.shape[2])),
F.pad(softmax_lse1, (0, max_seqlen_q - softmax_lse1.shape[2]))],
dim=0)
return out, softmax_lse, S_dmask0, S_dmask1
@staticmethod
def backward(ctx, dout, *args):
qkv, out, softmax_lse0, softmax_lse1, cu_seqlens, rng_state0, rng_state1 = ctx.saved_tensors
batch_size0 = ctx.batch_size0
if rng_state0 is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state0)
if rng_state1 is not None:
generator1 = torch.Generator(device='cuda')
generator1.set_state(rng_state1)
else:
generator1 = None
dqkv = torch.empty_like(qkv)
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse0,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens[:batch_size0 + 1],
cu_seqlens[:batch_size0 + 1], ctx.max_seqlen0, ctx.max_seqlen0, ctx.dropout_p,
ctx.softmax_scale, ctx.causal, num_splits=1 if ctx.deterministic else 0,
)
s = torch.cuda.Stream()
with torch.cuda.stream(s):
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse1,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens[batch_size0:],
cu_seqlens[batch_size0:], ctx.max_seqlen1, ctx.max_seqlen1, ctx.dropout_p,
ctx.softmax_scale, ctx.causal, generator=generator1,
num_splits=1 if ctx.deterministic else 0,
)
torch.cuda.current_stream().wait_stream(s)
if rng_state0 is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None, None, None, None
def flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale=None,
causal=False, return_attn_probs=False, deterministic=False):
"""dropout_p should be set to 0.0 during evaluation
Arguments:
qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into qkv.
max_seqlen: int. Maximum sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
deterministic: bool. Whether or not to ensure deterministic execution.
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnQKVPackedFunc.apply(qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale,
causal, return_attn_probs, deterministic)
def flash_attn_unpadded_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale=None, causal=False,
return_attn_probs=False, deterministic=False):
"""dropout_p should be set to 0.0 during evaluation
Arguments:
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
kv: (total_k, 2, nheads, headdim), where total_k = total number of key tokens in the batch.
cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into q.
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into kv.
max_seqlen_q: int. Maximum query sequence length in the batch.
max_seqlen_k: int. Maximum key sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
deterministic: bool. Whether or not to ensure deterministic execution.
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnKVPackedFunc.apply(q, kv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal,
return_attn_probs, deterministic)
def flash_attn_unpadded_func(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale=None, causal=False, return_attn_probs=False,
deterministic=False):
"""dropout_p should be set to 0.0 during evaluation
Arguments:
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
k: (total_k, nheads, headdim), where total_k = total number of key tokens in the batch.
v: (total_k, nheads, headdim), where total_k = total number of key tokens in the batch.
cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into q.
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into kv.
max_seqlen_q: int. Maximum query sequence length in the batch.
max_seqlen_k: int. Maximum key sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
deterministic: bool. Whether or not to ensure deterministic execution.
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnFunc.apply(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal, return_attn_probs, deterministic)
def flash_attn_unpadded_qkvpacked_split_func(
qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0, dropout_p, softmax_scale=None,
causal=False, return_attn_probs=False, deterministic=False):
"""
Split attention into 2 kernels running on 2 separate streams for performance reason:
e.g., if the batch has some sequences of length <= 128 and some > 128, it might be faster to
have one kernel dealing with seqlen <= 128 and one kernel for seqlen > 128.
dropout_p should be set to 0.0 during evaluation.
Arguments:
qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into qkv.
max_seqlen0: int. Maximum sequence length in 1st part of the batch.
max_seqlen1: int. Maximum sequence length in 2nd part of the batch.
batch_size0: int. Number of sequences in the 1st part of the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
deterministic: bool. Whether or not to ensure deterministic execution.
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnQKVPackedSplitFunc.apply(qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0,
dropout_p, softmax_scale, causal, return_attn_probs,
deterministic)
def flash_attn_func(qkv, cu_seqlens, dropout_p, max_s, softmax_scale=None, causal=False,
return_attn_probs=False):
"""For backward-compatibility only, will remove soon.
dropout_p should be set to 0.0 during evaluation
"""
return flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, max_s, dropout_p, softmax_scale,
causal, return_attn_probs)
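# Minimal usage sketch (illustrative only) for flash_attn_unpadded_qkvpacked_func, defined
# earlier in this file. It assumes a CUDA device, a working flash-attn CUDA extension, and
# fp16 inputs, with the argument order documented above; without CUDA it simply returns.
def _example_flash_attn_qkvpacked():
    import torch
    import torch.nn.functional as F
    if not torch.cuda.is_available():
        return
    nheads, headdim = 8, 64
    seqlens = torch.tensor([3, 5], dtype=torch.int32, device='cuda')
    total, max_s = int(seqlens.sum()), int(seqlens.max())
    # Cumulative sequence lengths: [0, 3, 8] for two sequences of lengths 3 and 5.
    cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
    qkv = torch.randn(total, 3, nheads, headdim, device='cuda', dtype=torch.float16)
    out = flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, max_s, dropout_p=0.0, causal=True)
    assert out.shape == (total, nheads, headdim)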
|
m2-main
|
bert/src/ops/bert_flashattention.py
|
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indices):
ctx.save_for_backward(indices)
ctx.first_axis_dim = input.shape[0]
assert input.ndim == 2
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
# return input[indices]
return torch.gather(input, 0, repeat(indices, 'z -> z d', d=input.shape[1]))
@staticmethod
def backward(ctx, grad_output):
indices, = ctx.saved_tensors
grad_input = torch.zeros([ctx.first_axis_dim, *grad_output.shape[1:]],
device=grad_output.device, dtype=grad_output.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# grad_input[indices] = grad_output
grad_input.scatter_(0, repeat(indices, 'z -> z d', d=grad_output.shape[1]), grad_output)
return grad_input, None
index_first_axis = IndexFirstAxis.apply
index_first_axis_residual = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, values, indices, first_axis_dim):
ctx.save_for_backward(indices)
assert indices.ndim == 1
assert values.ndim == 2
output = torch.zeros(first_axis_dim, values.shape[1], device=values.device,
dtype=values.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
output[indices] = values
# output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
return output
@staticmethod
def backward(ctx, grad_output):
indices, = ctx.saved_tensors
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
grad_values = grad_output[indices]
# grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
def unpad_input(hidden_states, attention_mask):
"""
Arguments:
hidden_states: (batch, seqlen, dim)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
Return:
        hidden_states: (total_nnz, dim), where total_nnz = number of tokens selected in attention_mask.
cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
max_seqlen_in_batch: int
"""
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
# TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The indices tensor
    # is `dim` times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
# index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
# so we write custom forward and backward to make it a bit faster.
return (index_first_axis(rearrange(hidden_states, 'b s d -> (b s) d'), indices), indices,
cu_seqlens, max_seqlen_in_batch)
def pad_input(hidden_states, indices, batch, seqlen):
"""
Arguments:
        hidden_states: (total_nnz, dim), where total_nnz = number of tokens selected in attention_mask.
indices: (total_nnz)
Return:
hidden_states: (batch, seqlen, dim)
"""
dim = hidden_states.shape[-1]
# output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
# output[indices] = hidden_states
output = index_put_first_axis(hidden_states, indices, batch * seqlen)
return rearrange(output, '(b s) d -> b s d', b=batch)
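# Minimal round-trip sketch (illustrative only) for unpad_input / pad_input on a toy
# padded batch; it runs on CPU and makes no assumptions beyond the functions above.
def _example_unpad_pad_roundtrip():
    batch, seqlen, dim = 2, 4, 8
    hidden_states = torch.randn(batch, seqlen, dim)
    attention_mask = torch.tensor([[1, 1, 1, 0],
                                   [1, 1, 0, 0]], dtype=torch.bool)
    unpadded, indices, cu_seqlens, max_seqlen = unpad_input(hidden_states, attention_mask)
    assert unpadded.shape == (5, dim) and max_seqlen == 3
    repadded = pad_input(unpadded, indices, batch, seqlen)
    # Masked-out positions come back as zeros; valid positions are preserved exactly.
    assert torch.allclose(repadded[attention_mask], hidden_states[attention_mask])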
|
m2-main
|
bert/src/ops/bert_padding.py
|
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
from src.ops.low_rank import low_rank_project
def blockdiag_weight_to_dense_weight(weight):
"""
    Arguments:
        weight: (nblocks, out / nblocks, in / nblocks)
    Return:
        dense_weight: (out, in)
"""
return torch.block_diag(*torch.unbind(weight, dim=0))
def blockdiag_multiply_reference(x, weight):
"""
This implementation is slow but more likely to be correct.
Arguments:
x: (..., n)
weight: (nblocks, q, n / nblocks)
Outputs:
out: (..., nblocks * q)
"""
n = x.shape[-1]
nblocks, q, p = weight.shape
assert nblocks * p == n
x_reshaped = rearrange(x, '... (nblocks p) -> ... nblocks p', nblocks=nblocks)
return rearrange(torch.einsum('...kp, kqp -> ...kq', x_reshaped, weight),
'... nblocks q -> ... (nblocks q)')
class BlockdiagMultiply(torch.autograd.Function):
"""This is a faster implementation, with careful memory copies for the fastest
bmm performance.
The backward pass is also written manually with careful memory copies.
Arguments:
x: (..., n)
weight: (nblocks, q, n / nblocks)
Outputs:
out: (..., nblocks * q)
"""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, weight):
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
nblocks, q, p = weight.shape
assert nblocks * p == n
x_reshaped = x.reshape(batch_dim, nblocks, p).transpose(0, 1)
out = torch.empty(batch_dim, nblocks, q, device=x.device, dtype=x.dtype).transpose(0, 1)
out = torch.bmm(x_reshaped, weight.transpose(-1, -2), out=out).transpose(0, 1)
return out.reshape(*batch_shape, nblocks * q)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, dout):
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
nblocks, q, p = weight.shape
assert nblocks * p == n
dx, dweight = None, None
dout_reshaped = dout.reshape(batch_dim, nblocks, q).transpose(0, 1)
if ctx.needs_input_grad[0]:
dx = torch.empty(batch_dim, nblocks, p, device=x.device, dtype=x.dtype)
dx = torch.bmm(dout_reshaped, weight.conj(),
out=dx.transpose(0, 1)).transpose(0, 1).reshape(*batch_shape, n)
if ctx.needs_input_grad[1]:
x_reshaped = x.reshape(batch_dim, nblocks, p).transpose(0, 1)
dweight = torch.bmm(dout_reshaped.transpose(-1, -2), x_reshaped.conj())
return dx, dweight
blockdiag_multiply = BlockdiagMultiply.apply
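# Minimal correctness sketch (illustrative only): outside an autocast region the custom_fwd
# fp16 cast is inactive, so the fast blockdiag_multiply should match the reference einsum
# implementation in fp32 up to numerical tolerance.
def _example_blockdiag_multiply():
    torch.manual_seed(0)
    nblocks, p, q = 4, 8, 16               # weight: (nblocks, q, p), so n = nblocks * p
    x = torch.randn(2, 3, nblocks * p)
    weight = torch.randn(nblocks, q, p)
    ref = blockdiag_multiply_reference(x, weight)
    fast = blockdiag_multiply(x, weight)
    assert ref.shape == (2, 3, nblocks * q)
    assert torch.allclose(ref, fast, atol=1e-5)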
|
m2-main
|
bert/src/ops/blockdiag_multiply.py
|
import math
import torch
import torch.nn as nn
from einops import rearrange
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
# from src.ops.low_rank import low_rank_project
# Copied here so it's more self-contained
def low_rank_project(M, rank):
"""Supports batches of matrices as well.
"""
U, S, Vt = torch.linalg.svd(M)
S_sqrt = S[..., :rank].sqrt()
U = U[..., :rank] * rearrange(S_sqrt, '... rank -> ... 1 rank')
Vt = rearrange(S_sqrt, '... rank -> ... rank 1') * Vt[..., :rank, :]
return U, Vt
def factors(n):
return [(i, n // i) for i in range(1, math.floor(math.sqrt(n)) + 1) if n % i == 0]
def blockdiag_butterfly_project(M, sizes=None):
"""Only works for square matrices for now
"""
m, n = M.shape
if m != n:
raise NotImplementedError('Only support square matrices')
if sizes is None:
# Find the factors that are closest to sqrt(n)
sizes = factors(n)[-1]
# Larger factor first is probably more efficient, idk
sizes = (sizes[1], sizes[0])
assert n == sizes[0] * sizes[1]
M_permuted_batched = rearrange(M, '(p k) (r s) -> k r p s', k=sizes[1], r=sizes[0])
U, Vt = low_rank_project(M_permuted_batched, rank=1)
w1_bfly = rearrange(Vt, 'k r 1 s -> r k s')
w2_bfly = rearrange(U, 'k r s 1 -> k s r')
return w1_bfly, w2_bfly
class ButterflyFFT(nn.Module):
def __init__(self, n, direction='fft', norm='ortho', sizes=None):
super().__init__()
eye = torch.eye(n, dtype=torch.complex128)
assert direction in ['fft', 'ifft']
transform = torch.fft.fft if direction == 'fft' else torch.fft.ifft
dft = transform(eye, norm=norm).t()
        if sizes is None:
            # Find the factors that are closest to sqrt(n)
            sizes = factors(n)[-1]
            # Larger factor first is probably more efficient, idk
            sizes = (sizes[1], sizes[0])
self.register_buffer('perm', rearrange(torch.arange(n), '(i j) -> (j i)', j=sizes[0]))
w1, w2 = blockdiag_butterfly_project(dft[:, self.perm], sizes=sizes)
# Store parameters as real instead of complex to avoid issues with Adam / AdamW
self.w1_bfly = nn.Parameter(torch.view_as_real(w1.cfloat()))
self.w2_bfly = nn.Parameter(torch.view_as_real(w2.cfloat()))
def forward(self, x):
w1_bfly, w2_bfly = torch.view_as_complex(self.w1_bfly), torch.view_as_complex(self.w2_bfly)
return blockdiag_butterfly_multiply(rearrange(x[..., self.perm], '... n -> (...) n'),
w1_bfly, w2_bfly).reshape_as(x)
class ButterflyFFT2(nn.Module):
def __init__(self, n1, n2, direction='fft', norm='ortho'):
"""Input will have shape (..., n1, n2)
"""
super().__init__()
self.fft1 = ButterflyFFT(n1, direction=direction, norm=norm)
self.fft2 = ButterflyFFT(n2, direction=direction, norm=norm)
def forward(self, x):
out = rearrange(self.fft1(rearrange(x, '... n1 n2 -> ... n2 n1')), '... n2 n1 -> ... n1 n2')
return self.fft2(out)
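# Minimal sketch (illustrative only) of the two standalone helpers above; it does not
# exercise ButterflyFFT itself, which additionally needs blockdiag_butterfly_multiply
# from src.models.layers.
def _example_projection_helpers():
    torch.manual_seed(0)
    # A rank-2 matrix is recovered exactly (up to float error) by a rank-2 projection.
    A = torch.randn(8, 2) @ torch.randn(2, 8)
    U, Vt = low_rank_project(A, rank=2)
    assert torch.allclose(U @ Vt, A, atol=1e-4)
    # factors(n) lists the factor pairs (i, n // i) with i <= sqrt(n).
    assert factors(12) == [(1, 12), (2, 6), (3, 4)]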
|
m2-main
|
bert/src/ops/blockdiag_butterfly_projection.py
|
import torch
@torch.jit.script
def jit_dropout_add(x, residual, prob):
# type: (Tensor, Tensor, float) -> Tensor
return torch.nn.functional.dropout(x, p=prob, training=True) + residual
def fused_dropout_add(x, residual, prob, is_training) :
# type: (Tensor, Tensor, float, bool) -> Tensor
if is_training:
out = jit_dropout_add(x, residual, prob)
else:
out = torch.nn.functional.dropout(x, p=prob, training=is_training) + residual
return out
@torch.jit.script
def jit_bias_dropout_add(x, bias, residual, prob) :
# type: (Tensor, Tensor, Tensor, float) -> Tensor
return torch.nn.functional.dropout(x + bias, p=prob, training=True) + residual
def fused_bias_dropout_add(x, bias, residual, prob, is_training) :
# type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
if is_training:
out = jit_bias_dropout_add(x, bias, residual, prob)
else:
out = torch.nn.functional.dropout(x + bias, p=prob, training=is_training) + residual
return out
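# Minimal sketch (illustrative only): with is_training=False dropout is the identity,
# so both fused helpers reduce to plain residual adds.
def _example_fused_dropout_add_eval():
    x, bias, residual = torch.randn(4, 8), torch.randn(8), torch.randn(4, 8)
    out = fused_dropout_add(x, residual, prob=0.1, is_training=False)
    assert torch.allclose(out, x + residual)
    out_b = fused_bias_dropout_add(x, bias, residual, prob=0.1, is_training=False)
    assert torch.allclose(out_b, x + bias + residual)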
|
m2-main
|
bert/src/ops/fused_dropout_add.py
|
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
# On the backward pass, we don't use the fused kernel from cublasLt since that's a bit slower.
# Instead we use the regular backward from F.linear.
# We also make it work with pytorch amp.
# TD [2022-02-27] The fused backward is also less accurate, and it might silently fail to compute
# grad_bias (when it takes the cublas gemm code path instead of the cublasLt code path)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
import fused_dense_cuda # from apex
# import fused_dense_lib as fused_dense_cuda
# implements fused GEMM+bias in forward pass using mlp_cuda from apex
class FusedDenseFuncMine(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, weight, bias):
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
output = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight, bias)
return output.reshape(*batch_shape, output.shape[-1])
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
grad_input, grad_weight, grad_bias = fused_dense_cuda.linear_bias_backward(
x.reshape(batch_dim, n), weight, grad_output.reshape(batch_dim, grad_output.shape[-1])
)
# print((grad_bias - grad_output.view(-1, grad_output.shape[-1]).sum(dim=0)).abs().max())
return grad_input.reshape_as(x), grad_weight, grad_bias
# grad_input, grad_weight = None, None
# grad_output_reshaped = grad_output.reshape(batch_dim, grad_output.shape[-1])
# if ctx.needs_input_grad[0]:
# grad_input = (grad_output_reshaped @ weight.conj()).reshape(*batch_shape, n)
# if ctx.needs_input_grad[1]:
# grad_weight = grad_output_reshaped.t() @ x.conj().reshape(batch_dim, n)
# # We don't need to compute grad_bias explicitly, when we return grad_out Pytorch
# # will sum over the batch dimension to get grad_bias.
# return grad_input, grad_weight, grad_output
fused_dense_function_mine = FusedDenseFuncMine.apply
class FusedDenseMine(nn.Linear):
def forward(self, x):
if x.is_cuda and self.bias is not None:
return fused_dense_function_mine(x, self.weight, self.bias)
else:
return F.linear(x, self.weight, self.bias)
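# Minimal sketch (illustrative only): on CPU tensors FusedDenseMine takes the F.linear
# fallback path, so its output can be checked without invoking the fused kernel. Note the
# module-level `import fused_dense_cuda` still requires the apex extension to be installed
# for this file to import at all.
def _example_fused_dense_fallback():
    layer = FusedDenseMine(16, 32)
    x = torch.randn(4, 16)
    out = layer(x)
    assert torch.allclose(out, F.linear(x, layer.weight, layer.bias))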
|
m2-main
|
bert/src/ops/fused_dense.py
|
import torch
from einops import rearrange
from src.ops.low_rank import low_rank_project
def blockdiag_butterfly_multiply_einsum_simple(x, w1_bfly, w2_bfly):
"""
Arguments:
x: (batch, n)
w1_bfly: (k, j, i), where k = n / i
w2_bfly: (j, l, k)
Outputs:
out: (batch, m), where m = l * j
"""
batch, n = x.shape
k, j, i = w1_bfly.shape
j1, l, k1 = w2_bfly.shape
assert j1 == j
assert k1 == k
assert k * i == n
x_reshaped = rearrange(x, 'b (k i) -> b k i', k=k)
out = torch.einsum('b k i, k j i, j l k -> b l j', x_reshaped, w1_bfly, w2_bfly)
return rearrange(out, 'b l j -> b (l j)')
def blockdiag_butterfly_project_einsum_simple(M, nblocks1, nblocks2):
"""
Arguments:
M: (m, n)
Outputs:
w1_bfly: (nblocks1, nblocks2, i)
w2_bfly: (nblocks2, l, nblocks1)
"""
m, n = M.shape
k, j = nblocks1, nblocks2
M_permuted_batched = rearrange(M, '(l j) (k i) -> k j l i', k=nblocks1, j=nblocks2)
U, Vt = low_rank_project(M_permuted_batched, rank=1)
w1_bfly = rearrange(Vt, 'k j 1 i -> k j i')
w2_bfly = rearrange(U, 'k j l 1 -> j l k')
return w1_bfly, w2_bfly
def blockdiag_butterfly_multiply_einsum(x, w1_bfly, w2_bfly, b2):
"""
Arguments:
x: (batch, n)
w1_bfly: (k, (j * b1), i), where k = n / i
w2_bfly: (j, (l * b2), (k b1))
Outputs:
out: (batch, m), where m = l * j * b2
"""
batch, n = x.shape
k, jb1, i = w1_bfly.shape
j, lb2, kb1 = w2_bfly.shape
b1 = jb1 // j
assert jb1 == j * b1
assert kb1 == k * b1
assert k * i == n
x_reshaped = rearrange(x, 'b (k i) -> b k i', k=k)
w1_bfly = rearrange(w1_bfly, 'k (j b1) i -> k j b1 i', b1=b1)
w2_bfly = rearrange(w2_bfly, 'j (l b2) (k b1) -> j l b2 k b1', b1=b1, b2=b2)
# torch.einsum doesn't support indices named b1 or b2, so we map b1 -> y, b2 -> z
out = torch.einsum('b k i, k j y i, j l z k y -> b l j z', x_reshaped, w1_bfly, w2_bfly)
return rearrange(out, 'b l j b2 -> b (l j b2)')
def blockdiag_butterfly_project_einsum(M, nblocks1, nblocks2, b1, b2):
"""
Arguments:
M: (m, n)
Outputs:
        w1_bfly: (nblocks1, nblocks2 * b1, i)
        w2_bfly: (nblocks2, l * b2, nblocks1 * b1)
"""
m, n = M.shape
k, j = nblocks1, nblocks2
M_permuted_batched = rearrange(M, '(l j b2) (k i) -> k j (l b2) i', k=nblocks1, j=nblocks2,
b2=b2)
U, Vt = low_rank_project(M_permuted_batched, rank=b1)
w1_bfly = rearrange(Vt, 'k j b1 i -> k (j b1) i')
w2_bfly = rearrange(U, 'k j lb2 b1 -> j lb2 (k b1)')
return w1_bfly, w2_bfly
def blockdiag_butterfly_multiply_einsum_rank(x, w1_bfly, w2_bfly):
"""
Arguments:
x: (batch, n)
w1_bfly: (k, (r * j), i), where k = n / i
w2_bfly: (j, l, (k r))
Outputs:
out: (batch, m), where m = l * j
"""
batch, n = x.shape
k, jb1, i = w1_bfly.shape
j, l, kb1 = w2_bfly.shape
r = jb1 // j
assert jb1 == j * r
assert kb1 == k * r
assert k * i == n
x_reshaped = rearrange(x, 'b (k i) -> b k i', k=k)
w1_bfly = rearrange(w1_bfly, 'k (r j) i -> k r j i', r=r)
w2_bfly = rearrange(w2_bfly, 'j l (k r) -> j l k r', r=r)
out = torch.einsum('b k i, k r j i, j l k r -> b l j', x_reshaped, w1_bfly, w2_bfly)
return rearrange(out, 'b l j -> b (l j)')
def blockdiag_butterfly_project_einsum_rank(M, nblocks1, nblocks2, rank):
"""
Arguments:
M: (m, n)
Outputs:
w1_bfly: (nblocks1, r * nblocks2, i)
w2_bfly: (nblocks2, l, nblocks1 * r)
"""
m, n = M.shape
k, j = nblocks1, nblocks2
M_permuted_batched = rearrange(M, '(l j) (k i) -> k j l i', k=nblocks1, j=nblocks2)
U, Vt = low_rank_project(M_permuted_batched, rank=rank)
w1_bfly = rearrange(Vt, 'k j r i -> k (r j) i')
w2_bfly = rearrange(U, 'k j l r -> j l (k r)')
return w1_bfly, w2_bfly
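# Minimal round-trip sketch (illustrative only): with rank = min(l, i) the per-block
# low-rank projection is exact, so multiplying by the projected factors recovers x @ M.T
# up to float error. Requires src.ops.low_rank, as the import above already does.
def _example_butterfly_einsum_roundtrip():
    torch.manual_seed(0)
    m = n = 12
    nblocks1 = nblocks2 = 3                 # i = n // nblocks1 = 4, l = m // nblocks2 = 4
    M = torch.randn(m, n)
    w1_bfly, w2_bfly = blockdiag_butterfly_project_einsum_rank(M, nblocks1, nblocks2, rank=4)
    x = torch.randn(5, n)
    out = blockdiag_butterfly_multiply_einsum_rank(x, w1_bfly, w2_bfly)
    assert torch.allclose(out, x @ M.t(), atol=1e-4)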
|
m2-main
|
bert/src/ops/blockdiag_butterfly_einsum.py
|
import torch
from softmaxlib import additive_masked_softmax_dropout_forward
from softmaxlib import masked_scale_softmax_backward_recompute
from src.ops.triton.softmax_dropout import softmax_dropout
class _fused_softmax_dropout(torch.autograd.Function):
@staticmethod
def forward(ctx, x, p, mask, return_dropout_mask=False):
"""
x: (batch_size, nheads, q_seqlen, k_seqlen)
p: float
mask: (batch_size, 1, 1, k_seqlen)
"""
assert x.dtype == torch.float16
assert x.ndim == 4
assert mask is not None
x = x.contiguous()
dropout_results, dropout_mask = additive_masked_softmax_dropout_forward(x, mask, p)
ctx.save_for_backward(x, mask, dropout_mask)
ctx.dropout_prob = p
return dropout_results, (None if not return_dropout_mask else dropout_mask)
@staticmethod
def backward(ctx, grad_out, grad_dropout_mask):
x, mask, dropout_mask = ctx.saved_tensors
p = ctx.dropout_prob
grad_in = masked_scale_softmax_backward_recompute(grad_out, x, mask, dropout_mask, p)
return grad_in, None, None, None
def fused_softmax_dropout(x, p, mask):
if x.is_cuda and x.dtype == torch.float16 and mask is not None and p != 0.0:
return _fused_softmax_dropout.apply(x, p, mask)[0]
else:
return softmax_dropout(x, p, mask, mask_type='bk')
|
m2-main
|
bert/src/ops/fused_softmax_dropout.py
|
from PIL import Image, ImageEnhance, ImageOps
import numpy as np
from torchvision import transforms
import random
norm_stats = {
"imagenet": ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
"clevr": ([0.47097, 0.46812, 0.46185, 0], [0.08974, 0.08686, 0.09197, 1]),
"cxr": ([0.48865], [0.24621]),
"poet": ([0.46649, 0.4467, 0.41329], [0.26, 0.25853, 0.27346]),
}
num_channels_dict = {
"cxr": 1,
"cxr2": 1,
"mets": 1,
}
def get_data_transforms(dataset_name, normalization_type="none"):
"""Get data transforms based on dataset name"""
num_channels = num_channels_dict[dataset_name]
if normalization_type == "none":
mean, std = [0] * num_channels, [1] * num_channels
elif normalization_type == "imagenet":
if num_channels != 3:
raise ValueError("Cannot use imagenet statistics with ≠3 channels")
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
elif normalization_type == "train_images":
if dataset_name in norm_stats:
mean, std = norm_stats[dataset_name]
else:
mean, std = [0] * num_channels, [1] * num_channels
else:
raise ValueError(f"Unknown normalization type {normalization_type}")
eval_transform = transforms.Compose(
[
transforms.Resize([224, 224]), # 384 for ViT
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
train_transform = eval_transform
data_transforms = {
"train": train_transform,
"eval": eval_transform,
}
data_transforms["val"] = eval_transform
data_transforms["test"] = eval_transform
return data_transforms
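# Minimal usage sketch (illustrative only): fetch the eval transform for the
# single-channel "cxr" dataset and apply it to a synthetic grayscale PIL image.
def _example_get_data_transforms():
    transforms_dict = get_data_transforms("cxr", normalization_type="train_images")
    img = Image.fromarray(np.zeros((512, 512), dtype=np.uint8), mode="L")
    tensor = transforms_dict["eval"](img)
    # Resize to 224x224, ToTensor -> (1, 224, 224), then normalize with the cxr stats.
    assert tensor.shape == (1, 224, 224)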
|
observational-main
|
transforms.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import json
import os
import pickle
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
from emmental.data import EmmentalDataLoader
from emmental.utils.utils import pred_to_prob, move_to_device
from end_model.dataset import (
ObservationalDataset,
num_classes_dict,
)
from end_model.soft_cross_entropy import SoftCrossEntropyLoss
from transforms import get_data_transforms
from tqdm import tqdm
from functools import partial
import pdb
gaze_norm_stats = {
"cxr": (
torch.Tensor([0.3585, 0.5312, 1.2828]),
torch.Tensor([0.2142, 0.2206, 0.5644]),
)
}
helper_output_dim_dict = {"loc": 9, "time": 2, "diffusivity": 2}
def ce_loss(task_name, num_classes, immediate_output_dict, Y, active):
if num_classes > 2:
if len(Y.shape) < 2: # i.e. if Y not already in pred form
Y_ = torch.Tensor(pred_to_prob(Y, num_classes))
Y_ = move_to_device(Y_, 0)
else:
Y_ = Y
else:
Y_ = torch.stack((1 - Y, Y), axis=1)
# active = Y != -1
ce_loss = SoftCrossEntropyLoss().forward(
immediate_output_dict[f"classification_module_{task_name}"][0][active],
Y_[active],
)
loss = ce_loss
return loss
def ce_loss_reweight(task_name, num_classes, immediate_output_dict, Y, active):
if num_classes > 2:
Y_ = torch.Tensor(pred_to_prob(Y, num_classes))
Y_ = move_to_device(Y_, 0)
else:
Y_ = torch.stack((1 - Y, Y), axis=1)
loss_outputs = SoftCrossEntropyLoss(reduction="none").forward(
immediate_output_dict[f"classification_module_{task_name}"][0][active],
Y_[active],
)
confidence_weights = torch.Tensor(
immediate_output_dict["_input_"]["confidence"]
).cuda()
return confidence_weights.dot(loss_outputs) / confidence_weights.sum()
def output(task_name, immediate_output_dict):
return F.softmax(
immediate_output_dict[f"classification_module_{task_name}"][0], dim=1
)
# Two functions below for MSE gaze feature learning
def mse_loss(task_name, immediate_output_dict, Y, active):
prediction = immediate_output_dict[f"classification_module_{task_name}"][0]
return F.mse_loss(prediction[active], Y[active])
def mse_output(task_name, immediate_output_dict):
    return immediate_output_dict[f"classification_module_{task_name}"][0]
# This squeeze module is needed to connect the ResNet encoder in Emmental.
class SqueezeModule(nn.Module):
"""A default identity input module that simply passes the squeezed input through."""
def __init__(self):
super().__init__()
def reset_parameters(self):
pass
def forward(self, x):
return x.squeeze()
# helper
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
with open(json_path, "w") as f:
        # We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
d = {k: float(v) for k, v in d.items()}
json.dump(d, f, indent=4)
def str2list(v, dim=","):
return [t.strip() for t in v.split(dim)]
def write_to_file(path, file_name, value):
if not isinstance(value, str):
value = str(value)
fout = open(os.path.join(path, file_name), "w")
fout.write(value + "\n")
fout.close()
def fetch_dataloaders(
task_type,
gaze_mtl_task,
source,
data_dir,
train_scale,
val_scale,
seed,
batch_size,
):
helper_tasks = gaze_mtl_task.split("_")
num_helper_tasks = len(helper_tasks)
datasets = {}
transforms = get_data_transforms(source, normalization_type="train_images")
for split in ["train", "val", "test"]:
datasets[split] = ObservationalDataset(
source=source,
task=task_type,
gaze_mtl_task=gaze_mtl_task,
data_dir=data_dir,
split_type=split,
transform=transforms[split],
train_scale=train_scale,
val_scale=val_scale,
seed=seed,
)
task_to_label_dict = {"target": "target"}
if task_type == "gaze_mtl":
for i in range(num_helper_tasks):
task_to_label_dict["helper_task_" + str(i)] = "helper_task_" + str(i)
elif task_type == "weak_gaze":
task_to_label_dict = {"target": "weak"}
dataloaders = []
for split in ["train", "val", "test"]:
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=datasets[split],
split=split,
shuffle=split == "train",
batch_size=batch_size,
num_workers=8,
)
)
return dataloaders
def create_tasks(
task_type,
gaze_mtl_task,
source,
pretrained,
task_weights,
load_path=None,
):
helper_tasks = gaze_mtl_task.split("_")
num_helper_tasks = len(helper_tasks)
helper_output_dims = [helper_output_dim_dict[task] for task in helper_tasks]
num_classes = num_classes_dict[source]
input_module = models.resnet50(pretrained=pretrained)
num_features = 2048
# remove last FC layer
modules = list(input_module.children())[:-1]
modules.append(SqueezeModule())
cnn_module = nn.Sequential(*modules)
if load_path is not None:
print(f"Loading from {load_path}...")
loaded = torch.load(load_path)
cnn_module.load_state_dict(loaded["model"]["module_pool"]["cnn"])
task_names = ["target"]
weights = [1]
num_classes_task = {"target": num_classes}
if task_type in ["gaze_mtl", "gaze_mtl_ws"]:
for i in range(num_helper_tasks):
task_names.append("helper_task_" + str(i))
num_classes_task["helper_task_" + str(i)] = helper_output_dims[i]
weights.append(task_weights[i])
loss_fnc = ce_loss
tasks = [
EmmentalTask(
name=task_name,
module_pool=nn.ModuleDict(
{
"cnn": cnn_module,
f"classification_module_{task_name}": nn.Linear(
num_features, num_classes_task[task_name]
),
}
),
task_flow=[
{"name": "cnn", "module": "cnn", "inputs": [("_input_", "image")]},
{
"name": f"classification_module_{task_name}",
"module": f"classification_module_{task_name}",
"inputs": [("cnn", 0)],
},
],
loss_func=partial(loss_fnc, task_name, num_classes_task[task_name]),
output_func=partial(output, task_name),
scorer=Scorer(metrics=["accuracy", "roc_auc", "precision", "recall", "f1"]),
weight=weights[t_ndx],
)
for t_ndx, task_name in enumerate(task_names)
]
return tasks
def save_features(save_pth, model, dataloader, task):
model.eval()
features_dict = {}
encoder = model.module_pool.cnn
for x_dict_b, y_dict_b in tqdm(dataloader, total=len(dataloader)):
if task in ["unsup_gaze", "gaze_lstm"]:
input_b = x_dict_b["gaze_seq"]
img_pth_b = x_dict_b["id"]
else:
input_b = x_dict_b["image"]
img_pth_b = x_dict_b["img_id"]
feature_b = encoder(input_b.cuda()).squeeze()
for i in range(feature_b.shape[0]):
features_dict[img_pth_b[i]] = feature_b[i, :].detach().cpu().numpy()
with open(os.path.join(save_pth, "train_features_dict.pkl"), "wb") as pkl_f:
pickle.dump(features_dict, pkl_f)
def save_predictions(save_pth, emm_model, dataloaders, task):
emm_model.eval()
encoder = emm_model.module_pool.cnn
target_head = emm_model.module_pool.classification_module_target
model = nn.Sequential(encoder, target_head)
if task == "gaze_mtl":
helper_head = emm_model.module_pool.classification_module_helper_task_0
helper_model = nn.Sequential(encoder, helper_head)
for ndx, dataloader in enumerate(dataloaders):
predictions_dict = {}
for x_dict_b, y_dict_b in tqdm(dataloader, total=len(dataloader)):
input_b = x_dict_b["image"]
img_pth_b = x_dict_b["img_id"]
true_labels_b = y_dict_b["target"]
logits = model(input_b)
predictions_b = F.softmax(logits, dim=1)[:, 1].detach().cpu().numpy()
if task == "gaze_mtl":
helper_true_labels_b = y_dict_b["helper_task_0"]
logits = helper_model(input_b)
helper_predictions_b = F.softmax(logits, dim=1).detach().cpu().numpy()
for i in range(predictions_b.shape[0]):
if task == "gaze_mtl":
predictions_dict[img_pth_b[i]] = (
predictions_b[i],
true_labels_b[i],
helper_predictions_b[i],
helper_true_labels_b[i],
)
else:
predictions_dict[img_pth_b[i]] = (
predictions_b[i],
true_labels_b[i],
)
if ndx == 0:
file_name = "train_predictions_dict.pkl"
elif ndx == 1:
file_name = "val_predictions_dict.pkl"
elif ndx == 2:
file_name = "test_predictions_dict.pkl"
with open(os.path.join(save_pth, file_name), "wb") as pkl_f:
pickle.dump(predictions_dict, pkl_f)
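# Minimal sketch (illustrative only) of the small I/O helpers above; writes into a
# temporary directory so nothing persists.
def _example_io_helpers():
    import tempfile
    assert str2list("a, b ,c") == ["a", "b", "c"]
    with tempfile.TemporaryDirectory() as tmpd:
        save_dict_to_json({"accuracy": np.float64(0.93)}, os.path.join(tmpd, "metrics.json"))
        write_to_file(tmpd, "cmd.txt", 42)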
|
observational-main
|
emmental_utils.py
|
import logging
import os, sys
import argparse
import emmental
from emmental import Meta
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from emmental.utils.parse_args import parse_args, parse_args_to_config
from emmental_utils import (
fetch_dataloaders,
create_tasks,
write_to_file,
save_predictions,
)
from utils import add_application_args
logger = logging.getLogger(__name__)
def get_parser():
# Parse cmdline args and setup environment
parser = argparse.ArgumentParser(
"Observational Runner", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser = parse_args(parser=parser)
add_application_args(parser)
return parser
def main(args):
# Ensure that global state is fresh
Meta.reset()
# Initialize Emmental
config = parse_args_to_config(args)
emmental.init(
config["meta_config"]["log_path"],
config=config,
use_exact_log_path=config["meta_config"]["use_exact_log_path"],
)
# Save command line argument into file
cmd_msg = " ".join(sys.argv)
logger.info(f"COMMAND: {cmd_msg}")
write_to_file(Meta.log_path, "cmd.txt", cmd_msg)
# Save Emmental config into file
logger.info(f"Config: {Meta.config}")
write_to_file(Meta.log_path, "config.txt", Meta.config)
# fetch dataloaders
dataloaders = fetch_dataloaders(
args.task,
args.gaze_mtl_task,
args.source,
args.data_dir,
args.train_scale,
args.val_scale,
args.seed,
args.batch_size,
)
# create emmental tasks
tasks = create_tasks(
args.task,
args.gaze_mtl_task,
args.source,
args.pretrained,
args.task_weights,
load_path=args.load_cnn,
)
# create emmental model
model = EmmentalModel(name="Observational", tasks=tasks)
logger.info(f"Total parameters: {sum(p.numel() for p in model.parameters())}")
logger.info(f"Model task weights: {model.weights}")
if not (args.evaluate):
# run learner
emmental_learner = EmmentalLearner()
if args.transfer_learning:
model.weights = {"target": 1.0, "helper_task_0": 0}
emmental_learner.learn(model, dataloaders)
if args.transfer_learning:
model.weights = {"target": 0.0, "helper_task_0": 1.0}
# Freeze CNN encoder
cnn_encoder = model.module_pool.cnn
for param in cnn_encoder.parameters():
param.requires_grad = False
Meta.config["learner_config"]["n_epochs"] = 15
emmental_learner.learn(model, dataloaders)
if args.load_cnn:
cnn_encoder = model.module_pool.cnn
for param in cnn_encoder.parameters():
param.requires_grad = False
scores = model.score(dataloaders)
# Save metrics into file
write_to_file(Meta.log_path, "metrics.txt", scores)
save_predictions(Meta.log_path, model, dataloaders, args.task)
# Save best metrics into file
write_to_file(
Meta.log_path,
"best_metrics.txt",
emmental_learner.logging_manager.checkpointer.best_metric_dict,
)
# save_features(Meta.config["meta_config"]["log_path"],model,dataloaders)
else:
best_model_pth = os.path.join(
Meta.config["meta_config"]["log_path"],
"best_model_target_" + args.source + "_val_accuracy.pth",
)
model.load(best_model_pth)
# Save metrics into file
# write_to_file(Meta.log_path, "metrics.txt", scores)
save_predictions(Meta.log_path, model, dataloaders, args.task)
# save_features(Meta.config["meta_config"]["log_path"],model,dataloaders)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
main(args)
|
observational-main
|
train_emmental.py
|
observational-main
|
__init__.py
|
|
import matplotlib.pyplot as plt
import cv2
import pydicom
def plot_heatmap(source, img_pth, heatmap):
figure, axs = plt.subplots(nrows=1, ncols=2)
if source == "cxr":
ds = pydicom.dcmread(img_pth)
img = ds.pixel_array
else:
img = plt.imread(img_pth)
img_h, img_w = img.shape[0], img.shape[1]
axs[0].imshow(img, cmap="gray", vmin=0, vmax=255)
axs[1].imshow(img, cmap="gray", vmin=0, vmax=255)
hm2 = cv2.resize(heatmap, (img_h, img_w))
axs[1].imshow(hm2.T, alpha=0.4)
axs[0].axis("off")
axs[1].axis("off")
figure.tight_layout()
plt.show()
def plot_saccade(img_pth, gaze_seq, source):
if source == "cxr":
ds = pydicom.dcmread(img_pth)
img = ds.pixel_array
else:
img = plt.imread(img_pth)
img_h, img_w = img.shape[0], img.shape[1]
s = 100
gaze_x_list = []
gaze_y_list = []
size_list = []
for i, gaze_pt in enumerate(gaze_seq):
time_spent = gaze_pt[2]
gaze_x, gaze_y = img_w * gaze_pt[0], img_h * gaze_pt[1]
gaze_x_list.append(gaze_x)
gaze_y_list.append(gaze_y)
size_list.append(s * time_spent)
if source == "cxr":
plt.imshow(img, cmap="gray", vmin=0, vmax=255)
else:
plt.imshow(img)
plt.axis("OFF")
plt.scatter(gaze_x_list, gaze_y_list, s=size_list, alpha=0.7)
for i in range(len(gaze_x_list)):
plt.annotate(str(i + 1), (gaze_x_list[i], gaze_y_list[i]))
plt.show()
|
observational-main
|
viz_utils.py
|