python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import logging
import os
from os.path import join as pjoin
import subprocess
import sys
import numpy as np
class COLMAPParams:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument("image_path", help="image path")
self.parser.add_argument("workspace_path", help="workspace path")
self.parser.add_argument(
"--mask_path",
help="path for mask to exclude feature extration from those regions",
default=None,
)
self.parser.add_argument(
"--dense_max_size", type=int, help='Max size for dense COLMAP', default=384,
)
self.add_arguments(self.parser)
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--colmap_bin_path",
help="path to colmap bin. COLMAP 3.6 is required to enable mask_path",
default='colmap'
)
parser.add_argument(
"--sparse", help="disable dense reconstruction", action='store_true'
)
parser.add_argument(
"--initialize_pose", help="Intialize Pose", action='store_true'
)
parser.add_argument(
"--camera_params", help="prior camera parameters", default=None
)
parser.add_argument(
"--camera_model", help="camera_model", default='SIMPLE_PINHOLE'
)
parser.add_argument(
"--refine_intrinsics",
help="refine camera parameters. Not used when camera_params is None",
action="store_true"
)
parser.add_argument(
"--matcher", choices=["exhaustive", "sequential"], default="exhaustive",
help="COLMAP matcher ('exhaustive' or 'sequential')"
)
def parse_args(self, args=None, namespace=None):
return self.parser.parse_args(args, namespace=namespace)
class COLMAPProcessor:
def __init__(self, colmap_bin: str = 'colmap'):
self.colmap_bin = colmap_bin
def process(self, args):
os.makedirs(args.workspace_path, exist_ok=True)
self.extract_features(args)
self.match(args)
if args.initialize_pose:
self.triangulate(args)
else:
self.map(args)
models = os.listdir(self.sparse_dir(args.workspace_path))
num_models = len(models)
logging.info('#models = %d', num_models)
if num_models > 1:
logging.error(
"COLMAP reconstructs more than one model (#models=%d)",
num_models
)
if 'sparse' not in vars(args) or not args.sparse:
for sub_model in models:
self.dense(sub_model, args)
def extract_features(self, args):
cmd = [
self.colmap_bin,
'feature_extractor',
'--database_path', self.db_path(args.workspace_path),
'--image_path', args.image_path,
'--ImageReader.camera_model', args.camera_model,
'--ImageReader.single_camera', '1'
]
if args.camera_params:
cmd.extend(['--ImageReader.camera_params', args.camera_params])
if args.mask_path:
cmd.extend(['--ImageReader.mask_path', args.mask_path])
if args.initialize_pose:
cmd.extend(['--SiftExtraction.num_threads', '1'])
cmd.extend(['--SiftExtraction.gpu_index', '0'])
run(cmd)
def match(self, args):
cmd = [
self.colmap_bin,
f'{args.matcher}_matcher',
'--database_path', self.db_path(args.workspace_path),
'--SiftMatching.guided_matching', '1',
]
if args.matcher == "sequential":
cmd.extend([
'--SequentialMatching.overlap', '50',
'--SequentialMatching.quadratic_overlap', '0',
])
run(cmd)
def triangulate(self, args):
if self.check_sparse(self.sparse_dir(args.workspace_path, model_index=0)):
return
pose_init_dir = self.pose_init_dir(args.workspace_path)
assert self.check_sparse(pose_init_dir)
sparse_dir = self.sparse_dir(args.workspace_path, model_index=0)
os.makedirs(sparse_dir, exist_ok=True)
cmd = [
self.colmap_bin,
'point_triangulator',
'--database_path', self.db_path(args.workspace_path),
'--image_path', args.image_path,
'--output_path', sparse_dir,
'--input_path', pose_init_dir,
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_local_max_num_iterations', '0',
'--Mapper.ba_global_max_num_iterations', '1',
]
run(cmd)
def map(self, args):
if self.check_sparse(self.sparse_dir(args.workspace_path, model_index=0)):
return
sparse_dir = self.sparse_dir(args.workspace_path)
os.makedirs(sparse_dir, exist_ok=True)
cmd = [
self.colmap_bin,
'mapper',
'--database_path', self.db_path(args.workspace_path),
'--image_path', args.image_path,
'--output_path', sparse_dir,
# add the following options for KITTI evaluation. Should help in general.
'--Mapper.abs_pose_min_inlier_ratio', '0.5',
'--Mapper.abs_pose_min_num_inliers', '50',
'--Mapper.init_max_forward_motion', '1',
'--Mapper.ba_local_num_images', '15',
]
if args.camera_params and not args.refine_intrinsics:
cmd.extend([
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_extra_params', '0',
])
run(cmd)
def dense(self, recon_model: str, args):
dense_dir = self.dense_dir(args.workspace_path, model_index=recon_model)
if self.check_dense(dense_dir, args.image_path):
return
os.makedirs(dense_dir, exist_ok=True)
cmd = [
self.colmap_bin,
'image_undistorter',
'--image_path', args.image_path,
'--input_path',
self.sparse_dir(args.workspace_path, model_index=recon_model),
'--output_path', dense_dir,
'--output_type', "COLMAP",
'--max_image_size', str(args.dense_max_size),
]
run(cmd)
cmd = [
self.colmap_bin,
'patch_match_stereo',
'--workspace_path', dense_dir,
'--workspace_format', "COLMAP",
'--PatchMatchStereo.max_image_size', str(args.dense_max_size),
]
run(cmd)
@staticmethod
def dense_depth_suffix():
return ".geometric.bin"
@staticmethod
def db_path(workspace):
return pjoin(workspace, 'database.db')
@staticmethod
def sparse_dir(workspace, model_index=None):
p = pjoin(workspace, 'sparse')
if model_index is None:
return p
return pjoin(p, str(model_index))
@staticmethod
def dense_dir(workspace, model_index=None):
p = pjoin(workspace, 'dense')
if model_index is None:
return p
return pjoin(p, str(model_index))
@staticmethod
def pose_init_dir(workspace):
return pjoin(workspace, 'pose_init')
@staticmethod
def check_sparse(sparse_model_dir: str):
return any(
all(
(os.path.isfile(pjoin(sparse_model_dir, name))
for name in ["cameras" + ext, "images" + ext])
)
for ext in ['.bin', '.txt']
)
@classmethod
def check_dense(cls, dense_model_dir: str, image_path: str, valid_ratio=1):
assert valid_ratio <= 1
depth_fmt = pjoin(
dense_model_dir, "stereo", "depth_maps", "{}" + cls.dense_depth_suffix()
)
color_names = os.listdir(image_path)
        num_valid = sum(os.path.isfile(depth_fmt.format(n)) for n in color_names)
return (num_valid / len(color_names)) >= valid_ratio
def run(cmd):
print(' '.join(cmd))
subprocess.run(cmd)
def main(args):
    processor = COLMAPProcessor(args.colmap_bin_path)
processor.process(args)
return 0
def parse_args():
return COLMAPParams().parser.parse_args()
if __name__ == '__main__':
sys.exit(main(parse_args()))
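# A minimal usage sketch of the classes above: drive the COLMAP pipeline
# programmatically instead of via the command line. "images/" and "colmap_ws/"
# are placeholder paths, and a working `colmap` binary is assumed to be on PATH.
def _example_programmatic_run():
    params = COLMAPParams()
    args = params.parse_args(
        ["images/", "colmap_ws/", "--sparse", "--matcher", "sequential"]
    )
    processor = COLMAPProcessor(args.colmap_bin_path)
    processor.process(args)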
|
consistent_depth-main
|
tools/colmap_processor.py
|
#!/usr/bin/env python3
from torch.optim.optimizer import Optimizer
from torch.optim import Adam
OPTIMIZER_MAP = {
"Adam": Adam,
}
OPTIMIZER_NAMES = OPTIMIZER_MAP.keys()
OPTIMIZER_CLASSES = OPTIMIZER_MAP.values()
def create(optimizer_name: str, *args, **kwargs) -> Optimizer:
return OPTIMIZER_MAP[optimizer_name](*args, **kwargs)
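# A minimal usage sketch: the factory above is called with the optimizer name
# plus the usual torch.optim constructor arguments (the learning rate here is
# only an example value).
def _example_create_optimizer(model_parameters) -> Optimizer:
    return create("Adam", model_parameters, lr=1e-4)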
|
consistent_depth-main
|
optimizer/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
from utils.torch_helpers import _device
from utils.geometry import (
pixel_grid,
focal_length,
project,
pixels_to_points,
reproject_points,
sample,
)
def select_tensors(x):
"""
x (B, N, C, H, W) -> (N, B, C, H, W)
    Each batch element (B) contains N samples (e.g., a pair).
"""
return x.transpose(0, 1)
def weighted_mse_loss(input, target, weights, dim=1, eps=1e-6):
"""
Args:
input (B, C, H, W)
target (B, C, H, W)
weights (B, 1, H, W)
Returns:
        error (B): weighted mean of the squared error for each batch element
"""
assert (
input.ndimension() == target.ndimension()
and input.ndimension() == weights.ndimension()
)
# normalize to sum=1
B = weights.shape[0]
weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
weights_sum = torch.clamp(weights_sum, min=eps)
weights_n = weights / weights_sum
sq_error = torch.sum((input - target) ** 2, dim=dim, keepdim=True) # BHW
return torch.sum((weights_n * sq_error).reshape(B, -1), dim=1)
def weighted_rmse_loss(input, target, weights, dim=1, eps=1e-6):
"""
Args:
input (B, C, H, W)
target (B, C, H, W)
weights (B, 1, H, W)
Returns:
        (B): weighted mean of the per-pixel L2 error for each batch element
"""
assert (
input.ndimension() == target.ndimension()
and input.ndimension() == weights.ndimension()
)
# normalize to sum=1
B = weights.shape[0]
weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
weights_sum = torch.clamp(weights_sum, min=eps)
weights_n = weights / weights_sum
diff = torch.norm(input - target, dim=dim, keepdim=True)
return torch.sum((weights_n * diff).reshape(B, -1), dim=1)
def weighted_mean_loss(x, weights, eps=1e-6):
"""
Args:
x (B, ...)
weights (B, ...)
Returns:
        (B): weighted mean of x for each batch element
"""
assert x.ndimension() == weights.ndimension() and x.shape[0] == weights.shape[0]
# normalize to sum=1
B = weights.shape[0]
weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
weights_sum = torch.clamp(weights_sum, min=eps)
weights_n = weights / weights_sum
return torch.sum((weights_n * x).reshape(B, -1), dim=1)
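# A small sanity-check sketch for the weighted losses above: with uniform
# weights, weighted_mse_loss reduces to the ordinary per-batch mean squared
# error (1.0 for the constant tensors below). Shapes follow the docstrings.
def _example_weighted_loss_sanity_check():
    B, C, H, W = 2, 1, 4, 4
    pred = torch.zeros(B, C, H, W)
    target = torch.ones(B, C, H, W)
    weights = torch.ones(B, 1, H, W)
    return weighted_mse_loss(pred, target, weights)  # tensor of shape (B,), all ones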
class ConsistencyLoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.dist = torch.abs
def geometry_consistency_loss(self, points_cam, metadata, pixels):
"""Geometry Consistency Loss.
For each pair as specified by indices,
geom_consistency = reprojection_error + disparity_error
reprojection_error is measured in the screen space of each camera in the pair.
Args:
points_cam (B, N, 3, H, W): points in local camera coordinate.
pixels (B, N, 2, H, W)
metadata: dictionary of related metadata to compute the loss. Here assumes
metadata include entries as below.
{
'extrinsics': torch.tensor (B, N, 3, 4), # extrinsics of each frame.
Each (3, 4) = [R, t]
'intrinsics': torch.tensor (B, N, 4), # (fx, fy, cx, cy)
'geometry_consistency':
{
                    'flows': ((B, 2, H, W),) * 2 in pixels.
For k in range(2) (ref or tgt),
pixel p = pixels[indices[b, k]][:, i, j]
correspond to
p + flows[k][b, :, i, j]
in frame indices[b, (k + 1) % 2].
'masks': ((B, 1, H, W),) * 2. Masks of valid flow
matches. Values are 0 or 1.
}
}
"""
geom_meta = metadata["geometry_consistency"]
points_cam_pair = select_tensors(points_cam)
extrinsics = metadata["extrinsics"]
extrinsics_pair = select_tensors(extrinsics)
intrinsics = metadata["intrinsics"]
intrinsics_pair = select_tensors(intrinsics)
pixels_pair = select_tensors(pixels)
flows_pair = (flows for flows in geom_meta["flows"])
masks_pair = (masks for masks in geom_meta["masks"])
reproj_losses, disp_losses = [], []
inv_idxs = [1, 0]
for (
points_cam_ref,
tgt_points_cam_tgt,
pixels_ref,
flows_ref,
masks_ref,
intrinsics_ref,
intrinsics_tgt,
extrinsics_ref,
extrinsics_tgt,
) in zip(
points_cam_pair,
points_cam_pair[inv_idxs],
pixels_pair,
flows_pair,
masks_pair,
intrinsics_pair,
intrinsics_pair[inv_idxs],
extrinsics_pair,
extrinsics_pair[inv_idxs],
):
# change to camera space for target_camera
points_cam_tgt = reproject_points(
points_cam_ref, extrinsics_ref, extrinsics_tgt
)
matched_pixels_tgt = pixels_ref + flows_ref
pixels_tgt = project(points_cam_tgt, intrinsics_tgt)
if self.opt.lambda_reprojection > 0:
reproj_dist = torch.norm(pixels_tgt - matched_pixels_tgt,
dim=1, keepdim=True)
reproj_losses.append(
weighted_mean_loss(self.dist(reproj_dist), masks_ref)
)
if self.opt.lambda_view_baseline > 0:
# disparity consistency
f = torch.mean(focal_length(intrinsics_ref))
                # Sample the target frame's point map (target camera coordinates,
                # defined on the target image grid) at the flow-matched pixels so
                # it aligns with the reference image grid.
warped_tgt_points_cam_tgt = sample(
tgt_points_cam_tgt, matched_pixels_tgt
)
disp_diff = 1.0 / points_cam_tgt[:, -1:, ...] \
- 1.0 / warped_tgt_points_cam_tgt[:, -1:, ...]
disp_losses.append(
f * weighted_mean_loss(self.dist(disp_diff), masks_ref)
)
B = points_cam_pair[0].shape[0]
dtype = points_cam_pair[0].dtype
reproj_loss = (
self.opt.lambda_reprojection
* torch.mean(torch.stack(reproj_losses, dim=-1), dim=-1)
if len(reproj_losses) > 0
else torch.zeros(B, dtype=dtype, device=_device)
)
disp_loss = (
self.opt.lambda_view_baseline
* torch.mean(torch.stack(disp_losses, dim=-1), dim=-1)
if len(disp_losses) > 0
else torch.zeros(B, dtype=dtype, device=_device)
)
batch_losses = {"reprojection": reproj_loss, "disparity": disp_loss}
return torch.mean(reproj_loss + disp_loss), batch_losses
def __call__(
self,
depths,
metadata,
):
"""Compute total loss.
        The network predicts a set of depth results. The number of samples, N, is
        not the batch size; it is determined by the losses in use.
        For instance, geometry_consistency_loss takes pairs as samples, so N = 2.
        With additional losses, say a triplet loss from temporal_consistency_loss,
        N = 2 + 3.
Args:
depths (B, N, H, W): predicted_depths
metadata: dictionary of related metadata to compute the loss. Here assumes
metadata include data as below. But each loss assumes more.
{
'extrinsics': torch.tensor (B, N, 3, 4), # extrinsics of each frame.
Each (3, 4) = [R, t]
'intrinsics': torch.tensor (B, N, 4),
# (fx, fy, cx, cy) for each frame in pixels
}
Returns:
            loss: scalar loss tensor and a dict of per-batch losses.
"""
def squeeze(x):
return x.reshape((-1,) + x.shape[2:])
def unsqueeze(x, N):
return x.reshape((-1, N) + x.shape[1:])
depths = depths.unsqueeze(-3)
intrinsics = metadata["intrinsics"]
B, N, C, H, W = depths.shape
pixels = pixel_grid(B * N, (H, W))
points_cam = pixels_to_points(squeeze(intrinsics), squeeze(depths), pixels)
pixels = unsqueeze(pixels, N)
points_cam = unsqueeze(points_cam, N)
return self.geometry_consistency_loss(points_cam, metadata, pixels)
|
consistent_depth-main
|
loss/consistency_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
class LossParams:
"""
Loss related parameters
"""
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--lambda_view_baseline",
type=float,
default=-1,
help="The baseline to define weight to penalize disparity difference."
" If < 0 it will be set automatically to the default for the"
" specified model adapter.",
)
parser.add_argument(
"--lambda_reprojection",
type=float,
default=1.0,
help="weight for reprojection loss.",
)
parser.add_argument(
"--lambda_parameter",
type=float,
default=0,
help="weight for network parameter regularization loss.",
)
return parser
@staticmethod
def make_str(opt):
return (
"B{}".format(opt.lambda_view_baseline)
+ "_R{}".format(opt.lambda_reprojection)
+ '_PL1-{}'.format(opt.lambda_parameter)
)
|
consistent_depth-main
|
loss/loss_params.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
class ParameterLoss(torch.nn.Module):
    def __init__(self, parameters_init, opt):
        super().__init__()
        self.parameters_init = parameters_init
        self.opt = opt
assert opt.lambda_parameter > 0
def __call__(self, parameters):
        # L1 distance to the initial parameter values (the "PL1" term in LossParams.make_str).
        abs_diff = [torch.abs(p - pi.data)
                    for p, pi in zip(parameters, self.parameters_init)]
        abs_sum = torch.sum(torch.cat([d.flatten() for d in abs_diff]))
        loss = self.opt.lambda_parameter * abs_sum
return loss, {"parameter_loss": loss.reshape(1, -1)}
|
consistent_depth-main
|
loss/parameter_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional
import torch
from torch.nn import Parameter
from .parameter_loss import ParameterLoss
from .consistency_loss import ConsistencyLoss
from utils.torch_helpers import _device
from loaders.video_dataset import _dtype
class JointLoss(torch.nn.Module):
def __init__(self, opt, parameters_init=None):
super().__init__()
self.opt = opt
if opt.lambda_parameter > 0:
assert parameters_init is not None
self.parameter_loss = ParameterLoss(parameters_init, opt)
if opt.lambda_view_baseline > 0 or opt.lambda_reprojection > 0:
self.consistency_loss = ConsistencyLoss(opt)
def __call__(
self,
depths,
metadata,
parameters: Optional[List[Parameter]] = None,
):
loss = torch.zeros(1, dtype=_dtype, device=_device)
batch_losses = {}
if self.opt.lambda_parameter > 0:
assert parameters is not None
para_loss, para_batch_losses = self.parameter_loss(parameters)
loss += para_loss
batch_losses.update(para_batch_losses)
if self.opt.lambda_view_baseline > 0 or self.opt.lambda_reprojection > 0:
consis_loss, consis_batch_losses = self.consistency_loss(
depths, metadata,
)
loss += consis_loss
batch_losses.update(consis_batch_losses)
return loss, batch_losses
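# A minimal construction sketch: JointLoss only inspects the three lambda_*
# options, so a plain argparse Namespace can stand in for the parsed options
# (the lambda values below are illustrative, not recommended settings).
def _example_build_joint_loss() -> JointLoss:
    from argparse import Namespace
    opt = Namespace(lambda_parameter=0.0, lambda_view_baseline=0.1, lambda_reprojection=1.0)
    return JointLoss(opt)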
|
consistent_depth-main
|
loss/joint_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import namedtuple
from enum import Enum, unique, auto
from typing import Iterable, NamedTuple, Dict, Any, Set
import numpy as np
from .frame_range import FrameRange
@unique
class SamplePairsMode(Enum):
EXHAUSTED = 0
CONSECUTIVE = auto()
HIERARCHICAL = auto()
HIERARCHICAL2 = auto()
@classmethod
def name_mode_map(cls):
return {v.name.lower(): v for v in cls}
@classmethod
def names(cls):
return [v.name.lower() for v in cls]
# params defaults to {} while mode is required
class SamplePairsOptions(NamedTuple):
mode: SamplePairsMode
params: Dict[str, Any] = {}
Pair = namedtuple("Pair", ["first", "second"])
Pairs_t = Set[Pair]
class SamplePairs:
@classmethod
def sample(
cls,
opts: Iterable[SamplePairsOptions],
frame_range: FrameRange,
two_way=False,
) -> Pairs_t:
num_frames = len(frame_range)
rel_pairs = set()
for opt in opts:
rel_pairs = rel_pairs.union(cls.factory(num_frames, opt, two_way))
pairs = set()
for rel_pair in rel_pairs:
pair = Pair(
frame_range.index_to_frame[rel_pair[0]],
frame_range.index_to_frame[rel_pair[1]]
)
            # Filter out pairs where neither end is in the frame range. This could
            # be optimized by taking the range into account when constructing the pairs.
if (pair[0] in frame_range.frames() or pair[1] in frame_range.frames()):
pairs.add(pair)
return pairs
@classmethod
def factory(
cls, num_frames: int, opt: SamplePairsOptions, two_way: bool
) -> Pairs_t:
funcs = {
SamplePairsMode.EXHAUSTED: cls.sample_exhausted,
SamplePairsMode.CONSECUTIVE: cls.sample_consecutive,
SamplePairsMode.HIERARCHICAL: cls.sample_hierarchical,
SamplePairsMode.HIERARCHICAL2: cls.sample_hierarchical2,
}
return funcs[opt.mode](num_frames, two_way, **opt.params)
@staticmethod
def sample_hierarchical(
num_frames: int,
two_way: bool,
min_dist=1,
max_dist=None,
include_mid_point=False,
) -> Pairs_t:
"""
Args:
min_dist, max_dist: minimum and maximum distance to the neighbour
"""
assert min_dist >= 1
if max_dist is None:
max_dist = num_frames - 1
min_level = np.ceil(np.log2(min_dist)).astype(int)
max_level = np.floor(np.log2(max_dist)).astype(int)
step_level = (lambda l: max(0, l - 1)) if include_mid_point else (lambda l: l)
signs = (-1, 1) if two_way else (1,)
pairs = set()
for level in range(min_level, max_level + 1):
dist = 1 << level
step = 1 << step_level(level)
for start in range(0, num_frames, step):
for sign in signs:
end = start + sign * dist
if end < 0 or end >= num_frames:
continue
pairs.add(Pair(start, end))
return pairs
@classmethod
def sample_hierarchical2(
cls, num_frames: int, two_way: bool, min_dist=1, max_dist=None
) -> Pairs_t:
return cls.sample_hierarchical(
num_frames,
two_way,
min_dist=min_dist,
max_dist=max_dist,
include_mid_point=True,
)
@classmethod
def sample_consecutive(cls, num_frames: int, two_way: bool) -> Pairs_t:
return cls.sample_hierarchical(num_frames, two_way, min_dist=1, max_dist=1)
    @classmethod
    def sample_exhausted(cls, num_frames: int, two_way: bool) -> Pairs_t:
second_frame_range = (
(lambda i, N: range(N)) if two_way else (lambda i, N: range(i + 1, N))
)
pairs = set()
for i in range(num_frames):
for j in second_frame_range(i, num_frames):
if i != j:
pairs.add(Pair(i, j))
return pairs
@classmethod
def to_one_way(cls, pairs) -> Pairs_t:
def ordered(pair):
if pair[0] > pair[1]:
return Pair(*pair[::-1])
return Pair(*pair)
return {ordered(p) for p in pairs}
def to_in_range(pairs, frame_range=None):
if frame_range is None:
return pairs
def in_range(idx):
return frame_range[0] <= idx and idx < frame_range[1]
return [pair for pair in pairs if all(in_range(i) for i in pair)]
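# A minimal usage sketch: combine consecutive and hierarchical sampling for a
# short clip, mirroring how a frame-pair list could be assembled. The clip
# length and min_dist below are arbitrary example values.
def _example_sample_pairs(num_frames: int = 8) -> Pairs_t:
    from .frame_range import OptionalSet  # defined next to FrameRange
    frame_range = FrameRange(OptionalSet(set(range(num_frames))), num_frames=num_frames)
    opts = [
        SamplePairsOptions(SamplePairsMode.CONSECUTIVE),
        SamplePairsOptions(SamplePairsMode.HIERARCHICAL, {"min_dist": 2}),
    ]
    return SamplePairs.sample(opts, frame_range, two_way=True)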
|
consistent_depth-main
|
utils/frame_sampling.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Set, Optional
from collections import namedtuple
# The `set` field is an OptionalSet, defined below.
NamedOptionalSet = namedtuple("NamedOptionalSet", ["name", "set"])
class OptionalSet:
def __init__(self, set: Optional[Set] = None):
self.set = set
def intersection(self, other):
if self.set is None:
return other
if other.set is None:
return self
return OptionalSet(set=self.set.intersection(other.set))
def __str__(self):
return str(self.set)
class FrameRange:
"""
Compute the indices of frames we are interested in from the specified range.
"""
def __init__(
self,
frame_range: OptionalSet,
num_frames: int = None,
):
full_range = OptionalSet(set=set(range(num_frames))
if num_frames is not None else None)
self.update(frame_range.intersection(full_range))
def intersection(self, other: OptionalSet):
return FrameRange(self.frame_range.intersection(other))
def update(self, frame_range: OptionalSet):
assert frame_range.set is not None
self.frame_range = frame_range
        # Map from contiguous index to frame id for all frames in the range.
all_frames = sorted(self.frame_range.set)
self.index_to_frame = {i: f for i, f in enumerate(all_frames)}
def frames(self):
return sorted(self.index_to_frame.values())
def __len__(self):
return len(self.index_to_frame)
def parse_frame_range(frame_range_str: str) -> NamedOptionalSet:
"""
Create a frame range from a string, e.g.: 1-10,15,21-40,51-62.
"""
if len(frame_range_str) == 0:
return NamedOptionalSet(name=frame_range_str, set=OptionalSet())
range_strs = frame_range_str.split(',')
def parse_sub_range(sub_range_str: str):
splits = [int(s) for s in sub_range_str.split('-', maxsplit=1)]
if len(splits) == 1:
return splits
start, end = splits
assert start <= end
return range(start, end + 1)
frame_range = set()
for range_str in range_strs:
frame_range.update(parse_sub_range(range_str))
# Convert the range to a friendly string representation, e.g.,
# 6,6,5,8,0,2-4,5-6,10,9 -> "0,2-6,8-10"
it = iter(sorted(frame_range))
ranges = []
start = next(it)
last_index = start
def add_range(ranges):
if last_index == start:
ranges.append(f"{start}")
else:
ranges.append(f"{start}-{last_index}")
for i in it:
if i < 0:
raise ValueError("Frame indices must be positive.")
assert(i > last_index)
if i - last_index > 1:
add_range(ranges)
start = i
last_index = i
add_range(ranges)
name = ",".join(ranges)
return NamedOptionalSet(name=name, set=OptionalSet(frame_range))
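# A minimal usage sketch: parse a range string and clip it against the frames
# that actually exist in a clip. The range string and clip length are example
# values only.
def _example_parse_and_clip(num_frames: int = 30) -> FrameRange:
    named = parse_frame_range("0,2-6,8-10,40-45")  # 40-45 exceed the clip below
    # FrameRange drops the out-of-range indices by intersecting with [0, num_frames).
    return FrameRange(named.set, num_frames=num_frames)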
|
consistent_depth-main
|
utils/frame_range.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import numpy as np
import os
from PIL import Image
import cv2
import struct
from subprocess import call
import warnings
import six
if six.PY2:
class ResourceWarning(RuntimeWarning):
pass
# Needed to suppress ResourceWarning for unclosed image file on dev server.
warnings.simplefilter("ignore", ResourceWarning)
warnings.simplefilter("ignore", UserWarning)
# Resize the image so that its longer side is at most max_size, rounding each
# dimension to the nearest multiple of `align`.
def resize_to_target(image, max_size, align=1, suppress_messages=False):
if not suppress_messages:
print("Original size: %d x %d" % (image.shape[1], image.shape[0]))
H, W = image.shape[:2]
long_side = float(max(W, H))
scale = min(1.0, max_size / long_side)
resized_height = int(H * scale)
resized_width = int(W * scale)
if resized_width % align != 0:
resized_width = align * round(resized_width / align)
if not suppress_messages:
print("Rounding width to closest multiple of %d." % align)
if resized_height % align != 0:
resized_height = align * round(resized_height / align)
if not suppress_messages:
print("Rounding height to closest multiple of %d." % align)
if not suppress_messages:
print("Resized: %d x %d" % (resized_width, resized_height))
image = cv2.resize(
image, (resized_width, resized_height), interpolation=cv2.INTER_AREA
)
return image
# Reads an image and returns a normalized float buffer (0-1 range). Corrects
# rotation based on EXIF tags.
def load_image(file_name, max_size=None, align=1, suppress_messages=False):
img, angle = load_image_angle(
file_name, max_size, align=align, suppress_messages=suppress_messages
)
return img
def load_image_angle(
file_name, max_size=None, min_size=None,
angle=0, align=1, suppress_messages=False
):
with Image.open(file_name) as img:
if hasattr(img, "_getexif") and img._getexif() is not None:
# orientation tag in EXIF data is 274
exif = dict(img._getexif().items())
# adjust the rotation
if 274 in exif:
if exif[274] == 8:
angle = 90
elif exif[274] == 6:
angle = 270
elif exif[274] == 3:
angle = 180
if angle != 0:
img = img.rotate(angle, expand=True)
img = np.float32(img) / 255.0
if max_size is not None:
if min_size is not None:
img = cv2.resize(
img, (max_size, min_size), interpolation=cv2.INTER_AREA)
else:
img = resize_to_target(
img, max_size, align=align, suppress_messages=suppress_messages
)
return img, angle
return [[]], 0.0
# Load image from binary file in the same way as read in C++ with
# #include "compphotolib/core/CvUtil.h"
# freadimg(fileName, image);
def load_raw_float32_image(file_name):
with open(file_name, "rb") as f:
CV_CN_MAX = 512
CV_CN_SHIFT = 3
CV_32F = 5
I_BYTES = 4
Q_BYTES = 8
h = struct.unpack("i", f.read(I_BYTES))[0]
w = struct.unpack("i", f.read(I_BYTES))[0]
cv_type = struct.unpack("i", f.read(I_BYTES))[0]
pixel_size = struct.unpack("Q", f.read(Q_BYTES))[0]
d = ((cv_type - CV_32F) >> CV_CN_SHIFT) + 1
assert d >= 1
d_from_pixel_size = pixel_size // 4
if d != d_from_pixel_size:
raise Exception(
"Incompatible pixel_size(%d) and cv_type(%d)" % (pixel_size, cv_type)
)
if d > CV_CN_MAX:
raise Exception("Cannot save image with more than 512 channels")
data = np.frombuffer(f.read(), dtype=np.float32)
result = data.reshape(h, w) if d == 1 else data.reshape(h, w, d)
return result
# Save image to binary file, so that it can be read in C++ with
# #include "compphotolib/core/CvUtil.h"
# freadimg(fileName, image);
def save_raw_float32_image(file_name, image):
with open(file_name, "wb") as f:
CV_CN_MAX = 512
CV_CN_SHIFT = 3
CV_32F = 5
dims = image.shape
h = 0
w = 0
d = 1
if len(dims) == 2:
h, w = image.shape
float32_image = np.transpose(image).astype(np.float32)
else:
h, w, d = image.shape
float32_image = np.transpose(image, [2, 1, 0]).astype("float32")
cv_type = CV_32F + ((d - 1) << CV_CN_SHIFT)
pixel_size = d * 4
if d > CV_CN_MAX:
raise Exception("Cannot save image with more than 512 channels")
f.write(struct.pack("i", h))
f.write(struct.pack("i", w))
f.write(struct.pack("i", cv_type))
f.write(struct.pack("Q", pixel_size)) # Write size_t ~ uint64_t
# Set buffer size to 16 MiB to hide the Python loop overhead.
buffersize = max(16 * 1024 ** 2 // image.itemsize, 1)
for chunk in np.nditer(
float32_image,
flags=["external_loop", "buffered", "zerosize_ok"],
buffersize=buffersize,
order="F",
):
f.write(chunk.tobytes("C"))
def save_image(file_name, image):
ext = os.path.splitext(file_name)[1].lower()
if ext == ".raw":
save_raw_float32_image(file_name, image)
else:
image = 255.0 * image
image = Image.fromarray(image.astype("uint8"))
image.save(file_name)
def save_depth_map_colored(file_name, depth_map, color_binary):
save_image(file_name, depth_map)
color_depth_name = os.path.splitext(file_name)[0] + "_color.jpg"
if color_binary != "":
call([color_binary, "--inputFile", file_name, "--outputFile", color_depth_name])
# Command-line entry point: resize an image and save the result.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_image", type=str, help="input image")
parser.add_argument("--output_image", type=str, help="output image")
parser.add_argument(
"--max_size", type=int, default=768, help="max size of long image dimension"
)
args, unknown = parser.parse_known_args()
img = load_image(args.input_image, int(args.max_size))
save_image(args.output_image, img)
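# A small round-trip sketch for the .raw reader/writer above: a float32 image
# written with save_raw_float32_image is recovered exactly by
# load_raw_float32_image. The temporary path is a placeholder.
def _example_raw_roundtrip(path="/tmp/example_depth.raw"):
    depth = np.random.rand(4, 6).astype(np.float32)
    save_raw_float32_image(path, depth)
    loaded = load_raw_float32_image(path)
    assert loaded.shape == depth.shape and np.allclose(loaded, depth)
    return loaded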
|
consistent_depth-main
|
utils/image_io.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from third_party.colmap.scripts.python.read_write_model import (
CAMERA_MODELS,
rotmat2qvec,
Camera,
BaseImage,
write_model
)
# for exporting these functions to the rest of the code
from third_party.colmap.scripts.python.read_dense import read_array
from third_party.colmap.scripts.python.read_write_model import (
qvec2rotmat,
read_images_binary,
read_points3d_binary,
read_cameras_binary,
read_model,
)
CAMERA_NAME_TO_IDS = {
c.model_name: c.model_id for c in CAMERA_MODELS
}
# maps colmap point xc to normal coordinate frame x
# x = ROT_COLMAP_TO_NORMAL * x
ROT_COLMAP_TO_NORMAL = np.diag([1, -1, -1])
def intrinsics_to_camera(intrinsics, src_im_size=None, dst_im_size=None, eps=0.01):
"""Convert metadata intrinsics to COLMAP Camera.
Only support shared SIMPLE_PINHOLE camera.
Args:
intrinsics: (N, 4) where each row is fx, fy, cx, cy.
Assume intrinsics is the same across all frames.
src_im_size: image size corresponding to intrinsics
dst_im_size: the image size we want to convert to
"""
fxy, cxy = intrinsics[0][:2], intrinsics[0][-2:]
if src_im_size is None:
src_im_size = (2 * cxy).astype(int)
if dst_im_size is None:
dst_im_size = src_im_size
ratio = np.array(dst_im_size) / np.array(src_im_size).astype(float)
fxy *= ratio
cxy *= ratio
if np.abs(fxy[0] - fxy[1]) < eps:
model = 'SIMPLE_PINHOLE'
params = np.array((fxy[0], cxy[0], cxy[1]))
else:
model = 'PINHOLE'
params = np.array((fxy[0], fxy[1], cxy[0], cxy[1]))
camera = Camera(
id=1, model=model,
width=dst_im_size[0], height=dst_im_size[1],
params=params
)
return {camera.id: camera}
def extrinsics_to_images(extrinsics):
images = {}
for i, extr in enumerate(extrinsics):
R, t = extr[:, :3], extr[:, -1:]
Rc = ROT_COLMAP_TO_NORMAL.dot(R.T).dot(ROT_COLMAP_TO_NORMAL.T)
tc = -Rc.dot(ROT_COLMAP_TO_NORMAL.T).dot(t)
frame_id = i + 1
image = BaseImage(
id=frame_id, qvec=rotmat2qvec(Rc), tvec=tc.flatten(),
camera_id=1, name="frame_%06d.png" % i,
xys=[], point3D_ids=[]
)
images[image.id] = image
return images
def to_colmap(intrinsics, extrinsics, src_im_size=None, dst_im_size=None):
"""Convert Extrinsics and intrinsics to an empty COLMAP project with no points.
"""
cameras = intrinsics_to_camera(
intrinsics, src_im_size=src_im_size, dst_im_size=dst_im_size
)
images = extrinsics_to_images(extrinsics)
points3D = {}
return cameras, images, points3D
def save_colmap(
path, intrinsics, extrinsics, src_im_size=None, dst_im_size=None, ext=".txt"
):
cameras, images, points3D = to_colmap(intrinsics, extrinsics,
src_im_size=src_im_size, dst_im_size=dst_im_size)
write_model(cameras, images, points3D, path, ext)
def cameras_to_intrinsics(cameras, camera_ids, size_new):
"""
Args:
size_new: image size after resizing and produce equivalent intrinsics
for this size
"""
# params = f, cx, cy
assert all(
(c.model == "SIMPLE_PINHOLE" or c.model == "PINHOLE"
or c.model == "SIMPLE_RADIAL"
for c in cameras.values()))
intrinsics = []
for id in camera_ids:
c = cameras[id]
if c.model == "SIMPLE_PINHOLE":
f, cx, cy = c.params
fxy = np.array([f, f])
elif c.model == "PINHOLE":
fx, fy, cx, cy = c.params
fxy = np.array([fx, fy])
elif c.model == "SIMPLE_RADIAL":
f, cx, cy, r = c.params
fxy = np.array([f, f])
else:
raise AssertionError()
ratio = np.array(size_new) / np.array((c.width, c.height))
fxy = fxy * ratio
cxy = np.array((cx, cy)) * ratio
intrinsics.append(np.concatenate((fxy, cxy)))
return np.stack(intrinsics, axis=0)
def images_to_extrinsics(images, image_ids):
"""Let p be in local camera coordinates. x in global coordinates.
Rc, tc be rotation and translation from colmap
p = Rc * x + tc, i.e., x = Rc^T * p - Rc^T * tc
But we want to generate R, t, s.t.,
x = Rx+t,
so R = Rc^T, t = - Rc^T * tc
Note that colmap uses a different coordinate system where y points down and
z points to the world.
"""
extrinsics = []
for id in image_ids:
im = images[id]
Rc, tc = im.qvec2rotmat(), im.tvec
R, t = Rc.T, -Rc.T.dot(tc.reshape(-1, 1))
R = ROT_COLMAP_TO_NORMAL.dot(R).dot(ROT_COLMAP_TO_NORMAL.T)
t = ROT_COLMAP_TO_NORMAL.dot(t)
extrinsics.append(np.concatenate([R, t], axis=1))
return np.stack(extrinsics, axis=0)
def convert_points3D(pts3D: np.ndarray):
"""
points (3, N)
"""
return ROT_COLMAP_TO_NORMAL.dot(pts3D)
def ordered_image_ids(images):
return sorted(images.keys(), key=lambda id: images[id].name)
def convert_calibration(cameras, images, size_new):
sorted_im_ids = ordered_image_ids(images)
sorted_cam_ids = [images[id].camera_id for id in sorted_im_ids]
intrinsics = cameras_to_intrinsics(cameras, sorted_cam_ids, size_new)
extrinsics = images_to_extrinsics(images, sorted_im_ids)
return intrinsics, extrinsics
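# A minimal usage sketch for intrinsics_to_camera: metadata intrinsics for a
# single shared camera are turned into a COLMAP camera dict. The 640x480 size
# and focal length are illustrative values; fx == fy selects SIMPLE_PINHOLE.
def _example_intrinsics_to_camera():
    intrinsics = np.array([[500.0, 500.0, 320.0, 240.0]])  # (fx, fy, cx, cy)
    cameras = intrinsics_to_camera(intrinsics, src_im_size=(640, 480))
    return cameras[1]  # Camera(model='SIMPLE_PINHOLE', width=640, height=480, ...)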
|
consistent_depth-main
|
utils/load_colmap.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch.nn
def sample(data, uv):
"""Sample data (H, W, <C>) by uv (H, W, 2) (in pixels). """
shape = data.shape
# data from (H, W, <C>) to (1, C, H, W)
data = data.reshape(data.shape[:2] + (-1,))
data = torch.tensor(data).permute(2, 0, 1)[None, ...]
# (H, W, 2) -> (1, H, W, 2)
uv = torch.tensor(uv)[None, ...]
H, W = shape[:2]
# grid needs to be in [-1, 1] and (B, H, W, 2)
size = torch.tensor((W, H), dtype=uv.dtype).view(1, 1, 1, -1)
grid = (2 * uv / size - 1).to(data.dtype)
tensor = torch.nn.functional.grid_sample(data, grid, padding_mode="border")
# from (1, C, H, W) to (H, W, <C>)
return tensor.permute(0, 2, 3, 1).reshape(shape).numpy()
def sse(x, y, axis=-1):
"""Sum of suqare error"""
d = x - y
return np.sum(d * d, axis=axis)
def consistency_mask(im_ref, im_tgt, flow, threshold, diff_func=sse):
H, W = im_ref.shape[:2]
im_ref = im_ref.reshape(H, W, -1)
im_tgt = im_tgt.reshape(H, W, -1)
x, y = np.arange(W), np.arange(H)
X, Y = np.meshgrid(x, y)
u, v = flow[..., 0], flow[..., 1]
idx_x, idx_y = u + X, v + Y
# first constrain to within the image
mask = np.all(
np.stack((idx_x >= 0, idx_x <= W - 1, 0 <= idx_y, idx_y <= H - 1), axis=-1),
axis=-1,
)
im_tgt_to_ref = sample(im_tgt, np.stack((idx_x, idx_y), axis=-1))
mask = np.logical_and(mask, diff_func(im_ref, im_tgt_to_ref) < threshold)
return mask
def consistent_flow_masks(flows, colors, flow_thresh, color_thresh):
# mask from flow consistency
masks_flow = [
consistency_mask(flow_ref, -flow_tgt, flow_ref, flow_thresh ** 2)
for flow_ref, flow_tgt in zip(flows, flows[::-1])
]
# mask from photometric consistency
C = colors[0].shape[-1]
masks_photo = [
consistency_mask(c_ref, c_tgt, flow_ref, C * (color_thresh ** 2))
for c_ref, c_tgt, flow_ref in zip(colors, colors[::-1], flows)
]
# merge the two
masks = [np.logical_and(mf, mp) for mf, mp in zip(masks_flow, masks_photo)]
return masks
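# A minimal usage sketch for consistent_flow_masks: given a pair of frames with
# forward/backward flow, it returns one validity mask per frame. The random
# colors and zero flows below are stand-ins for real data; only the shapes matter.
def _example_consistency_masks(H: int = 32, W: int = 32):
    colors = [np.random.rand(H, W, 3).astype(np.float32) for _ in range(2)]
    flows = [np.zeros((H, W, 2), dtype=np.float32) for _ in range(2)]
    masks = consistent_flow_masks(flows, colors, flow_thresh=1.0, color_thresh=0.1)
    return masks  # two (H, W) boolean masks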
|
consistent_depth-main
|
utils/consistency.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from os.path import join as pjoin
import wget
from zipfile import ZipFile
def get_model_from_url(
url: str, local_path: str, is_zip: bool = False, path_root: str = "checkpoints"
) -> str:
local_path = pjoin(path_root, local_path)
if os.path.exists(local_path):
print(f"Found cache {local_path}")
return local_path
# download
local_path = local_path.rstrip(os.sep)
download_path = local_path if not is_zip else f"{local_path}.zip"
os.makedirs(os.path.dirname(download_path), exist_ok=True)
if os.path.isfile(download_path):
print(f"Found cache {download_path}")
else:
print(f"Dowloading {url} to {download_path} ...")
wget.download(url, download_path)
if is_zip:
print(f"Unziping {download_path} to {local_path}")
with ZipFile(download_path, 'r') as f:
f.extractall(local_path)
os.remove(download_path)
return local_path
|
consistent_depth-main
|
utils/url_helpers.py
|
consistent_depth-main
|
utils/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import numpy
import os
import subprocess
import sys
import logging
from matplotlib.cm import get_cmap
from . import image_io
CM_MAGMA = (numpy.array([get_cmap('magma').colors]).
transpose([1, 0, 2]) * 255)[..., ::-1].astype(numpy.uint8)
def visualize_depth(depth, depth_min=None, depth_max=None):
"""Visualize the depth map with colormap.
Rescales the values so that depth_min and depth_max map to 0 and 1,
respectively.
"""
if depth_min is None:
depth_min = numpy.amin(depth)
if depth_max is None:
depth_max = numpy.amax(depth)
depth_scaled = (depth - depth_min) / (depth_max - depth_min)
depth_scaled = depth_scaled ** 0.5
depth_scaled_uint8 = numpy.uint8(depth_scaled * 255)
return ((cv2.applyColorMap(
depth_scaled_uint8, CM_MAGMA) / 255) ** 2.2) * 255
def visualize_depth_dir(
src_dir: str, dst_dir: str, force: bool = False, extension: str = ".raw",
min_percentile: float = 0, max_percentile: float = 100,
):
src_files = []
dst_files = []
for file in sorted(os.listdir(src_dir)):
base, ext = os.path.splitext(file)
if ext.lower() == extension:
src_files.append(file)
dst_files.append(f"{base}.png")
if len(src_files) == 0:
return
# Check if all dst_files already exist
dst_exists = True
for file in dst_files:
if not os.path.exists(f"{dst_dir}/{file}"):
dst_exists = False
break
if not force and dst_exists:
return
d_min = sys.float_info.max
d_max = sys.float_info.min
for src_file in src_files:
print("reading '%s'." % src_file)
if extension == ".raw":
disparity = image_io.load_raw_float32_image(f"{src_dir}/{src_file}")
else:
disparity = cv2.imread(f"{src_dir}/{src_file}")
ix = numpy.isfinite(disparity)
if numpy.sum(ix) == 0:
logging.warning(f"{src_file} has 0 valid depth")
continue
valid_disp = disparity[ix]
d_min = min(d_min, numpy.percentile(valid_disp, min_percentile))
d_max = max(d_max, numpy.percentile(valid_disp, max_percentile))
for i in range(len(src_files)):
src_file = src_files[i]
dst_file = dst_files[i]
print(f"reading '{src_file}'.")
if os.path.exists(f"{dst_dir}/{dst_file}") and not force:
print(f"skipping existing file '{dst_file}'.")
else:
if extension == ".raw":
disparity = image_io.load_raw_float32_image(
f"{src_dir}/{src_file}")
else:
disparity = cv2.imread(f"{src_dir}/{src_file}")
disparity_vis = visualize_depth(disparity, d_min, d_max)
print(f"writing '{dst_file}'.")
cv2.imwrite(f"{dst_dir}/{dst_file}", disparity_vis)
def create_video(pattern: str, output_file: str, ffmpeg_bin: str = 'ffmpeg'):
if not os.path.exists(output_file):
cmd = [ffmpeg_bin, "-r", "30",
"-i", pattern,
"-c:v", "libx264",
"-crf", "27",
"-pix_fmt", "yuv420p",
output_file]
subprocess.call(cmd)
def apply_mask(im, mask, mask_color=None):
im = im.reshape(im.shape[:2] + (-1,))
C = im.shape[-1]
mask = mask.reshape(mask.shape[:2] + (-1,)) > 0
if mask_color is None:
mask_color = numpy.array([0, 255, 0] if C == 3 else 1)
mask_color = mask_color.reshape(1, 1, C)
inv_mask = (1 - mask) * mask_color
result = 0.7 * im + 0.3 * inv_mask
return result.squeeze()
|
consistent_depth-main
|
utils/visualization.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Tuple
def reproject(pts3d: np.ndarray, extr: np.ndarray) -> np.ndarray:
assert pts3d.shape[0] == extr.shape[0] and pts3d.shape[0] == 3
p_dim, _ = pts3d.shape
R, t = extr[:, :p_dim], extr[:, -1:]
return R.T.dot(pts3d - t)
def focal_length(intr: np.ndarray):
return intr[:2]
def principal_point(intrinsics):
"""
Args:
intrinsics: (fx, fy, cx, cy)
"""
return intrinsics[2:]
# # center version
# H, W = shape
# return torch.tensor(((W - 1) / 2.0, (H - 1) / 2.0), device=_device)
def project(pts3d: np.ndarray, intr: np.ndarray) -> np.ndarray:
"""
Args:
pts3d (3, N)
intr (4)
Returns:
pixels (2, N)
"""
rays = pts3d / -pts3d[-1:]
fxy = focal_length(intr)
uvs = rays[:2] * fxy.reshape(-1, 1)
cs = principal_point(intr)
# to pixels: (i, j) = (u, -v) + (cx, cy)
uvs[1] = -uvs[1]
pixels = uvs + cs.reshape(-1, 1)
return pixels
def sample(depth: np.ndarray, pixels: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Args:
depth (H, W)
pixels (2, N)
Returns:
depths (M): depths at corresponding pixels with nearest neighbour sampling,
M <= N, because some depth can be invalid
ix (N): whether a pixels[:, i] is inside the image
"""
pixels_nn = (pixels + 0.5).astype(int)
H, W = depth.shape
ix = np.all(
(
0 <= pixels_nn[0], pixels_nn[0] <= W - 1,
0 <= pixels_nn[1], pixels_nn[1] <= H - 1,
),
axis=0,
)
pixels_valid = pixels_nn[:, ix]
indices = pixels_valid[1] * W + pixels_valid[0]
ds = depth.flatten()[indices]
return ds, ix
|
consistent_depth-main
|
utils/geometry_np.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def to_device(data):
if isinstance(data, torch.Tensor):
data = data.to(_device, non_blocking=True)
return data
if isinstance(data, dict):
for k, v in data.items():
data[k] = to_device(v)
return data
# list or tuple
for i, v in enumerate(data):
data[i] = to_device(v)
return data
|
consistent_depth-main
|
utils/torch_helpers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from .torch_helpers import _device
from typing import List
def pixel_grid(batch_size, shape):
"""Returns pixel grid of size (batch_size, 2, H, W).
pixel positions (x, y) are in range [0, W-1] x [0, H-1]
top left is (0, 0).
"""
H, W = shape
x = torch.linspace(0, W - 1, W, device=_device)
y = torch.linspace(0, H - 1, H, device=_device)
Y, X = torch.meshgrid(y, x)
pixels = torch.stack((X, Y), dim=0)[None, ...]
return pixels.expand(batch_size, -1, -1, -1)
def principal_point(intrinsics, shape):
"""
Args:
intrinsics: (fx, fy, cx, cy)
shape: (H, W)
"""
return intrinsics[:, 2:]
# # center version
# H, W = shape
# return torch.tensor(((W - 1) / 2.0, (H - 1) / 2.0), device=_device)
def focal_length(intrinsics):
return intrinsics[:, :2]
def pixels_to_rays(pixels, intrinsics):
"""Convert pixels to rays in camera space using intrinsics.
Args:
pixels (B, 2, H, W)
intrinsics (B, 4): (fx, fy, cx, cy)
Returns:
rays: (B, 3, H, W), where z component is -1, i.e., rays[:, -1] = -1
"""
# Assume principal point is ((W-1)/2, (H-1)/2).
B, _, H, W = pixels.shape
cs = principal_point(intrinsics, (H, W))
    # Convert to [-(W-1)/2, (W-1)/2] x [-(H-1)/2, (H-1)/2] with the v axis pointing up.
uvs = pixels - cs.view(-1, 2, 1, 1)
uvs[:, 1] = -uvs[:, 1] # flip v
# compute rays (u/fx, v/fy, -1)
fxys = focal_length(intrinsics).view(-1, 2, 1, 1)
rays = torch.cat(
(uvs / fxys, -torch.ones((B, 1, H, W), dtype=uvs.dtype, device=_device)), dim=1
)
return rays
def project(points, intrinsics):
"""Project points in camera space to pixel coordinates based on intrinsics.
Args:
points (B, 3, H, W)
intrinsics (B, 4): (fx, fy, cx, cy)
Returns:
pixels (B, 2, H, W)
"""
rays = points / -points[:, -1:]
# rays in pixel unit
fxys = focal_length(intrinsics)
uvs = rays[:, :2] * fxys.view(-1, 2, 1, 1)
B, _, H, W = uvs.shape
cs = principal_point(intrinsics, (H, W))
# to pixels: (i, j) = (u, -v) + (cx, cy)
uvs[:, 1] = -uvs[:, 1] # flip v
pixels = uvs + cs.view(-1, 2, 1, 1)
return pixels
def pixels_to_points(intrinsics, depths, pixels):
"""Convert pixels to 3D points in camera space. (Camera facing -z direction)
Args:
intrinsics:
depths (B, 1, H, W)
pixels (B, 2, H, W)
Returns:
points (B, 3, H, W)
"""
rays = pixels_to_rays(pixels, intrinsics)
points = rays * depths
return points
def reproject_points(points_cam_ref, extrinsics_ref, extrinsics_tgt):
"""Reproject points in reference camera coordinate to target camera coordinate
Args:
points_cam_ref (B, 3, H, W): points in reference camera coordinate.
extrinsics_ref (B, 3, 4): [R, t] of reference camera.
extrinsics_tgt (B, 3, 4): [R, t] of target_camera.
Returns:
points_cam_tgt (B, 3, H, W): points in target camera coordinate.
"""
B, p_dim, H, W = points_cam_ref.shape
assert p_dim == 3, "dimension of point {} != 3".format(p_dim)
# t + R * p where t of (B, 3, 1), R of (B, 3, 3) and p of (B, 3, H*W)
R_ref = extrinsics_ref[..., :p_dim]
t_ref = extrinsics_ref[..., -1:]
points_world = torch.baddbmm(t_ref, R_ref, points_cam_ref.view(B, p_dim, -1))
# Reproject to target:
# R'^T * (p - t') where t' of (B, 3, 1), R' of (B, 3, 3) and p of (B, 3, H*W)
R_tgt = extrinsics_tgt[..., :p_dim]
t_tgt = extrinsics_tgt[..., -1:]
points_cam_tgt = torch.bmm(R_tgt.transpose(1, 2), points_world - t_tgt)
return points_cam_tgt.view(B, p_dim, H, W)
def depth_to_points(depths, intrinsics):
"""
Args:
depths: (B, 1, H, W)
intrinsics: (B, num_params)
"""
B, _, H, W = depths.shape
pixels = pixel_grid(B, (H, W))
points_cam = pixels_to_points(intrinsics, depths, pixels)
return points_cam
def calibrate_scale(extrinsics, intrinsics, depths):
"""Given depths, compute the global scale to adjust the extrinsics.
Given a pair of depths, intrinsics, extrinsics, unproject the depth maps,
rotate these points based on camera rotation and compute the center for each one.
The distance between these centers should be of the same scale as the translation
between the cameras. Therefore, let mu1, mu2 and t1, t2 be the two scene centers
and the two camera projection centers. Then
-scale * (t1 - t2) = mu1 - mu2.
Therefore,
scale = -dt.dot(dmu) / dt.dot(dt), where dt = t1 - t2, dmu = mu1 - mu2.
Args:
intrinsics (2, num_params)
extrinsics (2, 3, 4): each one is [R, t]
depths (2, 1, H, W)
"""
assert (
extrinsics.shape[0] == intrinsics.shape[0]
and intrinsics.shape[0] == depths.shape[0]
)
points_cam = depth_to_points(depths, intrinsics)
B, p_dim, H, W = points_cam.shape
Rs = extrinsics[..., :p_dim]
ts = extrinsics[..., p_dim]
points_rot = torch.bmm(Rs, points_cam.view(B, p_dim, -1))
mus = torch.mean(points_rot, axis=-1)
    # TODO(xuanluo): generalize this to more frames B > 2 via variances of the points.
assert B == 2
dmu = mus[0] - mus[1]
dt = ts[0] - ts[1]
t_scale = -dt.dot(dmu) / dt.dot(dt)
return t_scale
def warping_field(extrinsics, intrinsics, depths, tgt_ids: List[int]):
""" Generate the warping field to warp the other frame the current frame.
Args:
intrinsics (N, num_params)
extrinsics (N, 3, 4): each one is [R, t]
depths (N, 1, H, W)
tgt_ids (N, 1): warp frame tgt_ids[i] to i
Returns:
uvs (N, 2, H, W): sampling the other frame tgt_ids[i] with uvs[i] produces
the current frame i.
"""
assert (
extrinsics.shape[0] == intrinsics.shape[0]
and intrinsics.shape[0] == depths.shape[0]
)
points_cam = depth_to_points(depths, intrinsics)
extrinsics_tgt = extrinsics[tgt_ids]
points_tgt_cam = reproject_points(points_cam, extrinsics, extrinsics_tgt)
uv_tgt = project(points_tgt_cam, intrinsics[tgt_ids])
return uv_tgt
def sample(data, uv):
"""Sample data (B, C, H, W) by uv (B, 2, H, W) (in pixels). """
H, W = data.shape[2:]
# grid needs to be in [-1, 1] and (B, H, W, 2)
    # NOTE: divide by (W - 1, H - 1) instead of (W, H) so that pixel 0 maps to -1
    # and pixel W-1 / H-1 maps to +1 in the normalized grid coordinates.
size = torch.tensor((W - 1, H - 1), dtype=uv.dtype).view(1, -1, 1, 1).to(_device)
grid = (2 * uv / size - 1).permute(0, 2, 3, 1)
return torch.nn.functional.grid_sample(data, grid, padding_mode="border")
def warp_image(images, depths, extrinsics, intrinsics, tgt_ids: List[int]):
""" Warp target images to the reference image based on depths and camera params
Warp images[tgt_ids[i]] to images[i].
Args:
images (N, C, H, W)
depths (N, 1, H, W)
extrinsics (N, 3, 4)
intrinsics (N, 4)
tgt_ids (N, 1)
Returns:
images_warped
"""
uv_tgt = warping_field(extrinsics, intrinsics, depths, tgt_ids)
images_warped_to_ref = sample(images[tgt_ids], uv_tgt)
return images_warped_to_ref
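# A small round-trip sketch for the pinhole model above: unprojecting a depth
# map with pixels_to_points and projecting it back with project reproduces the
# pixel grid. The focal length and image size are arbitrary example values.
def _example_project_roundtrip(H: int = 8, W: int = 10):
    intrinsics = torch.tensor([[5.0, 5.0, (W - 1) / 2.0, (H - 1) / 2.0]], device=_device)
    depths = torch.ones(1, 1, H, W, device=_device)
    pixels = pixel_grid(1, (H, W))
    points = pixels_to_points(intrinsics, depths, pixels)
    pixels_back = project(points, intrinsics)
    assert torch.allclose(pixels_back, pixels, atol=1e-4)
    return pixels_back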
|
consistent_depth-main
|
utils/geometry.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from numpy.linalg import inv
import cv2
from sklearn import linear_model
def resize_small(gt, x, interp=cv2.INTER_NEAREST):
"""
Resize to match the smaller image.
"""
def size(x):
return x.shape[:2][::-1]
size_gt, size_x = size(gt), size(x)
if size_gt == size_x:
return gt, x
if np.prod(size_gt) < np.prod(size_x):
x = cv2.resize(x, size_gt, interpolation=interp)
else:
gt = cv2.resize(gt, size_x, interpolation=interp)
return gt, x
# calibration
def calibrate_scale_shift(gt, x):
ix = np.isfinite(gt) & np.isfinite(x)
gt = gt[ix]
x = x[ix]
x2s = (x * x).flatten().sum()
xs = x.flatten().sum()
os = np.ones_like(x.flatten()).sum()
xgs = (x * gt).flatten().sum()
gs = gt.sum()
A = np.array([
[x2s, xs],
[xs, os]
])
b = np.array(
[xgs, gs]
).T
s, t = inv(A).dot(b)
return np.array([s, t])
def calibrate_scale_shift_RANSAC(
gt, x, max_trials=100000, stop_prob=0.999
):
ix = np.isfinite(gt) & np.isfinite(x)
gt = gt[ix].reshape(-1, 1)
x = x[ix].reshape(-1, 1)
ransac = linear_model.RANSACRegressor(
max_trials=max_trials, stop_probability=stop_prob
)
ransac.fit(x, gt)
s = ransac.estimator_.coef_[0, 0]
t = ransac.estimator_.intercept_[0]
return s, t
def calibrate_scale(gt, x, reduce=np.median):
ix = np.isfinite(gt) & np.isfinite(x)
ratios = gt[ix] / x[ix]
return reduce(ratios)
# conversion
def cvt_by_scale_shift(depth, calib_data):
s, t = calib_data
return depth * s + t
CALIB_METHOD_MAP = {
"scale": calibrate_scale,
"scale-shift": calibrate_scale_shift,
"ransac": calibrate_scale_shift_RANSAC,
}
def calibrate(gt, x, method: str):
return CALIB_METHOD_MAP[method](gt, x)
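# A small sanity-check sketch for the calibration helpers above: a depth map
# related to ground truth by a known scale and shift is recovered by the
# least-squares "scale-shift" method. The 2.5 / 0.3 values are arbitrary.
def _example_calibrate_scale_shift():
    x = np.linspace(1.0, 10.0, 100)
    gt = 2.5 * x + 0.3
    s, t = calibrate(gt, x, "scale-shift")  # recovers approximately (2.5, 0.3)
    assert np.allclose(cvt_by_scale_shift(x, (s, t)), gt, atol=1e-6)
    return s, t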
|
consistent_depth-main
|
utils/calibrate.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def mkdir_ifnotexists(dir):
if os.path.exists(dir):
return
os.mkdir(dir)
def print_title(text):
print()
print("-" * len(text))
print(text)
print("-" * len(text))
def print_banner(text):
w = 12 + len(text)
print()
print("*" * w)
print(f"{'*' * 4} {text} {'*' * 4}")
print("*" * w)
class SuppressedStdout:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, exception_type, exception_value, traceback):
sys.stdout.close()
sys.stdout = self._original_stdout
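# A minimal usage sketch for the helpers above: attribute-style access to a
# config dict and silencing the stdout of a chatty block. The config keys are
# illustrative only.
def _example_helpers():
    config = dotdict({"lr": 1e-4, "batch_size": 2})
    with SuppressedStdout():
        print("this line goes to os.devnull")
    return config.lr, config.batch_size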
|
consistent_depth-main
|
utils/helpers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from os.path import join as pjoin
from typing import Dict, Tuple
import numpy as np
from . import load_colmap, image_io as tr
from .geometry_np import reproject, project, sample
def store_visible_points_per_image(
points3D: Dict[int, load_colmap.Point3D]
) -> Dict[int, np.ndarray]:
"""
    Returns a dictionary where the key is the image id (int) and the value is
    the 3D points (3, N) that are visible in that image.
    (Note: the image entries themselves do not contain this info, only a list
    of -1's, so it is gathered from points3D.)
"""
map_img_to_pt3D = {}
for _cur_key, cur_point in points3D.items():
# assert(cur_key == cur_point) # They are the same by design
for img_id in cur_point.image_ids:
if img_id in map_img_to_pt3D:
map_img_to_pt3D[img_id].append(cur_point.xyz)
else:
map_img_to_pt3D[img_id] = [cur_point.xyz]
for img_id, pt_list in map_img_to_pt3D.items():
map_img_to_pt3D[img_id] = load_colmap.convert_points3D(np.array(pt_list).T)
return map_img_to_pt3D
def vote_scale(
scales: np.ndarray, min_percentile_thresh: int = 10, max_percentile_thresh: int = 90
) -> float:
"""
    Note: if len(scales) is very small (e.g., len(scales) == 2), the percentile
    filter may leave no samples and the result will be NaN.
"""
m = np.percentile(scales, min_percentile_thresh)
M = np.percentile(scales, max_percentile_thresh)
ix = np.logical_and(m <= scales, scales <= M)
scale = np.mean(scales[ix])
return scale
def calibrate_frame_w_sparse_points(
pts3d: np.ndarray, intr: np.ndarray, extr: np.ndarray, inv_depth: np.ndarray
) -> float:
"""
Args:
pts3d (3, N)
intr (4)
extr (3, 4)
depth (H, W)
Returns:
scale: depth * scale = -pts_in_local_camera_coordinate.z
"""
# points 3d in local camera coordinate
# FIXME: deal with the case when a point is behind the camera
pts3d_cam = reproject(pts3d, extr)
pts2d = project(pts3d_cam, intr)
inv_depths, ix = sample(inv_depth, pts2d)
    ds = -pts3d_cam[-1, :][ix]  # note the negative sign: the camera faces -z
scales = ds * inv_depths
return vote_scale(scales)
def calibrate_w_sparse_colmap(
colmap_dir: str, dense_depth_dir: str
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Args:
colmap_dir: sparse colmap directory containing
cameras.bin/txt, points3D.bin/txt, images.bin/txt
dense_depth_dir: folder name for dense depth directory
scales_fn (optional): dump per frame scale
Returns:
Calibrated intrinsics and extrinsics
intrinsics (N, 4)
extrinsics (N, 3, 4)
scales (N)
"""
cameras, images, points3D = load_colmap.read_model(path=colmap_dir, ext=".bin")
# compute intrinsics, extrinsics
depth_names = [
x for x in os.listdir(dense_depth_dir) if os.path.splitext(x)[-1] == ".raw"
]
size = tr.load_raw_float32_image(pjoin(dense_depth_dir, depth_names[0])).shape[:2][
::-1
]
intrinsics, extrinsics = load_colmap.convert_calibration(cameras, images, size)
    # TODO: wrap the scale computation below into a single function
map_img_to_pt3D = store_visible_points_per_image(points3D)
ordered_im_ids = load_colmap.ordered_image_ids(images)
scales = np.empty(intrinsics.shape[0])
for i, im_id in enumerate(ordered_im_ids):
        if im_id not in map_img_to_pt3D:
            scales[i] = np.nan
            print('[WARNING] %s does not have visible feature point' % images[im_id].name)
            continue
im_name = images[im_id].name
depth_fn = pjoin(dense_depth_dir, os.path.splitext(im_name)[0] + ".raw")
inv_depth = tr.load_raw_float32_image(depth_fn)
pts3D = map_img_to_pt3D[im_id]
scale = calibrate_frame_w_sparse_points(
pts3D, intrinsics[i], extrinsics[i], inv_depth
)
scales[i] = scale
mean_scale = scales[~np.isnan(scales)].mean()
extrinsics[..., -1] /= mean_scale
return intrinsics, extrinsics, scales
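# A small sketch of the percentile voting above: vote_scale trims the low and
# high tails before averaging, so a handful of outlier per-point scales barely
# affect the result (the values below are synthetic).
def _example_vote_scale() -> float:
    scales = np.concatenate([np.full(96, 2.0), np.array([0.1, 0.2, 50.0, 80.0])])
    return vote_scale(scales)  # close to 2.0 despite the four outliers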
|
consistent_depth-main
|
utils/calibration.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import cv2
from os.path import join as pjoin
import json
import math
import numpy as np
import torch.utils.data as data
import torch
from typing import Optional
from utils import image_io, frame_sampling as sampling
_dtype = torch.float32
def load_image(
path: str,
channels_first: bool,
check_channels: Optional[int] = None,
post_proc_raw=lambda x: x,
post_proc_other=lambda x: x,
) -> torch.FloatTensor:
if os.path.splitext(path)[-1] == ".raw":
im = image_io.load_raw_float32_image(path)
im = post_proc_raw(im)
else:
im = cv2.imread(path, cv2.IMREAD_UNCHANGED)
im = post_proc_other(im)
im = im.reshape(im.shape[:2] + (-1,))
if check_channels is not None:
assert (
im.shape[-1] == check_channels
), "receive image of shape {} whose #channels != {}".format(
im.shape, check_channels
)
if channels_first:
im = im.transpose((2, 0, 1))
# to torch
return torch.tensor(im, dtype=_dtype)
def load_color(path: str, channels_first: bool) -> torch.FloatTensor:
"""
Returns:
torch.tensor. color in range [0, 1]
"""
im = load_image(
path,
channels_first,
post_proc_raw=lambda im: im[..., [2, 1, 0]] if im.ndim == 3 else im,
post_proc_other=lambda im: im / 255,
)
return im
def load_flow(path: str, channels_first: bool) -> torch.FloatTensor:
"""
Returns:
flow tensor in pixels.
"""
flow = load_image(path, channels_first, check_channels=2)
return flow
def load_mask(path: str, channels_first: bool) -> torch.ByteTensor:
"""
Returns:
mask takes value 0 or 1
"""
mask = load_image(path, channels_first, check_channels=1) > 0
return mask.to(_dtype)
class VideoDataset(data.Dataset):
"""Load 3D video frames and related metadata for optimizing consistency loss.
File organization of the corresponding 3D video dataset should be
color_down/frame_{__ID__:06d}.raw
flow/flow_{__REF_ID__:06d}_{__TGT_ID__:06d}.raw
mask/mask_{__REF_ID__:06d}_{__TGT_ID__:06d}.png
metadata.npz: {'extrinsics': (N, 3, 4), 'intrinsics': (N, 4)}
<flow_list.json>: [[i, j], ...]
"""
def __init__(self, path: str, meta_file: str = None):
"""
Args:
path: folder path of the 3D video
"""
self.color_fmt = pjoin(path, "color_down", "frame_{:06d}.raw")
if not os.path.isfile(self.color_fmt.format(0)):
self.color_fmt = pjoin(path, "color_down", "frame_{:06d}.png")
self.mask_fmt = pjoin(path, "mask", "mask_{:06d}_{:06d}.png")
self.flow_fmt = pjoin(path, "flow", "flow_{:06d}_{:06d}.raw")
if meta_file is not None:
with open(meta_file, "rb") as f:
meta = np.load(f)
self.extrinsics = torch.tensor(meta["extrinsics"], dtype=_dtype)
self.intrinsics = torch.tensor(meta["intrinsics"], dtype=_dtype)
assert (
self.extrinsics.shape[0] == self.intrinsics.shape[0]
), "#extrinsics({}) != #intrinsics({})".format(
self.extrinsics.shape[0], self.intrinsics.shape[0]
)
flow_list_fn = pjoin(path, "flow_list.json")
if os.path.isfile(flow_list_fn):
with open(flow_list_fn, "r") as f:
self.flow_indices = json.load(f)
else:
names = os.listdir(os.path.dirname(self.flow_fmt))
self.flow_indices = [
self.parse_index_pair(name)
for name in names
if os.path.splitext(name)[-1] == os.path.splitext(self.flow_fmt)[-1]
]
self.flow_indices = sampling.to_in_range(self.flow_indices)
self.flow_indices = list(sampling.SamplePairs.to_one_way(self.flow_indices))
def parse_index_pair(self, name):
strs = os.path.splitext(name)[0].split("_")[-2:]
return [int(s) for s in strs]
def __getitem__(self, index: int):
"""Fetch tuples of data. index = i * (i-1) / 2 + j, where i > j for pair (i,j)
So [-1+sqrt(1+8k)]/2 < i <= [1+sqrt(1+8k))]/2, where k=index. So
i = floor([1+sqrt(1+8k))]/2)
j = k - i * (i - 1) / 2.
The number of image frames fetched, N, is not the 1, but computed
based on what kind of consistency to be measured.
For instance, geometry_consistency_loss requires random pairs as samples.
So N = 2.
If with more losses, say triplet one from temporal_consistency_loss. Then
N = 2 + 3.
Returns:
stacked_images (N, C, H, W): image frames
targets: {
'extrinsics': torch.tensor (N, 3, 4), # extrinsics of each frame.
Each (3, 4) = [R, t].
                    point_world = R * point_cam + t
'intrinsics': torch.tensor (N, 4), # (fx, fy, cx, cy) for each frame
'geometry_consistency':
{
'indices': torch.tensor (2),
indices for corresponding pairs
[(ref_index, tgt_index), ...]
'flows': ((2, H, W),) * 2 in pixels.
For k in range(2) (ref or tgt),
pixel p = pixels[indices[b, k]][:, i, j]
                    corresponds to
p + flows[k][b, :, i, j]
in frame indices[b, (k + 1) % 2].
'masks': ((1, H, W),) * 2. Masks of valid flow matches
to compute the consistency in training.
Values are 0 or 1.
}
}
"""
pair = self.flow_indices[index]
indices = torch.tensor(pair)
intrinsics = torch.stack([self.intrinsics[k] for k in pair], dim=0)
extrinsics = torch.stack([self.extrinsics[k] for k in pair], dim=0)
images = torch.stack(
[load_color(self.color_fmt.format(k), channels_first=True) for k in pair],
dim=0,
)
flows = [
load_flow(self.flow_fmt.format(k_ref, k_tgt), channels_first=True)
for k_ref, k_tgt in [pair, pair[::-1]]
]
masks = [
load_mask(self.mask_fmt.format(k_ref, k_tgt), channels_first=True)
for k_ref, k_tgt in [pair, pair[::-1]]
]
metadata = {
"extrinsics": extrinsics,
"intrinsics": intrinsics,
"geometry_consistency": {
"indices": indices,
"flows": flows,
"masks": masks,
},
}
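        # `scales` is not set in __init__; callers may optionally attach per-frame
        # or global depth scale factors to the dataset before iterating.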
if getattr(self, "scales", None):
if isinstance(self.scales, dict):
metadata["scales"] = torch.stack(
[torch.Tensor([self.scales[k]]) for k in pair], dim=0
)
else:
metadata["scales"] = torch.Tensor(
[self.scales, self.scales]).reshape(2, 1)
return (images, metadata)
def __len__(self):
return len(self.flow_indices)
class VideoFrameDataset(data.Dataset):
"""Load video frames from
color_fmt.format(frame_id)
"""
def __init__(self, color_fmt, frames=None):
"""
Args:
color_fmt: e.g., <video_dir>/frame_{:06d}.raw
"""
self.color_fmt = color_fmt
if frames is None:
files = os.listdir(os.path.dirname(self.color_fmt))
self.frames = range(len(files))
else:
self.frames = frames
def __getitem__(self, index):
"""Fetch image frame.
Returns:
image (C, H, W): image frames
"""
frame_id = self.frames[index]
image = load_color(self.color_fmt.format(frame_id), channels_first=True)
meta = {"frame_id": frame_id}
return image, meta
def __len__(self):
return len(self.frames)
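# Hedged usage sketch (not part of the original module): how VideoDataset might be
# consumed with a PyTorch DataLoader, assuming a dataset directory ("my_video")
# laid out as described in the VideoDataset docstring.
if __name__ == "__main__":
    loader = data.DataLoader(VideoDataset("my_video"), batch_size=1, shuffle=True)
    for stacked_images, metadata in loader:
        print(stacked_images.shape, metadata["geometry_consistency"]["indices"])
        break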
|
consistent_depth-main
|
loaders/video_dataset.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from utils.url_helpers import get_model_from_url
from .midas_v2.midas_net import MidasNet
from .depth_model import DepthModel
class MidasV2Model(DepthModel):
# Requirements and default settings
align = 32
learning_rate = 0.0001
lambda_view_baseline = 0.0001
def __init__(self, support_cpu: bool = False, pretrained: bool = True):
super().__init__()
if support_cpu:
# Allow the model to run on CPU when GPU is not available.
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
else:
# Rather raise an error when GPU is not available.
self.device = torch.device("cuda")
self.model = MidasNet(non_negative=True)
# Load pretrained checkpoint
if pretrained:
checkpoint = (
"https://github.com/intel-isl/MiDaS/releases/download/v2/model-f46da743.pt"
)
state_dict = torch.hub.load_state_dict_from_url(
checkpoint, progress=True, check_hash=True
)
self.model.load_state_dict(state_dict)
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
self.model = torch.nn.DataParallel(self.model)
self.model.to(self.device)
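        # Standard ImageNet normalization statistics, applied to inputs in [0, 1].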
self.norm_mean = torch.Tensor(
[0.485, 0.456, 0.406]).reshape(1, -1, 1, 1)
self.norm_stdev = torch.Tensor(
[0.229, 0.224, 0.225]).reshape(1, -1, 1, 1)
def estimate_depth(self, images):
# Reshape ...CHW -> XCHW
shape = images.shape
C, H, W = shape[-3:]
input_ = images.reshape(-1, C, H, W).to(self.device)
input_ = (input_ - self.norm_mean.to(self.device)) / \
self.norm_stdev.to(self.device)
output = self.model(input_)
# Reshape X1HW -> BNHW
depth = output.reshape(shape[:-3] + output.shape[-2:])
# Convert from disparity to depth
depth = depth.reciprocal()
return depth
def save(self, file_name):
state_dict = self.model.state_dict()
torch.save(state_dict, file_name)
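# Hedged usage sketch (not part of the original module): run the network on a dummy
# batch. The input size and the support_cpu/pretrained flags are illustrative only.
if __name__ == "__main__":
    model = MidasV2Model(support_cpu=True, pretrained=False)
    dummy = torch.rand(2, 3, 384, 384)  # (N, C, H, W), values in [0, 1]
    with torch.no_grad():
        print(model.estimate_depth(dummy).shape)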
|
consistent_depth-main
|
monodepth/midas_v2_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.autograd as autograd
from utils.helpers import SuppressedStdout
from utils.url_helpers import get_model_from_url
from .mannequin_challenge.models import pix2pix_model
from .mannequin_challenge.options.train_options import TrainOptions
from .depth_model import DepthModel
class MannequinChallengeModel(DepthModel):
# Requirements and default settings
align = 16
learning_rate = 0.0004
lambda_view_baseline = 0.1
def __init__(self):
super().__init__()
parser = TrainOptions()
parser.initialize()
params = parser.parser.parse_args(["--input", "single_view"])
params.isTrain = False
model_file = get_model_from_url(
"https://storage.googleapis.com/mannequinchallenge-data/checkpoints/best_depth_Ours_Bilinear_inc_3_net_G.pth",
"mc.pth"
)
class FixedMcModel(pix2pix_model.Pix2PixModel):
# Override the load function, so we can load the snapshot stored
# in our specific location.
def load_network(self, network, network_label, epoch_label):
return torch.load(model_file)
with SuppressedStdout():
self.model = FixedMcModel(params)
def train(self):
self.model.switch_to_train()
def eval(self):
self.model.switch_to_eval()
def parameters(self):
return self.model.netG.parameters()
def estimate_depth(self, images):
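        # autograd.Variable is a legacy wrapper in modern PyTorch (it simply
        # returns a Tensor); kept here for compatibility with the original code.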
images = autograd.Variable(images.cuda(), requires_grad=False)
# Reshape ...CHW -> XCHW
shape = images.shape
C, H, W = shape[-3:]
images = images.reshape(-1, C, H, W)
self.model.prediction_d, _ = self.model.netG.forward(images)
# Reshape X1HW -> BNHW
out_shape = shape[:-3] + self.model.prediction_d.shape[-2:]
self.model.prediction_d = self.model.prediction_d.reshape(out_shape)
self.model.prediction_d = torch.exp(self.model.prediction_d)
self.model.prediction_d = self.model.prediction_d.squeeze(-3)
return self.model.prediction_d
def save(self, file_name):
state_dict = self.model.netG.state_dict()
torch.save(state_dict, file_name)
|
consistent_depth-main
|
monodepth/mannequin_challenge_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from os.path import join as pjoin
import torch
from utils.url_helpers import get_model_from_url
from .depth_model import DepthModel
from .monodepth2.networks.resnet_encoder import ResnetEncoder
from .monodepth2.networks.depth_decoder import DepthDecoder
class Monodepth2Model(DepthModel):
# Requirements and default settings
align = 1
learning_rate = 0.00004
lambda_view_baseline = 1
def __init__(self):
super().__init__()
self.device = torch.device("cuda")
model_url = "https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono+stereo_1024x320.zip"
local_model_dir = get_model_from_url(model_url, "monodepth2_mono+stereo_1024x320/", is_zip=True)
encoder_model_file = pjoin(local_model_dir, "encoder.pth")
depth_model_file = pjoin(local_model_dir, "depth.pth")
self.encoder = ResnetEncoder(18, False)
loaded_dict_enc = torch.load(encoder_model_file, map_location=self.device)
# extract the height and width of image that this model was trained with
self.feed_height = loaded_dict_enc['height']
self.feed_width = loaded_dict_enc['width']
print(f"Model was trained at {self.feed_width} x {self.feed_height}.")
filtered_dict_enc = {
k: v for k, v in loaded_dict_enc.items() if k in self.encoder.state_dict()
}
self.encoder.load_state_dict(filtered_dict_enc)
self.encoder.to(self.device)
self.depth_decoder = DepthDecoder(
num_ch_enc=self.encoder.num_ch_enc, scales=range(4))
loaded_dict = torch.load(depth_model_file, map_location=self.device)
self.depth_decoder.load_state_dict(loaded_dict)
self.depth_decoder.to(self.device)
def train(self):
self.encoder.train()
self.depth_decoder.train()
def eval(self):
self.encoder.eval()
self.depth_decoder.eval()
def parameters(self):
return list(self.encoder.parameters()) + list(self.depth_decoder.parameters())
def estimate_depth(self, images):
# Reshape ...CHW -> NCHW
shape = images.shape
C, H, W = shape[-3:]
images = images.reshape(-1, C, H, W)
# Estimate depth
feed_size = [self.feed_height, self.feed_width]
images = torch.nn.functional.interpolate(
images, size=feed_size, mode='bicubic', align_corners=False)
features = self.encoder(images)
outputs = self.depth_decoder(features)
disparity = outputs[("disp", 0)]
disparity = torch.nn.functional.interpolate(
disparity, size=[H, W], mode='bicubic', align_corners=False)
depth = disparity.reciprocal()
# Reshape N1HW -> ...1HW
out_shape = shape[:-3] + depth.shape[-2:]
depth = depth.reshape(out_shape)
return depth
def save(self, file_name):
pass
|
consistent_depth-main
|
monodepth/monodepth2_model.py
|
consistent_depth-main
|
monodepth/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import abstractmethod
import torch
class DepthModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, images, metadata=None):
"""
Images should be feed in the format (N, C, H, W). Channels are in BGR
order and values in [0, 1].
Metadata is not used by the depth models itself, only here, for value
transformations.
metadata["scales"]: (optional, can be None) specifies a post-scale
transformation of the depth values. Format (1, N, 1).
"""
depth = self.estimate_depth(images)
if metadata is not None:
if "scales" in metadata:
factor = metadata["scales"].unsqueeze(3).cuda()
depth = depth * factor
return depth
@abstractmethod
def estimate_depth(self, images, metadata) -> torch.Tensor:
pass
@abstractmethod
def save(self, label):
pass
|
consistent_depth-main
|
monodepth/depth_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from .depth_model import DepthModel
from .mannequin_challenge_model import MannequinChallengeModel
from .midas_v2_model import MidasV2Model
from .monodepth2_model import Monodepth2Model
from typing import List, Type
def get_depth_model_list() -> List[str]:
return ["mc", "midas2", "monodepth2"]
def get_depth_model(type: str) -> Type[DepthModel]:
if type == "mc":
return MannequinChallengeModel
elif type == "midas2":
return MidasV2Model
elif type == "monodepth2":
return Monodepth2Model
else:
raise ValueError(f"Unsupported model type '{type}'.")
def create_depth_model(type: str) -> DepthModel:
model_class = get_depth_model(type)
return model_class()
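# Hedged usage sketch (not part of the original module): look up and instantiate a
# model by name. Note this downloads pretrained weights and, depending on the
# model, may require a CUDA device.
if __name__ == "__main__":
    print(get_depth_model_list())
    depth_model = create_depth_model("midas2")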
|
consistent_depth-main
|
monodepth/depth_model_registry.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as osp
import setuptools
cur_dir = osp.dirname(osp.realpath(__file__))
requirementPath = osp.join(cur_dir, "requirements.txt")
install_requires = []
with open(requirementPath) as f:
install_requires = f.read().splitlines()
setuptools.setup(
name="imitation_learning",
version="0.1",
author="Andrew Szot",
author_email="",
description="imitation_learning",
url="",
install_requires=install_requires,
packages=setuptools.find_packages(),
)
|
bc-irl-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import os.path as osp
from collections import defaultdict
from typing import Dict, Optional
import gym.spaces as spaces
import hydra
import numpy as np
import torch
import torch.nn as nn
from hydra.utils import instantiate as hydra_instantiate
from omegaconf import DictConfig, OmegaConf
from rl_utils.common import (Evaluator, compress_dict, get_size_for_space,
set_seed)
from rl_utils.envs import create_vectorized_envs
from rl_utils.logging import Logger
from imitation_learning.policy_opt.policy import Policy
from imitation_learning.policy_opt.ppo import PPO
from imitation_learning.policy_opt.storage import RolloutStorage
@hydra.main(config_path="config", config_name="default")
def main(cfg) -> Dict[str, float]:
set_seed(cfg.seed)
device = torch.device(cfg.device)
set_env_settings = {
k: hydra_instantiate(v) if isinstance(v, DictConfig) else v
for k, v in cfg.env.env_settings.items()
}
envs = create_vectorized_envs(
cfg.env.env_name,
cfg.num_envs,
seed=cfg.seed,
device=device,
**set_env_settings,
)
steps_per_update = cfg.num_steps * cfg.num_envs
num_updates = int(cfg.num_env_steps) // steps_per_update
cfg.obs_shape = envs.observation_space.shape
cfg.action_dim = get_size_for_space(envs.action_space)
cfg.action_is_discrete = isinstance(cfg.action_dim, spaces.Discrete)
cfg.total_num_updates = num_updates
logger: Logger = hydra_instantiate(cfg.logger, full_cfg=cfg)
storage: RolloutStorage = hydra_instantiate(cfg.storage, device=device)
policy: Policy = hydra_instantiate(cfg.policy)
policy = policy.to(device)
updater = hydra_instantiate(cfg.policy_updater, policy=policy, device=device)
evaluator: Evaluator = hydra_instantiate(
cfg.evaluator,
envs=envs,
vid_dir=logger.vid_path,
updater=updater,
logger=logger,
device=device,
)
start_update = 0
if cfg.load_checkpoint is not None:
ckpt = torch.load(cfg.load_checkpoint)
updater.load_state_dict(ckpt["updater"], should_load_opt=cfg.resume_training)
if cfg.load_policy:
policy.load_state_dict(ckpt["policy"])
if cfg.resume_training:
start_update = ckpt["update_i"] + 1
eval_info = {"run_name": logger.run_name}
if cfg.only_eval:
eval_result = evaluator.evaluate(policy, cfg.num_eval_episodes, 0)
logger.collect_infos(eval_result, "eval.", no_rolling_window=True)
eval_info.update(eval_result)
logger.interval_log(0, 0)
logger.close()
return eval_info
obs = envs.reset()
storage.init_storage(obs)
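    # Main training loop: collect cfg.num_steps transitions per environment, let the
    # updater optimize the policy (and, for IRL variants, the learned reward), then
    # periodically evaluate, log, and checkpoint.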
for update_i in range(start_update, num_updates):
is_last_update = update_i == num_updates - 1
for step_idx in range(cfg.num_steps):
with torch.no_grad():
act_data = policy.act(
storage.get_obs(step_idx),
storage.recurrent_hidden_states[step_idx],
storage.masks[step_idx],
)
next_obs, reward, done, info = envs.step(act_data["actions"])
storage.insert(next_obs, reward, done, info, **act_data)
logger.collect_env_step_info(info)
updater.update(policy, storage, logger, envs=envs)
storage.after_update()
if cfg.eval_interval != -1 and (
update_i % cfg.eval_interval == 0 or is_last_update
):
with torch.no_grad():
eval_result = evaluator.evaluate(
policy, cfg.num_eval_episodes, update_i
)
logger.collect_infos(eval_result, "eval.", no_rolling_window=True)
eval_info.update(eval_result)
if cfg.log_interval != -1 and (
update_i % cfg.log_interval == 0 or is_last_update
):
logger.interval_log(update_i, steps_per_update * (update_i + 1))
if cfg.save_interval != -1 and (
(update_i + 1) % cfg.save_interval == 0 or is_last_update
):
save_name = osp.join(logger.save_path, f"ckpt.{update_i}.pth")
torch.save(
{
"policy": policy.state_dict(),
"updater": updater.state_dict(),
"update_i": update_i,
},
save_name,
)
print(f"Saved to {save_name}")
eval_info["last_ckpt"] = save_name
logger.close()
return eval_info
if __name__ == "__main__":
main()
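# Hedged usage note: this entry point is normally launched through Hydra, e.g.
#   python -m imitation_learning.run num_envs=32 num_env_steps=1e6
# where the available config groups live under imitation_learning/config/ (the
# exact option names above are illustrative, not confirmed by this file).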
|
bc-irl-main
|
imitation_learning/run.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bc-irl-main
|
imitation_learning/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from omegaconf import OmegaConf
from imitation_learning.run import main
@hydra.main(config_path="config", config_name="default")
def run_and_eval(cfg):
eval_cfg = OmegaConf.merge(cfg, cfg.eval_args)
eval_cfg.load_policy = False
assert eval_cfg.load_checkpoint != "" and eval_cfg.load_checkpoint is not None
print("Evaluating reward function")
main(eval_cfg)
if __name__ == "__main__":
run_and_eval()
|
bc-irl-main
|
imitation_learning/eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from collections import defaultdict
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from torch.nn.utils import spectral_norm
from torch.utils.data import DataLoader
from imitation_learning.common.plotting import plot_actions
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
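# WGAN-style gradient penalty (Gulrajani et al., 2017): evaluate the discriminator
# on random interpolations of expert and policy samples and penalize deviations of
# its gradient norm from 1.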
def wass_grad_pen(
expert_state,
expert_action,
policy_state,
policy_action,
use_actions,
disc,
):
num_dims = len(expert_state.shape) - 1
alpha = torch.rand(expert_state.size(0), 1)
alpha_state = (
alpha.view(-1, *[1 for _ in range(num_dims)])
.expand_as(expert_state)
.to(expert_state.device)
)
mixup_data_state = alpha_state * expert_state + (1 - alpha_state) * policy_state
mixup_data_state.requires_grad = True
inputs = [mixup_data_state]
if use_actions:
alpha_action = alpha.expand_as(expert_action).to(expert_action.device)
mixup_data_action = (
alpha_action * expert_action + (1 - alpha_action) * policy_action
)
mixup_data_action.requires_grad = True
inputs.append(mixup_data_action)
else:
mixup_data_action = []
# disc = disc_fn(cur_obs=mixup_data_state, actions=mixup_data_action)
disc = disc.g(mixup_data_state)
ones = torch.ones(disc.size()).to(disc.device)
grad = torch.autograd.grad(
outputs=disc,
inputs=inputs,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
grad_pen = (grad.norm(2, dim=1) - 1).pow(2).mean()
return grad_pen
class GAIL(nn.Module):
def __init__(
self,
discriminator: DictConfig,
policy_updater: DictConfig,
get_dataset_fn,
batch_size: int,
num_discrim_batches: int,
discrim_opt: DictConfig,
reward_update_freq: int,
device,
policy,
num_envs,
spectral_norm,
grad_pen,
**kwargs,
):
super().__init__()
self.discriminator = instantiate(discriminator).to(device)
self.policy_updater = instantiate(policy_updater, policy=policy)
self._grad_pen = grad_pen
if spectral_norm:
self._apply_spectral_norm()
self.dataset = call(get_dataset_fn)
self.expert_data = DataLoader(self.dataset, batch_size, shuffle=True)
self.discrim_opt = instantiate(
discrim_opt, params=self.discriminator.parameters()
)
self.reward_update_freq = reward_update_freq
self._n_updates = 0
self.batch_size = batch_size
self.num_discrim_batches = num_discrim_batches
self.device = device
self._ep_rewards = torch.zeros(num_envs, device=self.device)
def _apply_spectral_norm(self):
for name, module in self.discriminator.named_modules():
            # Apply spectral normalization only to the top-level modules; for
            # nn.Sequential containers, descend one level and wrap each Linear
            # layer individually.
if name == "" or "." in name:
continue
if isinstance(module, nn.Sequential):
new_layers = []
for i in range(len(module)):
layer = module[i]
if isinstance(layer, nn.Linear):
layer = spectral_norm(layer)
new_layers.append(layer)
setattr(self.discriminator, name, nn.Sequential(*new_layers))
elif isinstance(module, nn.Linear):
setattr(self.discriminator, name, spectral_norm(module))
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"discrim_opt": self.discrim_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("discrim_opt")
if should_load_opt:
self.discrim_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.discriminator.get_reward(
cur_obs=cur_obs, actions=action, next_obs=next_obs, viz_reward=True
)
def _get_agent_samples(self, rollouts):
num_batches = len(rollouts) // self.batch_size
agent_data = rollouts.data_generator(num_batches, get_next_obs=True)
if self.num_discrim_batches != -1:
agent_data = itertools.islice(agent_data, self.num_discrim_batches)
return agent_data
def _update_discriminator(self, policy, rollouts, logger):
num_batches = len(rollouts) // self.batch_size
agent_data = self._get_agent_samples(rollouts)
for expert_batch, agent_batch in zip(self.expert_data, agent_data):
expert_d = self.discriminator(
cur_obs=expert_batch["observations"],
actions=expert_batch["actions"],
next_obs=expert_batch["next_observations"],
masks=(~expert_batch["terminals"].bool()).float(),
policy=policy,
)
agent_d = self.discriminator(
cur_obs=agent_batch["obs"],
actions=agent_batch["action"],
next_obs=agent_batch["next_obs"],
masks=agent_batch["mask"],
policy=policy,
)
expert_loss = F.binary_cross_entropy_with_logits(
expert_d, torch.ones_like(expert_d, device=self.device)
)
agent_loss = F.binary_cross_entropy_with_logits(
agent_d, torch.zeros_like(agent_d, device=self.device)
)
loss = expert_loss + agent_loss
# disc_fn = partial(self.discriminator, policy=policy)
if self._grad_pen != 0.0:
n_expert = len(expert_batch["observations"])
grad_pen = wass_grad_pen(
expert_batch["observations"],
expert_batch["actions"],
agent_batch["obs"][:n_expert],
agent_batch["action"][:n_expert],
False,
self.discriminator,
)
loss += self._grad_pen * grad_pen
self.discrim_opt.zero_grad()
loss.backward()
self.discrim_opt.step()
logger.collect_info("expert_loss", expert_loss.item())
logger.collect_info("agent_loss", agent_loss.item())
logger.collect_info("discim_loss", loss.item())
def update(self, policy, rollouts, logger, **kwargs):
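        # 1) Periodically refresh the discriminator on expert vs. agent batches,
        # 2) relabel the collected rollout rewards with the learned reward signal,
        # 3) run the inner policy update (e.g., PPO) on the relabeled rollouts.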
if (
self.reward_update_freq != -1
and self._n_updates % self.reward_update_freq == 0
):
self._update_discriminator(policy, rollouts, logger)
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
with torch.no_grad():
rollouts.rewards = self.discriminator.get_reward(
cur_obs=obs,
actions=actions,
next_obs=next_obs,
masks=masks,
policy=policy,
)
self._ep_rewards = log_finished_rewards(rollouts, self._ep_rewards, logger)
self.policy_updater.update(policy, rollouts, logger)
self._n_updates += 1
|
bc-irl-main
|
imitation_learning/gail/updater.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
from typing import Tuple
import torch
import torch.nn as nn
from rl_utils.common import make_mlp_layers
class GailRewardType(Enum):
AIRL = auto()
GAIL = auto()
RAW = auto()
class GailDiscriminator(nn.Module):
def __init__(
self,
obs_shape: Tuple[int],
action_dim: int,
reward_hidden_dim: int,
n_hidden_layers: int,
cost_take_dim: int,
use_actions: bool,
reward_type: str,
):
super().__init__()
self.cost_take_dim = cost_take_dim
input_size = obs_shape[0] if cost_take_dim == -1 else cost_take_dim
if use_actions:
input_size += action_dim
self.action_input = use_actions
self.discrim_net = nn.Sequential(
*make_mlp_layers(input_size, 1, reward_hidden_dim, n_hidden_layers)
)
self.reward_type = GailRewardType[reward_type]
def forward(self, cur_obs=None, actions=None, **kwargs):
if self.cost_take_dim != -1:
if cur_obs is not None:
cur_obs = cur_obs[:, :, : self.cost_take_dim]
if self.action_input:
inputs = torch.cat([cur_obs, actions], -1)
else:
inputs = cur_obs
return self.discrim_net(inputs)
def get_reward(self, cur_obs=None, actions=None, **kwargs):
d_val = self.forward(cur_obs, actions)
s = torch.sigmoid(d_val)
eps = 1e-20
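        # Reward transforms: AIRL uses log D - log(1 - D) (the raw logit), GAIL uses
        # log D, and RAW returns the unsquashed discriminator output.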
if self.reward_type == GailRewardType.AIRL:
reward = (s + eps).log() - (1 - s + eps).log()
elif self.reward_type == GailRewardType.GAIL:
reward = (s + eps).log()
elif self.reward_type == GailRewardType.RAW:
reward = d_val
return reward
|
bc-irl-main
|
imitation_learning/gail/discriminator.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from rl_utils.models import (FixedCategorical, FixedNormal, SimpleCNN,
build_rnn_state_encoder)
def init_weights(m, gain=1):
if isinstance(m, nn.Linear):
torch.nn.init.orthogonal_(m.weight, gain=gain)
m.bias.data.fill_(0.0)
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
self.apply(partial(init_weights, gain=0.01))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, std_init, squash_mean):
super().__init__()
if squash_mean:
self.fc_mean = nn.Sequential(
nn.Linear(num_inputs, num_outputs),
nn.Tanh(),
)
else:
self.fc_mean = nn.Linear(num_inputs, num_outputs)
self.logstd = nn.Parameter(torch.full((1, num_outputs), float(std_init)))
self.apply(init_weights)
def forward(self, x):
action_mean = self.fc_mean(x)
action_logstd = self.logstd.expand_as(action_mean)
return FixedNormal(action_mean, action_logstd.exp())
class Policy(nn.Module):
def __init__(
self,
obs_shape,
action_dim,
action_is_discrete,
hidden_size,
recurrent_hidden_size,
is_recurrent,
std_init=0.0,
squash_mean=False,
):
super().__init__()
if isinstance(obs_shape, dict):
is_visual_obs = any([len(v) == 3 for k, v in obs_shape.items()])
else:
is_visual_obs = len(obs_shape) == 3
if is_visual_obs:
self.backbone = SimpleCNN(obs_shape, hidden_size)
input_size = hidden_size
else:
self.backbone = nn.Sequential()
input_size = obs_shape[0]
if is_recurrent:
self.rnn_encoder = build_rnn_state_encoder(
recurrent_hidden_size, recurrent_hidden_size
)
else:
# Pass through
self.rnn_encoder = lambda hidden, hxs, _: (hidden, hxs)
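        # Two-layer tanh MLP heads: `actor` feeds the action distribution and
        # `critic` predicts the state value V(s).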
self.actor = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
)
self.critic = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 1),
)
self.apply(partial(init_weights, gain=np.sqrt(2)))
if action_is_discrete:
self.actor_dist = Categorical(hidden_size, action_dim)
else:
self.actor_dist = DiagGaussian(
hidden_size, action_dim, std_init, squash_mean
)
def get_value(self, obs, hxs, masks):
hidden, _ = self.forward(obs, hxs, masks)
return self.critic(hidden)
def evaluate_actions(self, obs, hxs, masks, action):
hidden, hxs = self.forward(obs, hxs, masks)
critic_value = self.critic(hidden)
actor_hidden = self.actor(hidden)
dist = self.actor_dist(actor_hidden)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy()
return {
"log_prob": action_log_probs,
"value": critic_value,
"dist_entropy": dist_entropy,
}
def forward(self, obs, hxs, masks):
hidden = self.backbone(obs)
hidden, hxs = self.rnn_encoder(hidden, hxs, masks)
return hidden, hxs
def get_action_dist(self, obs, hxs, masks):
hidden, hxs = self.forward(obs, hxs, masks)
actor_hidden = self.actor(hidden)
return self.actor_dist(actor_hidden)
def act(self, obs, hxs, masks, is_eval=False):
hidden, hxs = self.forward(obs, hxs, masks)
critic_value = self.critic(hidden)
actor_hidden = self.actor(hidden)
dist = self.actor_dist(actor_hidden)
if is_eval:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy()
return {
"actions": action,
"action_log_probs": action_log_probs,
"value_preds": critic_value,
"hxs": hxs,
"extra": {
"dist_entropy": dist_entropy,
},
}
|
bc-irl-main
|
imitation_learning/policy_opt/policy.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bc-irl-main
|
imitation_learning/policy_opt/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import Dict, Optional
import torch
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage:
def __init__(
self,
num_steps,
num_processes,
obs_shape,
action_dim,
action_is_discrete,
recurrent_hidden_state_size,
device,
fetch_final_obs,
):
super().__init__()
if isinstance(obs_shape, dict):
self.obs_keys = obs_shape
else:
self.obs_keys = {None: obs_shape}
self.obs: Dict[Optional[str], torch.Tensor] = {}
for k, space_shape in self.obs_keys.items():
ob = torch.zeros(num_steps + 1, num_processes, *space_shape)
self.obs[k] = ob
self.rewards = torch.zeros(num_steps, num_processes, 1)
self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)
self.action_log_probs = torch.zeros(num_steps, num_processes, 1)
self.recurrent_hidden_states = torch.zeros(
num_steps + 1, num_processes, recurrent_hidden_state_size
)
self.actions = torch.zeros(num_steps, num_processes, action_dim)
if action_is_discrete:
self.actions = self.actions.long()
self.masks = torch.zeros(num_steps + 1, num_processes, 1)
self.bad_masks = torch.ones(num_steps + 1, num_processes, 1)
if fetch_final_obs:
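            # NOTE: `space_shape` is whatever the loop above saw last; this is fine
            # because next-observation fetching only supports single-key observation
            # spaces (see data_generator below).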
self.final_obs = torch.zeros(num_steps, num_processes, *space_shape)
else:
self.final_obs = None
self.num_steps = num_steps
self.n_procs = num_processes
self.step = 0
self.to(device)
def compute_masks(self, done, infos):
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
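        # bad_masks is 0 where the episode ended because of a time limit
        # ("bad_transition"), so bootstrapped returns are not propagated across
        # those boundaries (see compute_derived in the PPO updaters).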
bad_masks = torch.FloatTensor(
[[0.0] if "bad_transition" in info.keys() else [1.0] for info in infos]
)
return masks, bad_masks
def __len__(self):
return self.num_steps * self.n_procs
def init_storage(self, obs):
for k in self.obs_keys:
if k is None:
self.obs[k][0].copy_(obs)
else:
self.obs[k][0].copy_(obs[k])
self.masks = self.masks.zero_()
self.bad_masks = self.bad_masks.zero_()
self.recurrent_hidden_states = self.recurrent_hidden_states.zero_()
def to(self, device):
for k in self.obs_keys:
self.obs[k] = self.obs[k].to(device)
if self.final_obs is not None:
self.final_obs = self.final_obs.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
self.masks = self.masks.to(device)
self.bad_masks = self.bad_masks.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
def insert(
self,
next_obs,
rewards,
done,
infos,
actions,
value_preds,
action_log_probs,
hxs,
**kwargs,
):
masks, bad_masks = self.compute_masks(done, infos)
for k in self.obs_keys:
if k is None:
self.obs[k][self.step + 1].copy_(next_obs)
else:
self.obs[k][self.step + 1].copy_(next_obs[k])
self.actions[self.step].copy_(actions)
self.action_log_probs[self.step].copy_(action_log_probs)
self.value_preds[self.step].copy_(value_preds)
self.rewards[self.step].copy_(rewards)
self.masks[self.step + 1].copy_(masks)
self.bad_masks[self.step + 1].copy_(bad_masks)
self.recurrent_hidden_states[self.step + 1].copy_(hxs)
if self.final_obs is not None:
for env_i, info in enumerate(infos):
if "final_obs" in info:
self.final_obs[self.step, env_i].copy_(info["final_obs"])
self.step = (self.step + 1) % self.num_steps
def get_obs(self, idx):
ret_d = {}
for k in self.obs_keys:
if k is None:
return self.obs[k][idx]
ret_d[k] = self.obs[k][idx]
return ret_d
def after_update(self):
for k in self.obs_keys:
self.obs[k][0].copy_(self.obs[k][-1])
self.masks[0].copy_(self.masks[-1])
self.bad_masks[0].copy_(self.bad_masks[-1])
self.recurrent_hidden_states[0].copy_(self.recurrent_hidden_states[-1])
def data_generator(self, num_mini_batch, get_next_obs=False, **add_data):
if get_next_obs and self.final_obs is None:
raise ValueError(
"Must fetch final observations if getting the next observation"
)
if get_next_obs and len(self.obs_keys) > 1:
raise ValueError("Cannot fetch next obseration with dictionary observation")
num_processes = self.rewards.size(1)
if num_processes < num_mini_batch:
raise ValueError(
f"Number of processes {num_processes} is smaller than num mini batch {num_mini_batch}"
)
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
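        # Each mini-batch contains whole trajectories from a random subset of
        # environments (keeping sequences intact for recurrent hidden states);
        # time and environment dimensions are flattened below.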
for start_ind in range(0, num_processes, num_envs_per_batch):
ret_data = {
"obs": defaultdict(list),
"hxs": [],
"action": [],
"value": [],
"mask": [],
"prev_log_prob": [],
"reward": [],
"add_batch": defaultdict(list),
}
if get_next_obs:
ret_data["next_obs"] = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
for k in self.obs_keys:
ret_data["obs"][k].append(self.obs[k][:-1, ind])
if get_next_obs:
mask = self.masks[1:, ind]
final_obs = self.final_obs[:, ind]
# This code assumes observation dict has only 1 key.
first_key = next(iter(self.obs_keys))
obs = self.obs[first_key][1:, ind]
ret_data["next_obs"].append((mask * obs) + ((1 - mask) * final_obs))
ret_data["hxs"].append(self.recurrent_hidden_states[0:1, ind])
ret_data["action"].append(self.actions[:, ind])
ret_data["value"].append(self.value_preds[:-1, ind])
ret_data["reward"].append(self.rewards[:, ind])
ret_data["mask"].append(self.masks[:-1, ind])
ret_data["prev_log_prob"].append(self.action_log_probs[:, ind])
for k, v in add_data.items():
ret_data["add_batch"][k].append(v[:, ind])
T, N = self.num_steps, num_envs_per_batch
for k, v in ret_data.items():
if k == "hxs":
ret_data[k] = torch.stack(v, 1).view(N, -1)
elif isinstance(v, dict):
for sub_k, sub_v in v.items():
ret_data[k][sub_k] = _flatten_helper(
T, N, torch.stack(sub_v, 1)
)
else:
ret_data[k] = _flatten_helper(T, N, torch.stack(v, 1))
# Pop the add batch keys out a level
add_batch = ret_data.pop("add_batch")
ret_data.update(add_batch)
# No need to return obs dict if there's only one thing in
# dictionary
if len(ret_data["obs"]) == 1:
ret_data["obs"] = next(iter(ret_data["obs"].values()))
yield ret_data
|
bc-irl-main
|
imitation_learning/policy_opt/storage.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import torch.nn as nn
from hydra.utils import instantiate as hydra_instantiate
from imitation_learning.policy_opt.policy import Policy
class PPO:
def __init__(
self,
use_gae: bool,
gae_lambda: float,
gamma: float,
use_clipped_value_loss: bool,
        clip_param: float,
value_loss_coef: float,
entropy_coef: float,
max_grad_norm: float,
num_mini_batch: int,
num_epochs: int,
optimizer_params: Dict[str, Any],
num_steps: int,
num_envs: int,
policy: Policy,
**kwargs,
):
super().__init__()
self.use_gae = use_gae
self.gae_lambda = gae_lambda
self.gamma = gamma
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_param = clip_param
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.num_mini_batch = num_mini_batch
self.num_epochs = num_epochs
self.num_steps = num_steps
self.num_envs = num_envs
self.opt: torch.optim.Optimizer = hydra_instantiate(
optimizer_params, params=policy.parameters()
)
self.returns = torch.zeros(self.num_steps + 1, self.num_envs, 1)
def state_dict(self):
return {"opt": self.opt.state_dict()}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("opt")
if should_load_opt:
self.opt.load_state_dict(opt_state)
def update(
self,
policy,
storage,
logger,
**kwargs,
):
with torch.no_grad():
last_value = policy.get_value(
storage.get_obs(-1),
storage.recurrent_hidden_states[-1],
storage.masks[-1],
)
advantages = self.compute_derived(
policy,
storage.rewards,
storage.masks,
storage.bad_masks,
storage.value_preds,
last_value,
)
for _ in range(self.num_epochs):
data_gen = storage.data_generator(
self.num_mini_batch, returns=self.returns[:-1], advantages=advantages
)
for sample in data_gen:
ac_eval = policy.evaluate_actions(
sample["obs"],
sample["hxs"],
sample["mask"],
sample["action"],
)
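                # PPO clipped surrogate: r = exp(log pi_new(a|s) - log pi_old(a|s));
                # maximize min(r * A, clip(r, 1 - eps, 1 + eps) * A).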
ratio = torch.exp(ac_eval["log_prob"] - sample["prev_log_prob"])
surr1 = ratio * sample["advantages"]
surr2 = (
torch.clamp(
ratio,
1.0 - self.clip_param,
1.0 + self.clip_param,
)
* sample["advantages"]
)
action_loss = -torch.min(surr1, surr2).mean(0)
if self.use_clipped_value_loss:
value_pred_clipped = sample["value"] + (
ac_eval["value"] - sample["value"]
).clamp(-self.clip_param, self.clip_param)
value_losses = (ac_eval["value"] - sample["returns"]).pow(2)
value_losses_clipped = (value_pred_clipped - sample["returns"]).pow(
2
)
value_loss = (
0.5 * torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = (
0.5 * (sample["returns"] - ac_eval["value"]).pow(2).mean()
)
loss = (
value_loss * self.value_loss_coef
+ action_loss
- ac_eval["dist_entropy"].mean() * self.entropy_coef
)
self.opt.zero_grad()
loss.backward()
if self.max_grad_norm > 0:
nn.utils.clip_grad_norm_(policy.parameters(), self.max_grad_norm)
self.opt.step()
logger.collect_info("value_loss", value_loss.mean().item())
logger.collect_info("action_loss", action_loss.mean().item())
logger.collect_info(
"dist_entropy", ac_eval["dist_entropy"].mean().item()
)
def compute_derived(
self,
policy,
rewards,
masks,
bad_masks,
value_preds,
last_value,
):
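        # Generalized Advantage Estimation (GAE):
        #   delta_t = r_t + gamma * V(s_{t+1}) * mask_{t+1} - V(s_t)
        #   A_t     = delta_t + gamma * lambda * mask_{t+1} * A_{t+1}
        # with bad_masks zeroing the recursion across time-limit terminations.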
if self.use_gae:
value_preds[-1] = last_value
gae = 0
for step in reversed(range(rewards.size(0))):
delta = (
rewards[step]
+ self.gamma * value_preds[step + 1] * masks[step + 1]
- value_preds[step]
)
gae = delta + self.gamma * self.gae_lambda * masks[step + 1] * gae
gae = gae * bad_masks[step + 1]
self.returns[step] = gae + value_preds[step]
else:
self.returns[-1] = last_value
for step in reversed(range(rewards.size(0))):
self.returns[step] = (
self.returns[step + 1] * self.gamma * masks[step + 1]
+ rewards[step]
) * bad_masks[step + 1] + (1 - bad_masks[step + 1]) * value_preds[step]
advantages = self.returns[:-1] - value_preds[:-1]
# Normalize the advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return advantages
|
bc-irl-main
|
imitation_learning/policy_opt/ppo.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from higher.optim import DifferentiableOptimizer
from hydra.utils import instantiate
from omegaconf import DictConfig
class DifferentiablePPO(nn.Module):
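    """PPO update written against a `higher` DifferentiableOptimizer so that
    gradients can flow through the inner policy updates back to the learned
    reward parameters (used by the BC-IRL meta-objective)."""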
def __init__(
self,
use_gae: bool,
gae_lambda: float,
gamma: float,
use_clipped_value_loss: bool,
        clip_param: float,
value_loss_coef: float,
entropy_coef: float,
max_grad_norm: float,
num_mini_batch: int,
num_epochs: int,
):
super().__init__()
self.use_gae = use_gae
self.gae_lambda = gae_lambda
self.gamma = gamma
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_param = clip_param
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.num_mini_batch = num_mini_batch
self.num_epochs = num_epochs
self.value_loss_coef = value_loss_coef
def update(
self,
policy,
storage,
logger,
optimizer: DifferentiableOptimizer,
rewards,
) -> None:
with torch.no_grad():
last_value = policy.get_value(
storage.get_obs(-1),
storage.recurrent_hidden_states[-1],
storage.masks[-1],
)
advantages, returns = self.compute_derived(
rewards,
storage.masks,
storage.bad_masks,
storage.value_preds.detach(),
last_value,
)
for _ in range(self.num_epochs):
data_gen = storage.data_generator(
self.num_mini_batch, returns=returns[:-1], advantages=advantages
)
for sample in data_gen:
ac_eval = policy.evaluate_actions(
sample["obs"],
sample["hxs"],
sample["mask"],
sample["action"],
)
ratio = torch.exp(ac_eval["log_prob"] - sample["prev_log_prob"])
surr1 = ratio * sample["advantages"]
surr2 = (
torch.clamp(
ratio,
1.0 - self.clip_param,
1.0 + self.clip_param,
)
* sample["advantages"]
)
action_loss = -torch.min(surr1, surr2).mean(0)
value_target = sample["returns"].detach()
if self.use_clipped_value_loss:
value_pred_clipped = sample["value"] + (
ac_eval["value"] - sample["value"]
).clamp(-self.clip_param, self.clip_param)
value_losses = (ac_eval["value"] - value_target).pow(2)
value_losses_clipped = (value_pred_clipped - value_target).pow(2)
value_loss = (
0.5 * torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = 0.5 * (value_target - ac_eval["value"]).pow(2).mean()
loss = (
value_loss * self.value_loss_coef
+ action_loss
- ac_eval["dist_entropy"].mean() * self.entropy_coef
)
# if self.max_grad_norm > 0:
# nn.utils.clip_grad_norm_(policy.parameters(), self.max_grad_norm)
optimizer.step(loss)
logger.collect_info("value_loss", value_loss.mean().item())
logger.collect_info("action_loss", action_loss.mean().item())
logger.collect_info(
"dist_entropy", ac_eval["dist_entropy"].mean().item()
)
def compute_derived(
self,
rewards,
masks,
bad_masks,
value_preds,
last_value,
):
num_steps, num_envs = rewards.shape[:2]
returns = torch.zeros(num_steps + 1, num_envs, 1, device=last_value.device)
if self.use_gae:
value_preds[-1] = last_value
gae = 0
for step in reversed(range(rewards.size(0))):
delta = (
rewards[step]
+ self.gamma * value_preds[step + 1] * masks[step + 1]
- value_preds[step]
)
gae = delta + self.gamma * self.gae_lambda * masks[step + 1] * gae
gae = gae * bad_masks[step + 1]
returns[step] = gae + value_preds[step]
else:
returns[-1] = last_value
for step in reversed(range(rewards.size(0))):
returns[step] = (
returns[step + 1] * self.gamma * masks[step + 1] + rewards[step]
) * bad_masks[step + 1] + (1 - bad_masks[step + 1]) * value_preds[step]
advantages = returns[:-1] - value_preds[:-1]
# Normalize the advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return advantages, returns
|
bc-irl-main
|
imitation_learning/bc_irl/differentiable_ppo.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bc-irl-main
|
imitation_learning/bc_irl/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable
import higher
import torch
import torch.nn as nn
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common import DictDataset
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from imitation_learning.common.plotting import plot_actions
from imitation_learning.common.utils import (extract_transition_batch,
log_finished_rewards)
class BCIRL(nn.Module):
def __init__(
self,
reward: DictConfig,
inner_updater: DictConfig,
get_dataset_fn,
batch_size: int,
inner_opt: DictConfig,
reward_opt: DictConfig,
irl_loss: DictConfig,
plot_interval: int,
norm_expert_actions: bool,
n_inner_iters: int,
num_steps: int,
reward_update_freq: int,
storage_cfg: DictConfig,
device,
total_num_updates: int,
num_envs: int,
use_lr_decay: bool,
policy_init_fn: Callable[[nn.Module, nn.Module], nn.Module],
force_num_env_steps_lr_decay: float = -1.0,
**kwargs,
):
super().__init__()
if inner_updater is not None:
self.inner_updater = instantiate(inner_updater)
self.reward = instantiate(reward).to(device)
self.dataset = call(get_dataset_fn)
self.data_loader = DataLoader(self.dataset, batch_size, shuffle=True)
self.inner_opt = inner_opt
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
self._n_updates = 0
self.use_lr_decay = use_lr_decay
self.policy_init_fn = policy_init_fn
if force_num_env_steps_lr_decay > 0:
use_total_num_updates = force_num_env_steps_lr_decay // (
num_envs * num_steps
)
else:
use_total_num_updates = total_num_updates
self.lr_scheduler = LambdaLR(
optimizer=self.reward_opt,
lr_lambda=lambda x: 1 - (self._n_updates / use_total_num_updates),
)
self.irl_loss = instantiate(irl_loss)
self.data_loader_iter = iter(self.data_loader)
self.plot_interval = plot_interval
self.norm_expert_actions = norm_expert_actions
self.n_inner_iters = n_inner_iters
self.num_steps = num_steps
self.reward_update_freq = reward_update_freq
self.storage_cfg = storage_cfg
self.device = device
self.all_rollouts = [
instantiate(self.storage_cfg, device=self.device)
for _ in range(self.n_inner_iters - 1)
]
self._ep_rewards = torch.zeros(num_envs, device=self.device)
def state_dict(self):
return {
**super().state_dict(),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.reward(cur_obs, action, next_obs)
def _irl_loss_step(self, policy, logger):
expert_batch = next(self.data_loader_iter, None)
if expert_batch is None:
self.data_loader_iter = iter(self.data_loader)
expert_batch = next(self.data_loader_iter, None)
expert_actions = expert_batch["actions"].to(self.device)
expert_obs = expert_batch["observations"].to(self.device)
if self.norm_expert_actions:
# Clip expert actions to be within [-1,1]. Actions have no effect
# out of that range
expert_actions = torch.clamp(expert_actions, -1.0, 1.0)
dist = policy.get_action_dist(expert_obs, None, None)
pred_actions = dist.mean
irl_loss_val = self.irl_loss(expert_actions, pred_actions)
irl_loss_val.backward(retain_graph=True)
logger.collect_info("irl_loss", irl_loss_val.item())
if self._n_updates % self.plot_interval == 0:
plot_actions(
pred_actions.detach().cpu(),
expert_actions.detach().cpu(),
self._n_updates,
logger.vid_dir,
)
@property
def inner_lr(self):
return self.inner_opt["lr"]
def update(self, policy, rollouts, logger, envs):
self.reward_opt.zero_grad()
policy = call(self.policy_init_fn, old_policy=policy).to(self.device)
policy_opt = instantiate(
self.inner_opt, lr=self.inner_lr, params=policy.parameters()
)
        # Set up the meta (outer) optimization loop.
with higher.innerloop_ctx(
policy,
policy_opt,
) as (dpolicy, diffopt):
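            # Inner loop: run `n_inner_iters` differentiable policy updates with the
            # current reward, collecting fresh on-policy data between iterations.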
for inner_i in range(self.n_inner_iters):
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
rollouts.rewards = self.reward(obs, actions, next_obs)
if inner_i == 0:
self._ep_rewards = log_finished_rewards(
rollouts, self._ep_rewards, logger
)
# Inner loop policy update
self.inner_updater.update(
dpolicy, rollouts, logger, diffopt, rollouts.rewards
)
if inner_i != self.n_inner_iters - 1:
new_rollouts = self.all_rollouts[inner_i - 1]
for k in rollouts.obs_keys:
new_rollouts.obs[k][0].copy_(rollouts.obs[k][-1])
new_rollouts.masks[0].copy_(rollouts.masks[-1])
new_rollouts.bad_masks[0].copy_(rollouts.bad_masks[-1])
new_rollouts.recurrent_hidden_states[0].copy_(
rollouts.recurrent_hidden_states[-1]
)
# Collect the next batch of data.
new_rollouts.after_update()
for step_idx in range(self.num_steps):
with torch.no_grad():
act_data = policy.act(
new_rollouts.get_obs(step_idx),
new_rollouts.recurrent_hidden_states[step_idx],
new_rollouts.masks[step_idx],
)
next_obs, reward, done, info = envs.step(act_data["action"])
new_rollouts.insert(next_obs, reward, done, info, **act_data)
rollouts = new_rollouts
            # Outer (meta) objective: match the updated policy's actions to the
            # expert's on expert states; gradients flow back through the
            # differentiable inner updates into the reward parameters.
self._irl_loss_step(dpolicy, logger)
if (
self.reward_update_freq != -1
and self._n_updates % self.reward_update_freq == 0
):
self.reward_opt.step()
if hasattr(self.reward, "log"):
self.reward.log(logger)
policy.load_state_dict(dpolicy.state_dict())
if self.use_lr_decay and self.reward_update_freq != -1:
# Step even if we did not update so we properly decay to 0.
self.lr_scheduler.step()
logger.collect_info("reward_lr", self.lr_scheduler.get_last_lr()[0])
self._n_updates += 1
|
bc-irl-main
|
imitation_learning/bc_irl/updater.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
import torch
import torch.nn as nn
from hydra.utils import instantiate
from rl_utils.common import make_mlp_layers
def full_reset_init(old_policy, policy_cfg, **kwargs):
return instantiate(policy_cfg)
def reg_init(old_policy, **kwargs):
return old_policy
class StructuredReward(nn.Module):
def __init__(self, obs_shape, **kwargs):
super().__init__()
self.center = nn.Parameter(torch.randn(obs_shape[0]))
def forward(self, X):
return -1.0 * ((X - self.center) ** 2).mean(-1, keepdims=True)
def log(self, logger):
for i, center_val in enumerate(self.center):
logger.collect_info(f"reward_{i}", center_val.item())
class GtReward(nn.Module):
    def __init__(self, reward_thresh: float = 0.1, slack: float = 1.0):
        # NOTE: the original constructor neither called super().__init__() nor set
        # `_reward_thresh` / `_slack`, which `forward` relies on. The defaults here
        # are illustrative assumptions; real values would come from the config.
        super().__init__()
        self._reward_thresh = reward_thresh
        self._slack = slack
def forward(self, cur_obs=None, actions=None, next_obs=None):
cur_dist = torch.linalg.norm(cur_obs, dim=-1)
reward = torch.full(cur_dist.shape, -self._slack)
assign = -self._slack * cur_dist
should_give_reward = cur_dist < self._reward_thresh
reward[should_give_reward] = assign[should_give_reward]
return reward
|
bc-irl-main
|
imitation_learning/bc_irl/rewards.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common.net_utils import make_mlp_layers
from torch.utils.data import DataLoader
from imitation_learning.common.plotting import plot_actions
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
from imitation_learning.gail.updater import GAIL
class fIRL(GAIL):
"""
From https://github.com/twni2016/f-IRL/blob/a3f1ec66f29c6d659abba630f70f8ae2e59ebe1e/firl/divs/f_div_disc.py
"""
def __init__(
self,
discriminator: DictConfig,
reward: DictConfig,
policy_updater: DictConfig,
get_dataset_fn,
batch_size: int,
num_discrim_batches: int,
reward_opt: DictConfig,
discrim_opt: DictConfig,
reward_update_freq: int,
importance_sampling: bool,
div_type: str,
device,
policy,
num_envs,
**kwargs,
):
        super().__init__(
            discriminator,
            policy_updater,
            get_dataset_fn,
            batch_size,
            num_discrim_batches,
            discrim_opt,
            reward_update_freq,
            device,
            policy,
            num_envs,
            # GAIL.__init__ also requires these two arguments; they were not
            # forwarded originally, so they are disabled here (assumed defaults).
            spectral_norm=False,
            grad_pen=0.0,
        )
self.reward = instantiate(reward).to(device)
self._div_type = div_type
self._importance_sampling = importance_sampling
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict, should_load_opt)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
        # Intentionally pass cur_obs as next_obs so the reward is visualized for that state.
return self.reward(next_obs=cur_obs)
def _update_reward(self, policy, rollouts, logger):
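        # f-IRL reward gradient: the surrogate loss below is a covariance between
        # t1 (a divergence-specific transform of the discriminator logits) and
        # t2 (the learned state reward), matching the gradient estimator of the
        # chosen f-divergence between expert and agent state marginals.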
agent_data = self._get_agent_samples(rollouts)
for expert_batch, agent_batch in zip(self.expert_data, agent_data):
# Combine experience from both.
with torch.no_grad():
obs = torch.cat(
[
expert_batch["next_observations"],
agent_batch["next_obs"],
],
0,
)
actions = torch.cat([expert_batch["actions"], agent_batch["action"]], 0)
logits = self.discriminator(cur_obs=obs)
            # Divergence-specific transform of the discriminator logits
            # (forward KL, reverse KL, or JS).
if self._div_type == "fkl":
t1 = torch.exp(logits)
elif self._div_type == "rkl":
t1 = logits
elif self._div_type == "js":
t1 = F.softplus(logits)
else:
raise ValueError()
t1 = -t1
t2 = self.reward(next_obs=obs)
if self._importance_sampling:
with torch.no_grad():
traj_reward = t2.detach().clone()
traj_log_prob = policy.evaluate_actions(obs, None, None, actions)[
"log_prob"
]
IS_ratio = F.softmax(traj_reward - traj_log_prob, dim=0)
loss = (IS_ratio * t1 * t2).mean() - (
(IS_ratio * t1).mean() * (IS_ratio * t2).mean()
)
else:
loss = (t1 * t2).mean() - (t1.mean() * t2.mean())
self.reward_opt.zero_grad()
loss.backward()
self.reward_opt.step()
logger.collect_info("reward_loss", loss.item())
def update(self, policy, rollouts, logger, **kwargs):
if (
self.reward_update_freq != -1
and self._n_updates % self.reward_update_freq == 0
):
self._update_discriminator(policy, rollouts, logger)
self._update_reward(policy, rollouts, logger)
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
with torch.no_grad():
rollouts.rewards = self.reward(next_obs=next_obs)
self._ep_rewards = log_finished_rewards(rollouts, self._ep_rewards, logger)
self.policy_updater.update(policy, rollouts, logger)
self._n_updates += 1
|
bc-irl-main
|
imitation_learning/f_irl/updater.py
|
bc-irl-main
|
imitation_learning/config/logger/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common import DictDataset, make_mlp_layers
from torch.utils.data import DataLoader
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
class MaxEntIRL(nn.Module):
def __init__(
self,
reward: DictConfig,
reward_opt: DictConfig,
get_dataset_fn,
batch_size: int,
num_cost_epochs: int,
device,
policy_updater: DictConfig,
should_update_reward: bool,
policy,
num_envs,
grid_lower=-1.5,
grid_upper=1.5,
grid_density=310,
**kwargs
):
super().__init__()
self.reward = instantiate(reward).to(device)
self.policy_updater = instantiate(policy_updater, policy=policy)
self.demo_obs = call(get_dataset_fn)["next_observations"]
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
self.num_cost_epochs = num_cost_epochs
self._ep_rewards = torch.zeros(num_envs, device=device)
self.should_update_reward = should_update_reward
        # Instead of policy samples we evaluate the reward on a dense grid, which
        # is feasible in this low-dimensional toy example and gives an
        # (approximately) exact MaxEnt partition function.
self.grid_samples = (
torch.stack(
torch.meshgrid(
torch.linspace(grid_lower, grid_upper, grid_density),
torch.linspace(grid_lower, grid_upper, grid_density),
indexing="ij",
),
-1,
)
.to(device)
.view(-1, 2)
)
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.reward(next_obs=next_obs)
def update(self, policy, rollouts, logger, **kwargs):
if self.should_update_reward:
for _ in range(self.num_cost_epochs):
reward_samples = self.reward(next_obs=self.grid_samples)
                # Reward of the expert (demo) states.
reward_demos = self.reward(next_obs=self.demo_obs)
                # MaxEnt IRL objective: maximize E_demo[r(s)] - log Z, where log Z
                # is estimated by a log-mean-exp of the reward over the grid.
loss_ME = -(
torch.mean(reward_demos)
- (
torch.logsumexp(reward_samples, dim=0)
- np.log(len(reward_samples))
)
)
self.reward_opt.zero_grad()
loss_ME.backward()
self.reward_opt.step()
logger.collect_info("irl_loss", loss_ME.item())
else:
with torch.no_grad():
_, _, next_obs, _ = extract_transition_batch(rollouts)
rollouts.rewards = self.reward(next_obs=next_obs)
self._ep_rewards = log_finished_rewards(
rollouts, self._ep_rewards, logger
)
self.policy_updater.update(policy, rollouts, logger)
|
bc-irl-main
|
imitation_learning/maxent/updater.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.nn as nn
from rl_utils.common import make_mlp_layers
class AirlDiscriminator(nn.Module):
def __init__(
self,
obs_shape: Tuple[int],
action_dim: int,
reward_hidden_dim: int,
cost_take_dim: int,
n_hidden_layers: int,
use_shaped_reward: bool,
gamma: float,
airl_reward_bonus: float,
):
super().__init__()
self.cost_take_dim = cost_take_dim
state_size = obs_shape[0] if cost_take_dim == -1 else cost_take_dim
self.g = nn.Sequential(
*make_mlp_layers(state_size, 1, reward_hidden_dim, n_hidden_layers)
)
self.h = nn.Sequential(
*make_mlp_layers(state_size, 1, reward_hidden_dim, n_hidden_layers)
)
self.use_shaped_reward = use_shaped_reward
self.gamma = gamma
self.airl_reward_bonus = airl_reward_bonus
def f(self, cur_obs, next_obs, masks, force_no_shaped=False, **kwargs):
rs = self.g(cur_obs)
if self.use_shaped_reward and not force_no_shaped:
vs = self.h(cur_obs)
next_vs = self.h(next_obs)
return rs + (self.gamma * masks * next_vs) - vs
else:
return rs
def forward(self, cur_obs, next_obs, actions, masks, policy, **kwargs):
log_p = self.f(cur_obs, next_obs, masks)
with torch.no_grad():
log_q = policy.evaluate_actions(cur_obs, {}, masks, actions)["log_prob"]
return log_p - log_q
def get_reward(
self,
cur_obs,
next_obs,
masks=None,
actions=None,
policy=None,
viz_reward=False,
**kwargs
):
log_p = self.f(cur_obs, next_obs, masks=masks, force_no_shaped=viz_reward)
if viz_reward:
return log_p
with torch.no_grad():
log_q = policy.evaluate_actions(cur_obs, {}, masks, actions)["log_prob"]
logits = log_p - (self.airl_reward_bonus * log_q)
s = torch.sigmoid(logits)
eps = 1e-20
return (s + eps).log() - (1 - s + eps).log()
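if __name__ == "__main__":
    # Illustrative check (not part of the original module): for the logistic
    # discriminator above, log D - log(1 - D) recovers the logits, so get_reward
    # evaluates to f(s, s') - airl_reward_bonus * log pi(a|s), up to the epsilon
    # added for numerical stability.
    logits = torch.tensor([-2.0, 0.0, 3.0])
    s = torch.sigmoid(logits)
    eps = 1e-20
    recovered = (s + eps).log() - (1 - s + eps).log()
    print(torch.allclose(recovered, logits, atol=1e-4))  # True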
|
bc-irl-main
|
imitation_learning/airl/discriminator.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common import DictDataset, make_mlp_layers
from torch.utils.data import DataLoader
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
class GCL(nn.Module):
def __init__(
self,
reward: DictConfig,
reward_opt: DictConfig,
get_dataset_fn,
batch_size: int,
device,
policy_updater: DictConfig,
should_update_reward: bool,
policy,
num_envs,
**kwargs
):
super().__init__()
self.reward = instantiate(reward).to(device)
self.policy_updater = instantiate(policy_updater, policy=policy)
self.batch_size = batch_size
self.dataset = call(get_dataset_fn)
self.expert_data = DataLoader(self.dataset, batch_size, shuffle=True)
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
self._ep_rewards = torch.zeros(num_envs, device=device)
self.should_update_reward = should_update_reward
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.reward(next_obs=next_obs)
def update(self, policy, rollouts, logger, **kwargs):
if self.should_update_reward:
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
reward_samples = []
num_batches = len(rollouts) // self.batch_size
agent_data = rollouts.data_generator(num_batches, get_next_obs=True)
for expert_batch, agent_batch in zip(self.expert_data, agent_data):
ac_eval = policy.evaluate_actions(
agent_batch["obs"],
agent_batch["hxs"],
agent_batch["mask"],
agent_batch["action"],
)
reward_demos = self.reward(next_obs=expert_batch["next_observations"])
reward_samples = self.reward(next_obs=agent_batch["next_obs"])
loss_IOC = -(
torch.mean(reward_demos)
- (
torch.logsumexp(
reward_samples - ac_eval["log_prob"],
dim=0,
keepdim=True,
)
                        - torch.log(torch.tensor(float(len(reward_samples)), device=reward_samples.device))
)
)
self.reward_opt.zero_grad()
loss_IOC.backward()
self.reward_opt.step()
logger.collect_info("irl_loss", loss_IOC.item())
with torch.no_grad():
_, _, next_obs, _ = extract_transition_batch(rollouts)
rollouts.rewards = self.reward(next_obs=next_obs)
self._ep_rewards = log_finished_rewards(rollouts, self._ep_rewards, logger)
self.policy_updater.update(policy, rollouts, logger)
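if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the guided-cost-learning
    # objective above on toy tensors. Unlike the grid-based MaxEnt variant, the log
    # partition function is estimated from policy samples, importance-weighted by the
    # policy's own log-probabilities. All values below are hypothetical.
    torch.manual_seed(0)
    reward_demos = torch.randn(16)
    reward_samples = torch.randn(16)
    log_prob = torch.randn(16)  # stand-in for log pi(a|s) on the sampled transitions
    loss_IOC = -(
        torch.mean(reward_demos)
        - (
            torch.logsumexp(reward_samples - log_prob, dim=0)
            - torch.log(torch.tensor(float(len(reward_samples))))
        )
    )
    print(float(loss_IOC))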
|
bc-irl-main
|
imitation_learning/gcl/updater.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as osp
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import seaborn as sns
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1 import make_axes_locatable
from rl_utils.common import Evaluator, group_trajectories
from rl_utils.envs.pointmass import PointMassObstacleEnv
class PMDistReward:
def __init__(self, slack, **kwargs):
self.slack = slack
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
prev_dist = torch.linalg.norm(prev_pos, dim=-1)
return ((prev_dist - cur_dist) - self.slack).view(-1, 1)
class PMDistActionPenReward:
def __init__(self, slack, action_pen, **kwargs):
self.slack = slack
self.action_pen = action_pen
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
prev_dist = torch.linalg.norm(prev_pos, dim=-1)
return (
(prev_dist - cur_dist)
- self.slack
- (self.action_pen * torch.linalg.norm(action, dim=-1))
).view(-1, 1)
class PMSparseReward:
def __init__(self, succ_dist, **kwargs):
self._succ_dist = succ_dist
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
reward = torch.full(cur_dist.shape, -0.1)
reward[cur_dist < self._succ_dist] = 1.0
return reward.view(-1, 1)
class PMSparseDenseReward:
def __init__(self, reward_thresh, slack, **kwargs):
self._reward_thresh = reward_thresh
self._slack = slack
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
reward = torch.full(cur_dist.shape, -self._slack)
assign = -self._slack * cur_dist
should_give_reward = cur_dist < self._reward_thresh
reward[should_give_reward] = assign[should_give_reward]
return reward.view(-1, 1)
def viz_trajs(trajs: torch.Tensor, plt_lim, agent_point_size, fig, ax, with_arrows):
pal = sns.color_palette("rocket", as_cmap=True)
traj_len = trajs.size(1)
assert len(trajs.shape) == 3
assert trajs.shape[-1] == 2
scatter_len = 0.1
for i in range(trajs.size(1)):
ax.scatter(
trajs[:, i, 0],
trajs[:, i, 1],
color=[pal((i + 1) / traj_len) for _ in range(trajs.size(0))],
s=180,
# s=agent_point_size,
cmap=pal,
)
if with_arrows:
for i in range(trajs.size(0)):
traj_x = trajs[i, :, 0]
traj_y = trajs[i, :, 1]
for t in range(trajs.size(1) - 1):
offset = np.array(
[traj_x[t + 1] - traj_x[t], traj_y[t + 1] - traj_y[t]]
)
offset_dist = np.linalg.norm(offset)
point_offset = offset * (scatter_len / offset_dist)
if offset_dist < 0.05:
continue
ax.arrow(
x=traj_x[t] + point_offset[0],
y=traj_y[t] + point_offset[1],
dx=offset[0] - (2 * point_offset[0]),
dy=offset[1] - (2 * point_offset[1]),
length_includes_head=True,
width=0.04,
head_length=0.05,
# color=np.array([236, 240, 241, 200]) / 255.0,
color=np.array([44, 62, 80]) / 255.0,
# color=np.array([0, 0, 0, 255]) / 255.0,
)
ax.set_xlim(-plt_lim, plt_lim)
ax.set_ylim(-plt_lim, plt_lim)
def plot_obstacles(obstacle_transform, obstacle_len, obstacle_width):
points = torch.tensor(
[
[-obstacle_len, -obstacle_width, 1],
[-obstacle_len, obstacle_width, 1],
[obstacle_len, -obstacle_width, 1],
[obstacle_len, obstacle_width, 1],
]
)
    # Transform the homogeneous corner points (column-vector convention, matching
    # PointMassVisualizer.plot_obstacle) and return them for the caller to draw.
    obstacle_points = (obstacle_transform @ points.T).T
    return obstacle_points
class PointMassVisualizer(Evaluator):
def __init__(
self,
envs,
rnn_hxs_dim,
num_render,
vid_dir,
fps,
save_traj_name,
updater,
agent_point_size,
plt_lim,
num_demo_plot,
plt_density,
plot_il,
plot_expert: bool,
logger,
device,
with_arrows: bool = False,
is_final_render: bool = False,
**kwargs,
):
super().__init__(envs, rnn_hxs_dim, num_render, vid_dir, fps, save_traj_name)
self._agent_point_size = agent_point_size
self._plt_lim = plt_lim
self._plt_density = plt_density
self._plot_il = plot_il
self.logger = logger
self.device = device
self.is_final_render = is_final_render
self.with_arrows = with_arrows
if plot_il and plot_expert:
dones = updater.dataset.get_data("terminals")
grouped_trajs = group_trajectories(dones, **updater.dataset.all_data)
obs_trajs = (
torch.stack([traj["observations"] for traj in grouped_trajs], dim=0)
.detach()
.cpu()
)
add_str = ""
if num_demo_plot < obs_trajs.size(0):
plot_idxs = torch.randint(high=len(obs_trajs), size=(num_demo_plot,))
obs_trajs = obs_trajs[plot_idxs]
add_str = f" (Subsampled {num_demo_plot})"
fig, ax = plt.subplots(figsize=(4, 4))
viz_trajs(
obs_trajs,
plt_lim,
agent_point_size,
fig,
ax,
self.is_final_render,
)
self.plot_obstacle(ax)
ax.set_title(f"Expert Demos{add_str}")
self.save("demos", fig)
plt.clf()
self._updater = updater
def save(self, name, fig):
if self.is_final_render:
full_path = osp.join(self._vid_dir, f"{name}.pdf")
print(f"Saved to {full_path}")
fig.savefig(full_path, bbox_inches="tight", dpi=100)
else:
full_path = osp.join(self._vid_dir, f"{name}.png")
fig.savefig(full_path)
return full_path
def plot_obstacle(self, ax):
if isinstance(self._envs, PointMassObstacleEnv):
for obs, (obs_T, w, l) in zip(
self._envs._params.square_obstacles, self._envs._square_obs_T
):
origin = torch.tensor([-(w / 2), -(l / 2), 1])
origin = obs_T @ origin
color = "orangered"
rect = Rectangle(
origin[:2],
w,
l,
angle=obs.rot_deg,
linewidth=2,
edgecolor=color,
facecolor=color,
)
ax.add_patch(rect)
def plot_reward(self, reward_fn, fig, ax):
with torch.no_grad():
coords = torch.stack(
torch.meshgrid(
torch.linspace(-self._plt_lim, self._plt_lim, self._plt_density),
torch.linspace(-self._plt_lim, self._plt_lim, self._plt_density),
indexing="ij",
),
-1,
).to(self.device)
reward_vals = reward_fn(cur_obs=coords, next_obs=coords).cpu()
max_reward_idx = reward_vals.argmax()
flat_coords = coords.view(-1, 2)
# print("Reward Max XY Coordinate", flat_coords[max_reward_idx])
im_fig = ax.imshow(
reward_vals,
extent=[
-self._plt_lim,
self._plt_lim,
-self._plt_lim,
self._plt_lim,
],
origin="lower",
)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
self.plot_obstacle(ax)
def fmt(x, pos):
return str(x).ljust(5)
fig.colorbar(
im_fig,
cax=cax,
orientation="vertical",
format=ticker.FuncFormatter(fmt),
)
def evaluate(self, policy, num_episodes, eval_i):
fig, ax = plt.subplots(figsize=(4, 4))
if self.is_final_render:
ax.axis("off")
if self._plot_il:
self.plot_reward(
self._updater.viz_reward,
fig,
ax,
)
self.save(f"reward_{eval_i}", fig)
# Intentionally don't clear plot so the evaluation rollouts are
# overlaid on reward.
eval_result = super().evaluate(policy, num_episodes, eval_i)
if len(self.eval_trajs_dones):
grouped_trajs = group_trajectories(
torch.stack(self.eval_trajs_dones, dim=0),
obs=torch.stack(self.eval_trajs_obs, dim=0),
)
obs_trajs = (
torch.stack([traj["obs"] for traj in grouped_trajs], dim=0)
.detach()
.cpu()
)
viz_trajs(
obs_trajs,
self._plt_lim,
self._agent_point_size,
fig,
ax,
self.with_arrows,
)
if not self._plot_il:
self.plot_obstacle(ax)
if not self.is_final_render:
ax.set_title(
f"Evaluation Rollouts (Update {eval_i}, Dist {eval_result['dist_to_goal']:.4f}) "
)
eval_rollouts_path = self.save(f"eval_rollouts_{eval_i}", fig)
plt.clf()
if not self.is_final_render:
self.logger.collect_img("reward", eval_rollouts_path)
return eval_result
|
bc-irl-main
|
imitation_learning/common/pointmass_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
def plot_actions(pred_actions, gt_actions, n_updates, save_dir):
assert pred_actions.shape == gt_actions.shape
action_len, action_dim = pred_actions.shape
for action_dim_i in range(action_dim):
plt.scatter(
np.arange(action_len), pred_actions[:, action_dim_i], label="Predicted"
)
plt.scatter(np.arange(action_len), gt_actions[:, action_dim_i], label="Expert")
plt.legend()
plt.title(f"Action Update Batch Dim {action_dim_i} @ {n_updates}")
plt.savefig(osp.join(save_dir, f"actions_{n_updates}_{action_dim_i}.png"))
plt.clf()
|
bc-irl-main
|
imitation_learning/common/plotting.py
|
from enum import Enum, auto
import torch
import torch.nn as nn
from rl_utils.common import make_mlp_layers
class RewardInputType(Enum):
ACTION = auto()
NEXT_STATE = auto()
CUR_NEXT_STATE = auto()
class NeuralReward(nn.Module):
def __init__(
self,
obs_shape,
action_dim,
reward_hidden_dim,
n_hidden_layers,
cost_take_dim=-1,
include_tanh=False,
reward_type=None,
clamp_max=None,
):
super().__init__()
if reward_type is None:
self.reward_type = RewardInputType.NEXT_STATE
else:
self.reward_type = RewardInputType[reward_type]
self.cost_take_dim = cost_take_dim
obs_size = obs_shape[0] if cost_take_dim == -1 else cost_take_dim
if self.reward_type == RewardInputType.ACTION:
input_size = obs_size + action_dim
elif self.reward_type == RewardInputType.NEXT_STATE:
input_size = obs_size
elif self.reward_type == RewardInputType.CUR_NEXT_STATE:
input_size = obs_size + obs_size
net_layers = make_mlp_layers(input_size, 1, reward_hidden_dim, n_hidden_layers)
if include_tanh:
net_layers.append(nn.Tanh())
self.net = nn.Sequential(*net_layers)
self.clamp_max = clamp_max
def forward(self, cur_obs=None, actions=None, next_obs=None):
if self.cost_take_dim != -1:
if cur_obs is not None:
cur_obs = cur_obs[:, :, : self.cost_take_dim]
if next_obs is not None:
next_obs = next_obs[:, :, : self.cost_take_dim]
if self.reward_type == RewardInputType.ACTION:
inputs = [cur_obs, actions]
elif self.reward_type == RewardInputType.NEXT_STATE:
inputs = [next_obs]
elif self.reward_type == RewardInputType.CUR_NEXT_STATE:
inputs = [cur_obs, next_obs]
else:
raise ValueError()
inputs = torch.cat(inputs, dim=-1)
ret = self.net(inputs)
if self.clamp_max is not None:
ret = torch.clamp(ret, min=-self.clamp_max, max=self.clamp_max)
return ret
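if __name__ == "__main__":
    # Illustrative usage (not part of the original module; assumes rl_utils is
    # installed so make_mlp_layers resolves): score a batch of toy 2-D next
    # observations with the default NEXT_STATE input mode.
    reward_fn = NeuralReward(
        obs_shape=(2,), action_dim=2, reward_hidden_dim=32, n_hidden_layers=2
    )
    next_obs = torch.randn(5, 2)
    print(reward_fn(next_obs=next_obs).shape)  # torch.Size([5, 1])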
|
bc-irl-main
|
imitation_learning/common/net.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bc-irl-main
|
imitation_learning/common/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Tuple
import torch
from rl_utils.common import DictDataset
def log_finished_rewards(
rollouts,
rolling_ep_rewards: torch.Tensor,
logger,
) -> torch.Tensor:
"""
:param rolling_ep_rewards: tensor of shape (num_envs,)
"""
num_steps, num_envs = rollouts.rewards.shape[:2]
done_episodes_rewards = []
for env_i in range(num_envs):
for step_i in range(num_steps):
rolling_ep_rewards[env_i] += rollouts.rewards[step_i, env_i].item()
if rollouts.masks[step_i + 1, env_i].item() == 0.0:
done_episodes_rewards.append(rolling_ep_rewards[env_i].item())
rolling_ep_rewards[env_i] = 0
logger.collect_info_list("inferred_episode_reward", done_episodes_rewards)
return rolling_ep_rewards
def extract_transition_batch(rollouts):
obs = next(iter(rollouts.obs.values()))
cur_obs = obs[:-1]
masks = rollouts.masks[1:]
next_obs = (masks * obs[1:]) + ((1 - masks) * rollouts.final_obs)
actions = rollouts.actions
return cur_obs, actions, next_obs, masks
def create_next_obs(dataset: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
obs = dataset["observations"].detach()
final_final_obs = dataset["infos"][-1]["final_obs"]
next_obs = torch.cat([obs[1:], final_final_obs.unsqueeze(0)], 0)
num_eps = 1
for i in range(obs.shape[0] - 1):
cur_info = dataset["infos"][i]
if "final_obs" in cur_info:
num_eps += 1
next_obs[i] = cur_info["final_obs"].detach()
if num_eps != dataset["terminals"].sum():
raise ValueError(
f"Inconsistency in # of episodes {num_eps} vs {dataset['terminals'].sum()}"
)
dataset["next_observations"] = next_obs.detach()
return dataset
def get_dataset_data(dataset_path: str, env_name: str):
return create_next_obs(torch.load(dataset_path))
def get_transition_dataset(dataset_path: str, env_name: str):
dataset = get_dataset_data(dataset_path, env_name)
return DictDataset(
dataset,
[
"observations",
"actions",
"rewards",
"terminals",
"next_observations",
],
)
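if __name__ == "__main__":
    # Illustrative sketch (not part of the original module) of the masking rule used
    # in extract_transition_batch: where mask == 0 an episode ended at that step, so
    # the next observation is taken from final_obs rather than from obs[t + 1].
    obs = torch.arange(4.0).view(4, 1, 1)     # (T + 1, num_envs=1, obs_dim=1)
    final_obs = torch.full((3, 1, 1), 99.0)   # (T, num_envs, obs_dim)
    masks = torch.tensor([1.0, 0.0, 1.0]).view(3, 1, 1)
    next_obs = (masks * obs[1:]) + ((1 - masks) * final_obs)
    print(next_obs.squeeze().tolist())        # [1.0, 99.0, 3.0]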
|
bc-irl-main
|
imitation_learning/common/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from distutils.core import setup
setup(
name="bela",
version="0.1",
packages=["bela"],
)
|
BELA-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# HACK: Need to import protobuf before pytorch_lightning to prevent Segmentation Fault: https://github.com/protocolbuffers/protobuf/issues/11934
from google.protobuf import descriptor as _descriptor
|
BELA-main
|
bela/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import hydra
from bela.conf.config import MainConfig
from omegaconf import OmegaConf
from pytorch_lightning.trainer import Trainer
@hydra.main(config_path="conf", config_name="config")
def main(cfg: MainConfig):
print(OmegaConf.to_yaml(cfg))
os.environ["NCCL_NSOCKS_PERTHREAD"] = "4"
os.environ["NCCL_SOCKET_NTHREADS"] = "2"
if cfg.get("debug_mode"):
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
os.environ["PL_SKIP_CPU_COPY_ON_DDP_TEARDOWN"] = "1"
transform = hydra.utils.instantiate(cfg.task.transform)
datamodule = hydra.utils.instantiate(cfg.datamodule, transform=transform)
task = hydra.utils.instantiate(cfg.task, datamodule=datamodule, _recursive_=False)
checkpoint_callback = hydra.utils.instantiate(cfg.checkpoint_callback)
trainer = Trainer(**cfg.trainer, callbacks=[checkpoint_callback])
if cfg.test_only:
ckpt_path = cfg.task.load_from_checkpoint
trainer.test(
model=task,
ckpt_path=ckpt_path,
verbose=True,
datamodule=datamodule,
)
else:
trainer.fit(task, datamodule=datamodule)
print(f"*** Best model path is {checkpoint_callback.best_model_path}")
trainer.test(
model=None,
ckpt_path="best",
verbose=True,
datamodule=datamodule,
)
if __name__ == "__main__":
main()
|
BELA-main
|
bela/main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import mmap
from typing import List, Optional
import torch
from pytorch_lightning import LightningDataModule
from bela.transforms.joint_el_transform import JointELTransform
logger = logging.getLogger()
def get_seq_lengths(batch: List[List[int]]):
return [len(example) for example in batch]
class EntityCatalogue:
def __init__(self, idx_path):
logger.info(f"Reading entity catalogue index {idx_path}")
self.idx = {}
with open(idx_path, "rt") as fd:
for idx, line in enumerate(fd):
ent_id = line.strip()
self.idx[ent_id] = idx
def __len__(self):
return len(self.idx)
def __getitem__(self, entity_id):
ent_index = self.idx[entity_id]
return ent_index
def __contains__(self, entity_id):
return entity_id in self.idx
class ElMatchaDataset(torch.utils.data.Dataset):
"""
A memory mapped dataset for EL in Matcha format
Each example in this dataset contains several mentions.
We laso filter out mentions, that are not present in entity catalogue
"""
def __init__(
self,
path,
ent_catalogue,
use_raw_text,
use_augmentation=False,
augmentation_frequency=0.1,
):
self.ent_catalogue = ent_catalogue
self.use_raw_text = use_raw_text
self.use_augmentation = use_augmentation
self.augmentation_frequency = augmentation_frequency
logger.info(f"Downloading file {path}")
# TODO: Maybe we should lazily load the file to speed up datamodule instanciation (e.g. in model_eval.py)
self.file = open(path, mode="r")
self.mm = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
self.offsets = []
self.count = 0
logger.info(f"Build mmap index for {path}")
line = self.mm.readline()
offset = 0
while line:
self.offsets.append(offset)
self.count += 1
offset = self.mm.tell()
line = self.mm.readline()
def __len__(self):
return self.count
def _add_char_offsets(self, tokens, gt_entities):
offsets = []
token_lengths = []
current_pos = 0
for token in tokens:
offsets.append(current_pos)
token_lengths.append(len(token))
current_pos += len(token) + 1
updated_gt_entities = []
for gt_entity in gt_entities:
offset, length, entity, ent_type = gt_entity[:4]
char_offset = offsets[offset]
char_length = (
sum(token_lengths[offset + idx] for idx in range(length)) + length - 1
)
updated_gt_entities.append(
(offset, length, entity, ent_type, char_offset, char_length)
)
return updated_gt_entities
def __getitem__(self, index):
offset = self.offsets[index]
self.mm.seek(offset)
line = self.mm.readline()
example = json.loads(line)
gt_entities = []
if self.use_raw_text and "original_text" not in example:
example["gt_entities"] = self._add_char_offsets(
example["text"], example["gt_entities"]
)
example["original_text"] = " ".join(example["text"])
for gt_entity in example["gt_entities"]:
if self.use_raw_text:
_, _, entity, ent_type, offset, length = gt_entity[:6]
else:
offset, length, entity, ent_type = gt_entity[:4]
if ent_type != "wiki":
continue
if entity in self.ent_catalogue:
gt_entities.append((offset, length, self.ent_catalogue[entity]))
gt_entities = sorted(gt_entities)
# blink predicts
blink_predicts = None
blink_scores = None
if "blink_predicts" in example:
blink_predicts = []
blink_scores = []
for predict, scores in zip(
example["blink_predicts"], example["blink_scores"]
):
candidates = []
candidates_scores = []
for candidate, score in zip(predict, scores):
if candidate in self.ent_catalogue:
candidates.append(self.ent_catalogue[candidate])
candidates_scores.append(score)
blink_predicts.append(candidates)
blink_scores.append(candidates_scores)
# MD model predicts
md_pred_offsets = example.get("md_pred_offsets")
md_pred_lengths = example.get("md_pred_lengths")
md_pred_scores = example.get("md_pred_scores")
result = {
"data_example_id": example.get("document_id") or example.get("data_example_id", ""),
"text": example["original_text"] if self.use_raw_text else example["text"],
"gt_entities": gt_entities,
"blink_predicts": blink_predicts,
"blink_scores": blink_scores,
"md_pred_offsets": md_pred_offsets,
"md_pred_lengths": md_pred_lengths,
"md_pred_scores": md_pred_scores,
}
return result
class JointELDataModule(LightningDataModule):
"""
Read data from EL datatset and prepare mention/entity pairs tensors
"""
def __init__(
self,
transform: JointELTransform,
# Dataset args
train_path: str,
val_path: str,
test_path: str,
ent_catalogue_idx_path: str,
batch_size: int = 2,
val_batch_size: Optional[int] = None,
test_batch_size: Optional[int] = None,
drop_last: bool = False, # drop last batch if len(dataset) not multiple of batch_size
        num_workers: int = 0,  # num_workers > 0 currently causes dataloader errors
use_raw_text: bool = True,
use_augmentation: bool = False,
augmentation_frequency: float = 0.1,
shuffle: bool = True,
*args,
**kwargs,
):
super().__init__()
self.batch_size = batch_size
self.val_batch_size = val_batch_size or batch_size
self.test_batch_size = test_batch_size or batch_size
self.drop_last = drop_last
self.num_workers = num_workers
self.transform = transform
self.ent_catalogue = EntityCatalogue(ent_catalogue_idx_path)
self.shuffle = shuffle
self.datasets = {
"train": ElMatchaDataset(
train_path,
self.ent_catalogue,
use_raw_text=use_raw_text,
use_augmentation=use_augmentation,
augmentation_frequency=augmentation_frequency,
) if train_path else None,
"valid": ElMatchaDataset(
val_path,
self.ent_catalogue,
use_raw_text=use_raw_text,
) if val_path else None,
"test": ElMatchaDataset(
test_path,
self.ent_catalogue,
use_raw_text=use_raw_text,
) if test_path else None,
}
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["train"],
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_train,
shuffle=self.shuffle,
drop_last=self.drop_last,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["valid"],
shuffle=False,
batch_size=self.val_batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
drop_last=self.drop_last,
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["test"],
shuffle=False,
batch_size=self.test_batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
drop_last=self.drop_last,
)
def collate_eval(self, batch):
return self.collate(batch, False)
def collate_train(self, batch):
return self.collate(batch, True)
def collate(self, batch, is_train):
"""
Input:
batch: List[Example]
Example fields:
- "text": List[str] - post tokens
- "gt_entities": List[Tuple[int, int, int]] - GT entities in text,
offset, length, entity id
- "blink_predicts": List[List[int]] - list of entity ids for each MD prediction
- "blink_scores": List[List[float]] - list of BLINK scores
- "md_pred_offsets": List[int] - mention offsets predicted by MD
- "md_pred_lengths": List[int] - mention lengths
- "md_pred_scores": List[float] - MD scores
"""
data_example_ids = []
texts = []
offsets = []
lengths = []
entities = []
for example in batch:
data_example_ids.append(example["data_example_id"])
texts.append(example["text"])
example_offsets = []
example_lengths = []
example_entities = []
for offset, length, entity_id in example["gt_entities"]:
example_offsets.append(offset)
example_lengths.append(length)
example_entities.append(entity_id)
offsets.append(example_offsets)
lengths.append(example_lengths)
entities.append(example_entities)
model_inputs = self.transform(
{
"texts": texts,
"mention_offsets": offsets,
"mention_lengths": lengths,
"entities": entities,
}
)
collate_output = {
"data_example_ids": data_example_ids,
"input_ids": model_inputs["input_ids"],
"attention_mask": model_inputs["attention_mask"],
"mention_offsets": model_inputs["mention_offsets"],
"mention_lengths": model_inputs["mention_lengths"],
"entities": model_inputs["entities"],
"tokens_mapping": model_inputs["tokens_mapping"],
}
if "sp_tokens_boundaries" in model_inputs:
collate_output["sp_tokens_boundaries"] = model_inputs[
"sp_tokens_boundaries"
]
return collate_output
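if __name__ == "__main__":
    # Illustrative sketch (not part of the original module; running it assumes the
    # file's imports such as pytorch_lightning resolve) of the line-offset index that
    # ElMatchaDataset builds over a memory-mapped JSONL file, shown on a temporary
    # file so it is self-contained (POSIX-only, mirroring the PROT_READ usage above).
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as tmp:
        tmp.write('{"id": 1}\n{"id": 2}\n{"id": 3}\n')
        tmp_path = tmp.name
    with open(tmp_path, "r") as f:
        mm = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
        offsets = []
        line = mm.readline()
        offset = 0
        while line:
            offsets.append(offset)
            offset = mm.tell()
            line = mm.readline()
        # Random access: seek to the second record and parse it back.
        mm.seek(offsets[1])
        print(offsets, json.loads(mm.readline()))  # [0, 10, 20] {'id': 2}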
|
BELA-main
|
bela/datamodule/joint_el_datamodule.py
|
BELA-main
|
bela/tests/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import torch
from bela.transforms.joint_el_transform import JointELTransform
from bela.datamodule.joint_el_datamodule import JointELDataModule
def assert_equal_tensor_dict(test_case, result, expected):
"""
Compare tensors/values in the dict and assert if they are not equal.
The dict could countain multiple levels of nesting.
"""
for key, value in expected.items():
if isinstance(value, dict):
assert_equal_tensor_dict(test_case, result[key], value)
else:
if isinstance(value, torch.Tensor):
test_case.assertTrue(
torch.equal(result[key], value), f"{key} is not equal"
)
else:
test_case.assertEqual(result[key], value, f"{key} is not equal")
class TestJointELDataModule(unittest.TestCase):
def setUp(self):
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_matcha_joint.jsonl")
self.ent_catalogue_idx_path = os.path.join(self.base_dir, "el_catalogue.idx")
self.transform = JointELTransform()
def test_joint_el_datamodule(self):
dm = JointELDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
use_raw_text=False,
batch_size=2,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 1)
expected_batches = [
{
"input_ids": torch.tensor(
[
[
0,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
9020,
2,
],
[
0,
360,
9020,
70,
10323,
111,
30715,
136,
70,
14098,
117604,
2,
1,
1,
1,
1,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
]
),
"mention_offsets": torch.tensor([[1, 14], [2, 0]]),
"mention_lengths": torch.tensor([[3, 1], [1, 0]]),
"entities": torch.tensor([[1, 0], [0, 0]]),
"tokens_mapping": torch.tensor(
[
[
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 10],
[10, 11],
[11, 12],
[12, 14],
[14, 15],
],
[
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 10],
[10, 11],
[0, 1],
[0, 1],
[0, 1],
],
]
),
}
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
if __name__ == '__main__':
unittest.main()
|
BELA-main
|
bela/tests/test_datamodules.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.models.hf_encoder import HFEncoder
from bela.transforms.joint_el_transform import JointELTransform
class TestHFEncoder(unittest.TestCase):
def test_xlmr_encoder(self):
transform = JointELTransform()
model = HFEncoder(model_path="xlm-roberta-base")
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
output = model(
input_ids=model_inputs["input_ids"],
attention_mask=model_inputs["attention_mask"],
)
if __name__ == '__main__':
unittest.main()
|
BELA-main
|
bela/tests/test_models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.transforms.joint_el_transform import JointELTransform, JointELXlmrRawTextTransform
class TestJointELXlmrTransforms(unittest.TestCase):
def test_blink_mention_xlmr_transform(self):
transform = JointELTransform()
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
expected_model_inputs = {
"input_ids": torch.tensor(
[
[0, 31384, 8781, 7986, 1672, 5120, 8884, 136, 5755, 2],
[0, 47958, 19715, 711, 2, 1, 1, 1, 1, 1],
[0, 96085, 43340, 1129, 2765, 22, 21, 65526, 2, 1],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
]
),
"mention_offsets": torch.tensor([[5, 8], [2, 0], [1, 7]]),
"mention_lengths": torch.tensor([[2, 1], [1, 0], [2, 1]]),
"entities": torch.tensor([[1, 2], [3, 0], [102041, 267832]]),
"tokens_mapping": torch.tensor(
[
                    [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9]],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 5],
[5, 6],
[6, 7],
[7, 8],
[0, 1],
[0, 1],
],
]
),
}
for key, value in expected_model_inputs.items():
self.assertTrue(
torch.all(model_inputs[key].eq(value)), f"{key} not equal")
def test_joint_el_raw_text_xlmr_transform(self):
transform = JointELXlmrRawTextTransform()
model_inputs = transform(
{
"texts": [
"Some simple text about Real Madrid and Barcelona",
"Cristiano Ronaldo juega en la Juventus",
"Hola amigos!",
" Hola amigos! ", # test extra spaces
],
"mention_offsets": [
[23, 39],
[0, 30],
[5],
[10],
],
"mention_lengths": [
[11, 9],
[17, 8],
[6],
[6],
],
"entities": [
[1, 2],
[102041, 267832],
[3],
[3],
],
}
)
expected_model_inputs = {
"input_ids": torch.tensor(
[
[0, 31384, 8781, 7986, 1672, 5120, 8884, 136, 5755, 2],
[0, 96085, 43340, 1129, 2765, 22, 21, 65526, 2, 1],
[0, 47958, 19715, 38, 2, 1, 1, 1, 1, 1],
# Whitespaces are ignored
[0, 47958, 19715, 38, 2, 1, 1, 1, 1, 1],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
]
),
"mention_offsets": torch.tensor([[5, 8], [1, 7], [2, 0], [2, 0]]),
"mention_lengths": torch.tensor([[2, 1], [2, 1], [1, 0], [1, 0]]),
"entities": torch.tensor([[1, 2], [102041, 267832], [3, 0], [3, 0]]),
"tokens_mapping": torch.tensor(
[
                    [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9]],
[
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
]
),
"sp_tokens_boundaries": torch.tensor(
[
[
[0, 4],
[4, 11],
[11, 16],
[16, 22],
[22, 27],
[27, 34],
[34, 38],
[38, 48],
],
[
[0, 9],
[9, 17],
[17, 20],
[20, 23],
[23, 26],
[26, 29],
[29, 38],
[0, 1],
],
[[0, 4], [4, 11], [11, 12], [0, 1],
[0, 1], [0, 1], [0, 1], [0, 1]],
[[0, 4+3], [4+3, 11+5], [11+5, 12+5], [0, 1],
[0, 1], [0, 1], [0, 1], [0, 1]], # Add whitespaces
]
),
}
for key, value in expected_model_inputs.items():
self.assertTrue(torch.all(model_inputs[key].eq(
value)), f"{key} not equal: {model_inputs[key]=} != {value=}")
def test_joint_el_raw_text_xlmr_transform_2(self):
examples = [
{
'original_text': ' La Carta de las Naciones Unidas: tratado fundacional de las Naciones Unidas que establece que las obligaciones con las Naciones Unidas prevalecen sobre todas las demás obligaciones del tratado y es vinculante para todos los miembros de las Naciones Unidas. Tipo de documento: tratado., Fecha de la firma: 26 de junio de 1945., Lugar de la firma: San Francisco, California, Estados Unidos., Entrada en vigor: 24 de octubre de 1945., Firmantes: Ratificado por China, Francia, la Unión Soviética, el Reino Unido, Estados Unidos y por la mayoría de estados signatarios., Artículos: 193., Secciones: 20 (preámbulo y 19 capítulos):, *Preámbulo de la Carta de las Naciones Unidas., *Capítulo I: Propósitos y principios., *Capítulo II: Miembros., *Capítulo III: Órganos, *Capítulo IV: La Asamblea General., *Capítulo V: El Consejo de Seguridad, *Capítulo VI: Solución pacífica de controversias., *Capítulo VII: Acción con respecto a las amenazas a la paz, las rupturas de la paz y los actos de agresión., *Capítulo VIII: Acuerdos Regionales., *Capítulo IX: Cooperación internacional económica y social., *Capítulo X: El Consejo Económico y Social., *Capítulo XI: Declaración sobre los territorios no autónomos., *Capítulo XII: Sistema Internacional de Administración Fiduciaria., *Capítulo XIII: El Consejo de Administración Fiduciaria., *Capítulo XIV: La Corte Internacional de Justicia., *Capítulo XV: La Secretaría., *Capítulo XVI: Disposiciones varias., *Capítulo XVII: Arreglos transitorios de seguridad., *Capítulo XVIII: Enmiendas., *Capítulo XIX: Ratificación y firma. ',
'gt_entities': [
[0, 0, 'Q171328', 'wiki', 4, 28],
[0, 0, 'Q131569', 'wiki', 34, 7],
[0, 0, 'Q1065', 'wiki', 61, 15],
[0, 0, 'Q1065', 'wiki', 120, 15],
[0, 0, 'Q131569', 'wiki', 186, 7],
[0, 0, 'Q1065', 'wiki', 241, 15],
[0, 0, 'Q49848', 'wiki', 267, 9],
[0, 0, 'Q384515', 'wiki', 278, 7],
[0, 0, 'Q205892', 'wiki', 288, 5],
[0, 0, 'Q188675', 'wiki', 300, 5],
[0, 0, 'Q2661', 'wiki', 307, 11],
[0, 0, 'Q5240', 'wiki', 322, 4],
[0, 0, 'Q62', 'wiki', 348, 13],
[0, 0, 'Q99', 'wiki', 363, 10],
[0, 0, 'Q30', 'wiki', 375, 14],
[0, 0, 'Q2955', 'wiki', 410, 13],
[0, 0, 'Q148', 'wiki', 460, 5],
[0, 0, 'Q7275', 'wiki', 547, 7],
[0, 0, 'Q1129448', 'wiki', 601, 9],
[0, 0, 'Q1980247', 'wiki', 616, 9],
[0, 0, 'Q7239343', 'wiki', 630, 44],
[0, 0, 'Q211364', 'wiki', 703, 10],
[0, 0, 'Q160016', 'wiki', 730, 8],
[0, 0, 'Q895526', 'wiki', 756, 7],
[0, 0, 'Q47423', 'wiki', 782, 16],
[0, 0, 'Q37470', 'wiki', 817, 20],
[0, 0, 'Q1255828', 'wiki', 874, 13],
[0, 0, 'Q1728608', 'wiki', 891, 105],
[0, 0, 'Q454', 'wiki', 945, 3]
],
},
]
texts = [example['original_text'] for example in examples]
mention_offsets = [[offset for _, _, _, _, offset,
_ in example['gt_entities']] for example in examples]
mention_lengths = [[length for _, _, _, _, _,
length in example['gt_entities']] for example in examples]
entities = [[0 for _, _, _, _, _, _ in example['gt_entities']]
for example in examples]
batch = {
"texts": texts,
"mention_offsets": mention_offsets,
"mention_lengths": mention_lengths,
"entities": entities,
}
transform = JointELXlmrRawTextTransform()
model_inputs = transform(batch)
expected_mention_offsets = [[2, 9, 14, 25, 37, 48, 55, 57, 60, 64, 66, 70, 78, 81, 83, 91, 104, 125, 141, 146, 151, 174, 183, 194, 205, 216, 230]]
expected_mention_lengths = [[6, 1, 3, 3, 1, 3, 1, 1, 2, 1, 3, 1, 2, 1, 2, 3, 1, 1, 3, 2, 11, 1, 3, 3, 2, 3, 2]]
self.assertEqual(
model_inputs['mention_offsets'].tolist(), expected_mention_offsets)
self.assertEqual(
model_inputs['mention_lengths'].tolist(), expected_mention_lengths)
if __name__ == '__main__':
unittest.main()
|
BELA-main
|
bela/tests/test_transforms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from functools import lru_cache
from bela.evaluation.model_eval import ModelEval
from bela.transforms.spm_transform import SPMTransform
@lru_cache
def get_sp_transform():
return SPMTransform(max_seq_len=100000)
def get_windows(text, window_length=254, overlap=127):
sp_transform = get_sp_transform()
tokens = sp_transform([text])[0]
tokens = tokens[1:-1]
windows = []
for window_start in range(0, len(tokens), window_length - overlap):
start_pos = tokens[window_start][1]
if window_start + window_length >= len(tokens):
end_pos = tokens[-1][2]
else:
end_pos = tokens[window_start + window_length][2]
windows.append((start_pos, end_pos))
return windows
def convert_predictions_to_dict(example_predictions):
if len(example_predictions) > 0:
offsets, lengths, entities, md_scores, el_scores = zip(*example_predictions)
else:
offsets, lengths, entities, md_scores, el_scores = [], [], [], [], []
return {
"offsets": offsets,
"lengths": lengths,
"entities": entities,
"md_scores": md_scores,
"el_scores": el_scores,
}
def group_predictions_by_example(all_predictions, extended_examples):
grouped_predictions = defaultdict(list)
for prediction, extended_example in zip(all_predictions, extended_examples):
window_start = extended_example["window_start"]
prediction = dict(prediction)
prediction["offsets"] = [
offset + window_start for offset in prediction["offsets"]
]
grouped_predictions[extended_example["document_id"]].append((prediction))
predictions = {}
for document_id, example_prediction_list in grouped_predictions.items():
example_predictions = []
for prediction in example_prediction_list:
for offset, length, ent, md_score, el_score in zip(
prediction["offsets"],
prediction["lengths"],
prediction["entities"],
prediction["md_scores"],
prediction["el_scores"],
):
example_predictions.append((offset, length, ent, md_score, el_score))
example_predictions = sorted(example_predictions)
predictions[document_id] = example_predictions
return predictions
def merge_predictions(example_predictions):
filtered_example_predictions = []
current_end = None
current_offset = None
current_length = None
current_ent_id = None
current_md_score = None
current_el_score = None
for offset, length, ent_id, md_score, el_score in example_predictions:
if current_end is None:
current_end = offset + length
current_offset = offset
current_length = length
current_ent_id = ent_id
current_md_score = md_score
current_el_score = el_score
continue
if offset < current_end:
# intersection of two predictions
if md_score > current_md_score:
current_ent_id = ent_id
current_offset = offset
current_length = length
current_md_score = md_score
current_el_score = el_score
else:
filtered_example_predictions.append(
(
current_offset,
current_length,
current_ent_id,
current_md_score,
current_el_score,
)
)
current_ent_id = ent_id
current_offset = offset
current_length = length
current_md_score = md_score
current_el_score = el_score
current_end = offset + length
if current_offset is not None:
filtered_example_predictions.append(
(
current_offset,
current_length,
current_ent_id,
current_md_score,
current_el_score,
)
)
return filtered_example_predictions
def get_predictions_using_windows(model_eval: ModelEval, test_data, batch_size=1024, window_length=254, window_overlap=10, do_merge_predictions=True):
extended_examples = []
for example in test_data:
assert "document_id" in example or "data_example_id" in example
document_id = example.get("document_id") or example["data_example_id"]
text = example["original_text"]
windows = get_windows(text, window_length, window_overlap)
for idx, (start_pos, end_pos) in enumerate(windows):
new_text = text[start_pos:end_pos]
extended_examples.append(
{
"document_id": document_id,
"original_text": new_text,
"gt_entities": example["gt_entities"],
"window_idx": idx,
"window_start": start_pos,
"window_end": end_pos,
}
)
all_predictions = model_eval.get_predictions(
extended_examples, batch_size=batch_size
)
predictions_dict = group_predictions_by_example(all_predictions, extended_examples)
predictions = []
for example in test_data:
assert "document_id" in example or "data_example_id" in example
document_id = example.get("document_id") or example["data_example_id"]
text = example["original_text"]
example_predictions = predictions_dict[document_id]
if do_merge_predictions:
example_predictions = merge_predictions(example_predictions)
example_predictions = convert_predictions_to_dict(example_predictions)
predictions.append(example_predictions)
return predictions
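if __name__ == "__main__":
    # Illustrative example (not part of the original module; running it assumes the
    # bela package and its dependencies are importable): merge_predictions resolves
    # overlapping window predictions in favour of the higher MD score. The tuples
    # below are hypothetical (offset, length, entity, md_score, el_score) values.
    preds = [
        (0, 5, "Q1", 0.90, 0.80),
        (3, 4, "Q2", 0.95, 0.70),  # overlaps the first span, higher MD score
        (20, 3, "Q3", 0.60, 0.50),
    ]
    print(merge_predictions(preds))
    # [(3, 4, 'Q2', 0.95, 0.7), (20, 3, 'Q3', 0.6, 0.5)]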
|
BELA-main
|
bela/utils/prediction_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class DummyPathManager:
def get_local_path(self, path, *args, **kwargs):
return path
def open(self, path, *args, **kwargs):
return open(path, *args, **kwargs)
PathManager = DummyPathManager()
|
BELA-main
|
bela/utils/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Any, Dict, Optional, List
@dataclass
class Entity:
entity_id: str # E.g. "Q3312129"
offset: int
length: int
text: str
entity_type: Optional[str] = None # E.g. wiki
md_score: Optional[float] = None
el_score: Optional[float] = None
@property
def mention(self):
return self.text[self.offset : self.offset + self.length]
@property
def extended_mention(self):
"""Mentin in surrounding context (10 chars), with the mention in brackets"""
left_context = self.text[max(0, self.offset - 10) : self.offset]
right_context = self.text[self.offset + self.length : self.offset + self.length + 10]
# Add ... if the context is truncated
if self.offset - 10 > 0:
left_context = "..." + left_context
if self.offset + self.length + 10 < len(self.text):
right_context = right_context + "..."
return f"{left_context}[{self.mention}]{right_context}"
def __repr__(self):
str_repr = f'Entity<mention="{self.extended_mention}", entity_id={self.entity_id}'
if self.md_score is not None and self.el_score is not None:
str_repr += f", md_score={self.md_score:.2f}, el_score={self.el_score:.2f}"
str_repr += ">"
return str_repr
def __eq__(self, other):
return self.offset == other.offset and self.length == other.length and self.entity_id == other.entity_id
class Sample:
text: str
sample_id: Optional[str] = None
ground_truth_entities: Optional[List[Entity]] = None
predicted_entities: Optional[List[Entity]] = None
def __init__(self, text, sample_id=None, ground_truth_entities=None, predicted_entities=None):
self.text = text
self.sample_id = sample_id
self.ground_truth_entities = ground_truth_entities
self.predicted_entities = predicted_entities
if self.ground_truth_entities is not None and self.predicted_entities is not None:
self.compute_scores()
def compute_scores(self):
self.true_positives = [
predicted_entity
for predicted_entity in self.predicted_entities
if predicted_entity in self.ground_truth_entities
]
self.false_positives = [
predicted_entity
for predicted_entity in self.predicted_entities
if predicted_entity not in self.ground_truth_entities
]
self.false_negatives = [
ground_truth_entity
for ground_truth_entity in self.ground_truth_entities
if ground_truth_entity not in self.predicted_entities
]
# Bag of entities
self.ground_truth_entity_ids = set(
[ground_truth_entity.entity_id for ground_truth_entity in self.ground_truth_entities]
)
self.predicted_entity_ids = set(
[predicted_entity.entity_id for predicted_entity in self.predicted_entities]
)
self.true_positives_boe = [
predicted_entity_id
for predicted_entity_id in self.predicted_entity_ids
if predicted_entity_id in self.ground_truth_entity_ids
]
self.false_positives_boe = [
predicted_entity_id
for predicted_entity_id in self.predicted_entity_ids
if predicted_entity_id not in self.ground_truth_entity_ids
]
self.false_negatives_boe = [
ground_truth_entity_id
for ground_truth_entity_id in self.ground_truth_entity_ids
if ground_truth_entity_id not in self.predicted_entity_ids
]
def __repr__(self):
repr_str = f'Sample(text="{self.text[:100]}..."'
if self.ground_truth_entities is not None:
repr_str += f", ground_truth_entities={self.ground_truth_entities[:3]}..."
if self.predicted_entities is not None:
repr_str += f", predicted_entities={self.predicted_entities[:3]}..."
repr_str += ")"
return repr_str
def print(self, max_display_length=1000):
print(f"{self.text[:max_display_length]=}")
if self.ground_truth_entities is not None:
print("***************** Ground truth entities *****************")
print(f"{len(self.ground_truth_entities)=}")
for ground_truth_entity in self.ground_truth_entities:
if ground_truth_entity.offset + ground_truth_entity.length > max_display_length:
continue
print(ground_truth_entity)
if self.predicted_entities is not None:
print("***************** Predicted entities *****************")
print(f"{len(self.predicted_entities)=}")
for predicted_entity in self.predicted_entities:
if predicted_entity.offset + predicted_entity.length > max_display_length:
continue
print(predicted_entity)
def convert_jsonl_data_to_samples(jsonl_data: List[Dict[str, Any]]) -> List[Sample]:
"""Converts the jsonl data to a list of samples."""
samples = []
for example in jsonl_data:
ground_truth_entities = [
Entity(entity_id=entity_id, offset=offset, length=length, text=example["original_text"])
for _, _, entity_id, _, offset, length in example["gt_entities"]
]
sample = Sample(text=example["original_text"], ground_truth_entities=ground_truth_entities)
samples.append(sample)
return samples
def convert_predictions_to_entities(example_predictions: Dict[str, List], text) -> List[Entity]:
"""Converts the predictions of a single example to a list of entities."""
predicted_entities = [
Entity(entity_id=entity_id, offset=offset, length=length, md_score=md_score, el_score=el_score, text=text)
for offset, length, entity_id, md_score, el_score in zip(
example_predictions["offsets"],
example_predictions["lengths"],
example_predictions["entities"],
example_predictions["md_scores"],
example_predictions["el_scores"],
)
]
return predicted_entities
def convert_jsonl_data_and_predictions_to_samples(
jsonl_data: List[Dict[str, Any]], predictions: Dict[str, List], md_threshold, el_threshold
) -> List[Sample]:
samples = convert_jsonl_data_to_samples(jsonl_data)
for sample, example_predictions in zip(samples, predictions):
predicted_entities = convert_predictions_to_entities(example_predictions, sample.text)
predicted_entities = [
entity for entity in predicted_entities if entity.el_score > el_threshold and entity.md_score > md_threshold
]
sample.predicted_entities = predicted_entities
sample.compute_scores()
return samples
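if __name__ == "__main__":
    # Illustrative usage (not part of the original module): toy entity IDs and spans,
    # showing how Sample splits predictions into true/false positives by
    # (offset, length, entity_id) equality.
    text = "Cristiano Ronaldo juega en la Juventus"
    gt = [Entity(entity_id="Q1", offset=0, length=17, text=text)]
    pred = [
        Entity(entity_id="Q1", offset=0, length=17, text=text, md_score=0.9, el_score=0.8),
        Entity(entity_id="Q2", offset=30, length=8, text=text, md_score=0.7, el_score=0.6),
    ]
    sample = Sample(text=text, ground_truth_entities=gt, predicted_entities=pred)
    print(sample.true_positives)   # the "Cristiano Ronaldo" span matches ground truth
    print(sample.false_positives)  # the "Juventus" span has no ground-truth counterpart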
|
BELA-main
|
bela/utils/analysis_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch.nn as nn
from transformers import AutoModel, AutoConfig
class HFEncoder(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
projection_dim: Optional[int] = None,
):
super().__init__()
self.transformer = AutoModel.from_pretrained(model_path)
self.embedding_dim = self.transformer.encoder.config.hidden_size
def forward(self, input_ids, attention_mask=None):
output = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
last_layer = output["last_hidden_state"]
sentence_rep = last_layer[:, 0, :]
return sentence_rep, last_layer
|
BELA-main
|
bela/models/hf_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from transformers import AutoTokenizer
class HFTransform(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
max_seq_len: int = 256,
add_special_tokens: bool = True,
):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.sep_token = self.tokenizer.sep_token
self.max_seq_len = max_seq_len
self.add_special_tokens = add_special_tokens
def forward(self, texts):
return self.tokenizer(
texts,
return_tensors=None,
padding=False,
truncation=True,
max_length=self.max_seq_len,
add_special_tokens=self.add_special_tokens,
)["input_ids"]
|
BELA-main
|
bela/transforms/hf_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sentencepiece.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='sentencepiece.proto',
package='sentencepiece',
syntax='proto2',
serialized_options=b'H\003',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13sentencepiece.proto\x12\rsentencepiece\"\xdf\x01\n\x11SentencePieceText\x12\x0c\n\x04text\x18\x01 \x01(\t\x12>\n\x06pieces\x18\x02 \x03(\x0b\x32..sentencepiece.SentencePieceText.SentencePiece\x12\r\n\x05score\x18\x03 \x01(\x02\x1a\x62\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\r\x12\x0f\n\x07surface\x18\x03 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x04 \x01(\r\x12\x0b\n\x03\x65nd\x18\x05 \x01(\r*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"J\n\x16NBestSentencePieceText\x12\x30\n\x06nbests\x18\x01 \x03(\x0b\x32 .sentencepiece.SentencePieceTextB\x02H\x03'
)
_SENTENCEPIECETEXT_SENTENCEPIECE = _descriptor.Descriptor(
name='SentencePiece',
full_name='sentencepiece.SentencePieceText.SentencePiece',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='piece', full_name='sentencepiece.SentencePieceText.SentencePiece.piece', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='sentencepiece.SentencePieceText.SentencePiece.id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='surface', full_name='sentencepiece.SentencePieceText.SentencePiece.surface', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='begin', full_name='sentencepiece.SentencePieceText.SentencePiece.begin', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end', full_name='sentencepiece.SentencePieceText.SentencePiece.end', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(200, 536870912), ],
oneofs=[
],
serialized_start=153,
serialized_end=251,
)
_SENTENCEPIECETEXT = _descriptor.Descriptor(
name='SentencePieceText',
full_name='sentencepiece.SentencePieceText',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='sentencepiece.SentencePieceText.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pieces', full_name='sentencepiece.SentencePieceText.pieces', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='score', full_name='sentencepiece.SentencePieceText.score', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_SENTENCEPIECETEXT_SENTENCEPIECE, ],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(200, 536870912), ],
oneofs=[
],
serialized_start=39,
serialized_end=262,
)
_NBESTSENTENCEPIECETEXT = _descriptor.Descriptor(
name='NBestSentencePieceText',
full_name='sentencepiece.NBestSentencePieceText',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='nbests', full_name='sentencepiece.NBestSentencePieceText.nbests', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=338,
)
_SENTENCEPIECETEXT_SENTENCEPIECE.containing_type = _SENTENCEPIECETEXT
_SENTENCEPIECETEXT.fields_by_name['pieces'].message_type = _SENTENCEPIECETEXT_SENTENCEPIECE
_NBESTSENTENCEPIECETEXT.fields_by_name['nbests'].message_type = _SENTENCEPIECETEXT
DESCRIPTOR.message_types_by_name['SentencePieceText'] = _SENTENCEPIECETEXT
DESCRIPTOR.message_types_by_name['NBestSentencePieceText'] = _NBESTSENTENCEPIECETEXT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SentencePieceText = _reflection.GeneratedProtocolMessageType('SentencePieceText', (_message.Message,), {
'SentencePiece' : _reflection.GeneratedProtocolMessageType('SentencePiece', (_message.Message,), {
'DESCRIPTOR' : _SENTENCEPIECETEXT_SENTENCEPIECE,
'__module__' : 'sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText.SentencePiece)
})
,
'DESCRIPTOR' : _SENTENCEPIECETEXT,
'__module__' : 'sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText)
})
_sym_db.RegisterMessage(SentencePieceText)
_sym_db.RegisterMessage(SentencePieceText.SentencePiece)
NBestSentencePieceText = _reflection.GeneratedProtocolMessageType('NBestSentencePieceText', (_message.Message,), {
'DESCRIPTOR' : _NBESTSENTENCEPIECETEXT,
'__module__' : 'sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.NBestSentencePieceText)
})
_sym_db.RegisterMessage(NBestSentencePieceText)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
BELA-main
|
bela/transforms/sentencepiece_pb2.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import os
import torch.nn as nn
import sentencepiece as spm
from .sentencepiece_pb2 import SentencePieceText
class SPMTransform(nn.Module):
def __init__(
self,
sp_model_path: Optional[str] = None,
max_seq_len: int = 256,
add_special_tokens: bool = True,
):
super().__init__()
sp_model_path = sp_model_path or os.path.join(os.path.dirname(__file__), "../data/sp_model")
self.processor = spm.SentencePieceProcessor(sp_model_path)
self.sep_token = '</s>'
self.unk_token_id = 3
self.max_seq_len = max_seq_len
self.add_special_tokens = add_special_tokens
def forward(self, texts):
output = []
for text in texts:
spt = SentencePieceText()
spt.ParseFromString(self.processor.encode_as_serialized_proto(text))
current_offset = 0
leading_whitespaces_count = 0
for char in text:
if char.isspace():
leading_whitespaces_count += 1
else:
break
token_ids_with_offsets = []
if self.add_special_tokens:
token_ids_with_offsets.append((0,0,0))
for idx, piece in enumerate(spt.pieces):
if piece.id != 0:
token_id = piece.id + 1
else:
token_id = self.unk_token_id
if idx == 0:
                    # for the first token, extend the span by the number of leading whitespace characters
token_ids_with_offsets.append((token_id, current_offset, current_offset + len(piece.surface) + leading_whitespaces_count))
current_offset += len(piece.surface) + leading_whitespaces_count
else:
token_ids_with_offsets.append((token_id, current_offset, current_offset + len(piece.surface)))
current_offset += len(piece.surface)
# take into account special tokens
if idx == self.max_seq_len - 3:
break
if self.add_special_tokens:
token_ids_with_offsets.append((2,current_offset,0))
output.append(token_ids_with_offsets)
return output
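# Illustrative usage sketch (not part of the original module). It shows the
# layout SPMTransform.forward produces: one list per input text containing
# (token_id, char_start, char_end) triples, plus a (0, 0, 0) BOS entry and a
# (2, last_offset, 0) EOS entry when add_special_tokens=True. The model path
# argument is a placeholder; no SentencePiece model ships with this sketch.
def _demo_spm_transform(sp_model_path: str) -> None:
    transform = SPMTransform(sp_model_path=sp_model_path, max_seq_len=16)
    (tokens_with_offsets,) = transform(["Hello world"])
    for token_id, start, end in tokens_with_offsets:
        # start/end are character offsets into the original string,
        # except for the special BOS/EOS placeholders described above.
        print(token_id, start, end)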
|
BELA-main
|
bela/transforms/spm_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from bela.transforms.hf_transform import HFTransform
from bela.transforms.spm_transform import SPMTransform
class ReadState(Enum):
ReadAlphaNum = 1
ReadSpace = 2
ReadOther = 3
def insert_spaces(text: str) -> Tuple[str, List[int]]:
"""
    The raw string inputs sometimes miss spaces between text pieces;
    for example, smileys can be joined directly to the text:
    [smile]Some text.[smile] another text.
    This function modifies the text string to separate alphanumeric tokens
    from any other tokens, which makes the model's job easier. The example
    above becomes:
    [smile] Some text . [smile] another text .
"""
out_str: str = ""
insertions: List[int] = []
# In the beginning of the string we assume we just read some space
state: ReadState = ReadState.ReadSpace
    for idx, char in enumerate(text):
        if state == ReadState.ReadSpace:
            if char.isspace():
                pass
            elif char.isalnum():
                state = ReadState.ReadAlphaNum
            else:
                state = ReadState.ReadOther
        elif state == ReadState.ReadAlphaNum:
            if char.isspace():
                state = ReadState.ReadSpace
            elif char.isalnum():
                pass
            else:
                # a non-alphanumeric, non-space char follows an alphanumeric run:
                # insert a separating space and remember the original index
                out_str += " "
                insertions.append(idx)
                state = ReadState.ReadOther
        elif state == ReadState.ReadOther:
            if char.isspace():
                state = ReadState.ReadSpace
            elif char.isalnum():
                # an alphanumeric char follows a non-alphanumeric run
                out_str += " "
                insertions.append(idx)
                state = ReadState.ReadAlphaNum
            else:
                pass
        out_str += char
return out_str, insertions
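# Minimal illustrative check (not part of the original file). insert_spaces
# separates alphanumeric runs from other non-space characters and records the
# character indices (in the original string) where a space was inserted.
def _demo_insert_spaces() -> None:
    out_str, insertions = insert_spaces("ab.cd ef")
    # '.' at index 2 and 'c' at index 3 each trigger an inserted space:
    assert out_str == "ab . cd ef"
    assert insertions == [2, 3]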
def lower_bound(a: List[int], x: int) -> int:
lo: int = 0
hi: int = len(a)
while lo < hi:
mid = (lo + hi) // 2
if a[mid] < x:
lo = mid + 1
else:
hi = mid
return lo
def align_start(start: int, starts: List[int]) -> int:
new_start: int = start
if start not in starts:
if len(starts) > 0:
lb = lower_bound(starts, start)
if lb == len(starts) or starts[lb] != start:
new_start = starts[max(0, lb - 1)]
return new_start
def align_end(end: int, ends: List[int]) -> int:
new_end: int = end
if end not in ends:
if len(ends) > 0:
lb = lower_bound(ends, end)
if lb < len(ends):
new_end = ends[lb]
return new_end
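# Small illustrative example (not part of the original file). align_start
# snaps a start offset down to the nearest known word start, and align_end
# snaps an end offset up to the nearest known word end.
def _demo_alignment() -> None:
    starts = [0, 5, 12]
    ends = [4, 11, 16]
    assert align_start(7, starts) == 5   # inside the word spanning [5, 11) -> snap down to 5
    assert align_end(9, ends) == 11      # snap up to the word end at 11
    assert align_start(5, starts) == 5   # already a word start -> unchanged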
def pieces_to_texts(
texts_pieces_token_ids: List[List[int]],
texts: List[List[str]],
texts_mention_offsets: List[List[int]],
texts_mention_lengths: List[List[int]],
bos_idx: int,
eos_idx: int,
max_seq_len: int = 256,
):
"""
    Takes an array of sentencepiece tokens per word token together with the
    original word-tokenized texts, and converts the word-tokenized batch to a
    sentencepiece-tokenized batch. Mention offsets and lengths are converted
    to sentencepiece token positions as well.
Inputs:
1) texts_pieces_token_ids: List with sp tokens per text token
    2) texts: original word-tokenized texts
3) texts_mention_offsets: mention offsets in original texts
4) texts_mention_lengths: mention lengths in original texts
5) bos_idx: tokenizer bos index
6) eos_idx: tokenizer eos index
7) max_seq_len: tokenizer max sequence length
Outputs:
new_texts_token_ids: List[List[int]] - text batch with sp tokens
new_seq_lengths: List[int] - sp tokenized texts lengths
new_mention_offsets: List[List[int]] - converted mention offsets
new_mention_lengths: List[List[int]] - converted mention lengths
"""
new_texts_token_ids: List[List[int]] = []
new_seq_lengths: List[int] = []
new_mention_offsets: List[List[int]] = []
new_mention_lengths: List[List[int]] = []
tokens_mapping: List[List[List[int]]] = [] # bs x idx x 2
pieces_offset = 0
for text, old_mention_offsets, old_mention_lengths in zip(
texts,
texts_mention_offsets,
texts_mention_lengths,
):
mapping: List[Tuple[int, int]] = []
text_token_ids: List[int] = [bos_idx]
mention_offsets: List[int] = []
mention_lengths: List[int] = []
for token_ids in texts_pieces_token_ids[
pieces_offset : pieces_offset + len(text)
]:
token_ids = token_ids[1:-1]
current_pos = len(text_token_ids)
mapping.append((current_pos, current_pos + len(token_ids)))
text_token_ids.extend(token_ids)
text_token_ids = text_token_ids[: max_seq_len - 1]
text_token_ids.append(eos_idx)
for old_offset, old_length in zip(old_mention_offsets, old_mention_lengths):
new_offset = mapping[old_offset][0]
new_end = mapping[old_offset + old_length - 1][1]
new_length = new_end - new_offset
if new_end > max_seq_len - 1:
break
mention_offsets.append(new_offset)
mention_lengths.append(new_length)
new_texts_token_ids.append(text_token_ids)
new_seq_lengths.append(len(text_token_ids))
new_mention_offsets.append(mention_offsets)
new_mention_lengths.append(mention_lengths)
mapping = [[start, end] for start, end in mapping if end < max_seq_len]
tokens_mapping.append(mapping)
pieces_offset += len(text)
return (
new_texts_token_ids,
new_seq_lengths,
new_mention_offsets,
new_mention_lengths,
tokens_mapping,
)
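# Illustrative sketch (not part of the original file). pieces_to_texts
# stitches per-word sentencepiece ids back into one sequence per text and
# remaps word-level mention offsets/lengths to sentencepiece positions. The
# token ids below are made up; each inner list mimics a tokenizer output that
# already carries BOS/EOS (they are stripped by the [1:-1] slice above).
def _demo_pieces_to_texts() -> None:
    texts = [["New", "York", "city"]]
    texts_pieces_token_ids = [[0, 17, 2], [0, 21, 22, 2], [0, 30, 2]]
    token_ids, seq_lens, offsets, lengths, mapping = pieces_to_texts(
        texts_pieces_token_ids,
        texts,
        texts_mention_offsets=[[0]],  # mention "New York" starts at word 0
        texts_mention_lengths=[[2]],  # and spans two words
        bos_idx=0,
        eos_idx=2,
    )
    assert token_ids == [[0, 17, 21, 22, 30, 2]]
    assert seq_lens == [6]
    assert offsets == [[1]] and lengths == [[3]]
    assert mapping == [[[1, 2], [2, 4], [4, 5]]]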
@torch.jit.script
def pad_tokens_mapping(tokens_mapping: List[List[List[int]]]) -> List[List[List[int]]]:
seq_lens: List[int] = []
for seq in tokens_mapping:
seq_lens.append(len(seq))
pad_to_length = max(seq_lens)
for mapping in tokens_mapping:
padding = pad_to_length - len(mapping)
if padding >= 0:
for _ in range(padding):
mapping.append([0, 1])
else:
for _ in range(-padding):
mapping.pop()
return tokens_mapping
@torch.jit.script
def pad_2d(
batch: List[List[int]], seq_lens: List[int], pad_idx: int, max_len: int = -1
) -> List[List[int]]:
pad_to_length = max(seq_lens)
if max_len > 0:
pad_to_length = min(pad_to_length, max_len)
for sentence in batch:
padding = pad_to_length - len(sentence)
if padding >= 0:
for _ in range(padding):
sentence.append(pad_idx)
else:
for _ in range(-padding):
sentence.pop()
return batch
class JointELCollate(torch.nn.Module):
def __init__(
self,
pad_idx: int = 1,
token_ids_column: str = "input_ids",
seq_lens_column: str = "seq_lens",
pad_mask_column: str = "attention_mask",
mention_pad_idx: int = 0,
mention_offsets_column: str = "mention_offsets",
mention_lengths_column: str = "mention_lengths",
mentions_seq_lengths_column: str = "mentions_seq_lengths",
entities_column: str = "entities",
tokens_mapping_column: str = "tokens_mapping",
sp_tokens_boundaries_column: str = "sp_tokens_boundaries",
insertions_column: str = "insertions",
):
super().__init__()
self._pad_idx = pad_idx
self.token_ids_column = token_ids_column
self.seq_lens_column = seq_lens_column
self.pad_mask_column = pad_mask_column
self._mention_pad_idx = mention_pad_idx
self.mention_offsets_column = mention_offsets_column
self.mention_lengths_column = mention_lengths_column
self.entities_column = entities_column
self.mentions_seq_lengths_column = mentions_seq_lengths_column
self.tokens_mapping_column = tokens_mapping_column
self.sp_tokens_boundaries_column = sp_tokens_boundaries_column
self.insertions_column = insertions_column
def forward(self, batch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
token_ids = batch[self.token_ids_column]
assert torch.jit.isinstance(token_ids, List[List[int]])
seq_lens = batch[self.seq_lens_column]
assert torch.jit.isinstance(seq_lens, List[int])
tokens_mapping = batch[self.tokens_mapping_column]
assert torch.jit.isinstance(tokens_mapping, List[List[List[int]]])
pad_token_ids = pad_sequence(
[torch.tensor(ids, dtype=torch.long) for ids in token_ids],
batch_first=True,
padding_value=float(self._pad_idx),
)
pad_mask = torch.ne(pad_token_ids, self._pad_idx).to(dtype=torch.long)
model_inputs: Dict[str, torch.Tensor] = {
self.token_ids_column: pad_token_ids,
self.pad_mask_column: pad_mask,
}
model_inputs[self.tokens_mapping_column] = torch.tensor(
pad_tokens_mapping(tokens_mapping),
dtype=torch.long,
)
if self.mention_offsets_column in batch:
mention_offsets = batch[self.mention_offsets_column]
assert torch.jit.isinstance(mention_offsets, List[List[int]])
mention_lengths = batch[self.mention_lengths_column]
assert torch.jit.isinstance(mention_lengths, List[List[int]])
mentions_seq_lengths = batch[self.mentions_seq_lengths_column]
assert torch.jit.isinstance(mentions_seq_lengths, List[int])
entities = batch[self.entities_column]
assert torch.jit.isinstance(entities, List[List[int]])
model_inputs[self.mention_offsets_column] = torch.tensor(
pad_2d(
mention_offsets,
seq_lens=mentions_seq_lengths,
pad_idx=self._mention_pad_idx,
),
dtype=torch.long,
)
model_inputs[self.mention_lengths_column] = torch.tensor(
pad_2d(
mention_lengths,
seq_lens=mentions_seq_lengths,
pad_idx=self._mention_pad_idx,
),
dtype=torch.long,
)
model_inputs[self.entities_column] = torch.tensor(
pad_2d(
entities,
seq_lens=mentions_seq_lengths,
pad_idx=self._mention_pad_idx,
),
dtype=torch.long,
)
if self.sp_tokens_boundaries_column in batch:
sp_tokens_boundaries = batch[self.sp_tokens_boundaries_column]
assert torch.jit.isinstance(sp_tokens_boundaries, List[List[List[int]]])
model_inputs[self.sp_tokens_boundaries_column] = torch.tensor(
pad_tokens_mapping(sp_tokens_boundaries),
dtype=torch.long,
)
if self.insertions_column in batch:
insertions = batch[self.insertions_column]
assert torch.jit.isinstance(insertions, List[List[int]])
insertions_seq_lens: List[int] = []
for seq in insertions:
insertions_seq_lens.append(len(seq))
model_inputs[self.insertions_column] = torch.tensor(
pad_2d(
insertions,
seq_lens=insertions_seq_lens,
pad_idx=-1,
),
dtype=torch.long,
)
return model_inputs
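# Runnable illustrative example (not part of the original file): collating a
# tiny hand-built batch. Only the mandatory columns plus the mention-level
# columns are provided; every value is made up.
def _demo_joint_el_collate() -> None:
    collate = JointELCollate(pad_idx=1)
    batch: Dict[str, Any] = {
        "input_ids": [[0, 17, 21, 2], [0, 30, 2]],
        "seq_lens": [4, 3],
        "tokens_mapping": [[[1, 2], [2, 3]], [[1, 2]]],
        "mention_offsets": [[1], [1]],
        "mention_lengths": [[2], [1]],
        "mentions_seq_lengths": [1, 1],
        "entities": [[7], [9]],
    }
    out = collate(batch)
    assert out["input_ids"].shape == (2, 4)  # second row padded with pad_idx=1
    assert out["attention_mask"].tolist() == [[1, 1, 1, 1], [1, 1, 1, 0]]
    assert out["tokens_mapping"].shape == (2, 2, 2)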
class JointELTransform(HFTransform):
def __init__(
self,
model_path: str = "xlm-roberta-base",
max_seq_len: int = 256,
texts_column: str = "texts",
mention_offsets_column: str = "mention_offsets",
mention_lengths_column: str = "mention_lengths",
mentions_seq_lengths_column: str = "mentions_seq_lengths",
entities_column: str = "entities",
token_ids_column: str = "input_ids",
seq_lens_column: str = "seq_lens",
pad_mask_column: str = "attention_mask",
tokens_mapping_column: str = "tokens_mapping",
):
super().__init__(model_path=model_path)
if 'xlm' in model_path:
self.bos_idx = self.tokenizer.bos_token_id
self.eos_idx = self.tokenizer.eos_token_id
elif 'bert' in model_path:
self.bos_idx = self.tokenizer.cls_token_id
self.eos_idx = self.tokenizer.sep_token_id
self.max_seq_len = max_seq_len
self.texts_column = texts_column
self.token_ids_column = token_ids_column
self.seq_lens_column = seq_lens_column
self.pad_mask_column = pad_mask_column
self.mention_offsets_column = mention_offsets_column
self.mention_lengths_column = mention_lengths_column
self.mentions_seq_lengths_column = mentions_seq_lengths_column
self.entities_column = entities_column
self.tokens_mapping_column = tokens_mapping_column
self._collate = JointELCollate(
pad_idx=self.tokenizer.pad_token_id,
token_ids_column=token_ids_column,
seq_lens_column=seq_lens_column,
pad_mask_column=pad_mask_column,
mention_offsets_column=mention_offsets_column,
mention_lengths_column=mention_lengths_column,
mentions_seq_lengths_column=mentions_seq_lengths_column,
entities_column=entities_column,
tokens_mapping_column=tokens_mapping_column,
)
def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
texts = batch[self.texts_column]
        assert torch.jit.isinstance(texts, List[List[str]])
        mention_offsets = batch[self.mention_offsets_column]
        assert torch.jit.isinstance(mention_offsets, List[List[int]])
        mention_lengths = batch[self.mention_lengths_column]
        assert torch.jit.isinstance(mention_lengths, List[List[int]])
        entities = batch[self.entities_column]
        assert torch.jit.isinstance(entities, List[List[int]])
texts_pieces = [token for tokens in texts for token in tokens]
texts_pieces_token_ids: List[List[int]] = super().forward(
texts_pieces
)
(
token_ids,
seq_lens,
mention_offsets,
mention_lengths,
tokens_mapping,
) = pieces_to_texts(
texts_pieces_token_ids,
texts,
mention_offsets,
mention_lengths,
bos_idx=self.bos_idx,
eos_idx=self.eos_idx,
max_seq_len=self.max_seq_len,
)
entities = [
text_entities[: len(text_mention_offsets)]
for text_entities, text_mention_offsets in zip(entities, mention_offsets)
]
mentions_seq_lens: List[int] = [
len(text_mention_offsets) for text_mention_offsets in mention_offsets
]
return {
self.token_ids_column: token_ids,
self.seq_lens_column: seq_lens,
self.mention_offsets_column: mention_offsets,
self.mention_lengths_column: mention_lengths,
self.mentions_seq_lengths_column: mentions_seq_lens,
self.entities_column: entities,
self.tokens_mapping_column: tokens_mapping,
}
def forward(self, batch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
return self._collate(self.transform(batch))
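# Illustrative sketch (not part of the original file) of the input schema
# expected by JointELTransform.forward. Texts are pre-tokenized into words,
# and mention offsets/lengths index into those word lists; the entity ids
# here are made up. Constructing the transform downloads the
# "xlm-roberta-base" tokenizer, so this remains a sketch rather than a test.
def _demo_joint_el_transform() -> None:
    transform = JointELTransform(model_path="xlm-roberta-base")
    batch: Dict[str, Any] = {
        "texts": [["Barack", "Obama", "visited", "Paris"]],
        "mention_offsets": [[0, 3]],  # word indices where mentions start
        "mention_lengths": [[2, 1]],  # mention lengths in words
        "entities": [[101, 202]],     # placeholder entity ids
    }
    model_inputs = transform(batch)   # dict of padded torch.Tensor values
    print({name: tensor.shape for name, tensor in model_inputs.items()})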
class JointELXlmrRawTextTransform(SPMTransform):
def __init__(
self,
sp_model_path: Optional[str] = None,
vocab_path: Optional[str] = None,
max_seq_len: int = 256,
insert_spaces: bool = False,
mention_boundaries_on_word_boundaries: bool = False,
align_mention_offsets_to_word_boundaries: bool = False,
texts_column: str = "texts",
mention_offsets_column: str = "mention_offsets",
mention_lengths_column: str = "mention_lengths",
mentions_seq_lengths_column: str = "mentions_seq_lengths",
entities_column: str = "entities",
token_ids_column: str = "input_ids",
seq_lens_column: str = "seq_lens",
pad_mask_column: str = "attention_mask",
tokens_mapping_column: str = "tokens_mapping",
sp_tokens_boundaries_column: str = "sp_tokens_boundaries",
insertions_column: str = "insertions",
):
super().__init__(
sp_model_path=sp_model_path,
max_seq_len=max_seq_len,
add_special_tokens=False,
)
self.bos_idx = 0
self.eos_idx = 2
self.pad_idx = 1
self.max_seq_len = max_seq_len
self.insert_spaces = insert_spaces
self.mention_boundaries_on_word_boundaries = (
mention_boundaries_on_word_boundaries
)
self.align_mention_offsets_to_word_boundaries = (
align_mention_offsets_to_word_boundaries
)
self.texts_column = texts_column
self.mention_offsets_column = mention_offsets_column
self.mention_lengths_column = mention_lengths_column
self.mentions_seq_lengths_column = mentions_seq_lengths_column
self.entities_column = entities_column
self.token_ids_column = token_ids_column
self.seq_lens_column = seq_lens_column
self.tokens_mapping_column = tokens_mapping_column
self.sp_tokens_boundaries_column = sp_tokens_boundaries_column
self.insertions_column = insertions_column
self._collate = JointELCollate(
pad_idx=self.pad_idx,
token_ids_column=token_ids_column,
seq_lens_column=seq_lens_column,
pad_mask_column=pad_mask_column,
mention_offsets_column=mention_offsets_column,
mention_lengths_column=mention_lengths_column,
mentions_seq_lengths_column=mentions_seq_lengths_column,
entities_column=entities_column,
tokens_mapping_column=tokens_mapping_column,
sp_tokens_boundaries_column=sp_tokens_boundaries_column,
)
def _calculate_alpha_num_boundaries(self, texts: List[str]) -> List[List[List[int]]]:
"""Returns for each text, a list of lists of start and end indices of alpha-numeric substrings (~=words)."""
alpha_num_boundaries: List[List[List[int]]] = []
for text in texts:
example_alpha_num_boundaries: List[List[int]] = []
cur_alpha_num_start: int = -1
for idx, char in enumerate(text):
if char.isalnum():
if cur_alpha_num_start == -1:
cur_alpha_num_start = idx
else:
if cur_alpha_num_start != -1:
example_alpha_num_boundaries.append([cur_alpha_num_start, idx])
cur_alpha_num_start = -1
if cur_alpha_num_start != -1:
example_alpha_num_boundaries.append([cur_alpha_num_start, len(text)])
alpha_num_boundaries.append(example_alpha_num_boundaries)
return alpha_num_boundaries
def _calculate_token_mapping(
self,
sp_token_ids: List[List[int]],
sp_token_boundaries: List[List[List[int]]],
word_boundaries: List[List[List[int]]],
) -> List[List[List[int]]]:
        # Prepare the list of possible mention (start, end) pairs in terms of SP tokens.
if self.mention_boundaries_on_word_boundaries:
token_mapping: List[List[List[int]]] = []
for ex_word_boundaries, ex_sp_token_boundaries in zip(
word_boundaries, sp_token_boundaries
):
ex_token_mapping: List[List[int]] = []
sp_idx = 0
for start, end in ex_word_boundaries:
while (
sp_idx < len(ex_sp_token_boundaries)
and start >= ex_sp_token_boundaries[sp_idx][1]
):
sp_idx += 1
word_sp_start = sp_idx
word_sp_end = sp_idx
while (
word_sp_end < len(ex_sp_token_boundaries)
and end >= ex_sp_token_boundaries[word_sp_end][1]
):
word_sp_end += 1
# check if end token <= max_seq_len - 2 (take into account EOS and BOS tokens)
if word_sp_end <= self.max_seq_len - 2:
                        # shift word_sp_start and word_sp_end by 1 to account for the prepended BOS token
ex_token_mapping.append([word_sp_start + 1, word_sp_end + 1])
else:
break
token_mapping.append(ex_token_mapping)
return token_mapping
else:
# Consider any SP token could be a start or end of the mention.
return [
[
[start, start + 1]
for start in range( # start in range from 1 to maximum 255
1, min(len(example_sp_token_ids) - 2, self.max_seq_len - 2) + 1
)
]
for example_sp_token_ids in sp_token_ids
]
def _convert_mention_offsets(
self,
sp_token_boundaries: List[List[List[int]]],
char_offsets: List[List[int]],
char_lengths: List[List[int]],
) -> Tuple[List[List[int]], List[List[int]]]:
# TODO: Doesn't this do something similar to _calculate_token_mapping?
sp_offsets: List[List[int]] = []
sp_lengths: List[List[int]] = []
for example_char_offsets, example_char_lengths, example_token_boundaries in zip(
char_offsets, char_lengths, sp_token_boundaries
):
example_sp_offsets: List[int] = []
example_sp_lengths: List[int] = []
for offset, length in zip(example_char_offsets, example_char_lengths):
                # TODO: There might be a bug here, need to write a test with edge cases
token_idx = 0
while ( # First we find the first token that starts at or after the offset
token_idx < len(example_token_boundaries)
and example_token_boundaries[token_idx][0] <= offset
):
token_idx += 1
if ( # Then if we overshoot, we decrease by one
token_idx == len(example_token_boundaries)
or example_token_boundaries[token_idx][0] != offset
):
token_idx -= 1
example_sp_offsets.append(token_idx)
token_start_idx = token_idx
                while (  # For the end: advance to the last token covered by the mention (the first token whose end reaches offset + length)
token_idx < len(example_token_boundaries)
and example_token_boundaries[token_idx][1] < offset + length
):
token_idx += 1
example_sp_lengths.append(token_idx - token_start_idx + 1)
# take into account BOS token and shift offsets by 1
# also remove all pairs that go beyond max_seq_length - 1
shifted_example_sp_offsets: List[int] = []
for offset, length in zip(example_sp_offsets, example_sp_lengths):
if 1 + offset + length <= self.max_seq_len - 1:
shifted_example_sp_offsets.append(offset + 1)
else:
                    # this mention crosses the length limit; skip it and all subsequent mentions
break
example_sp_offsets = shifted_example_sp_offsets
example_sp_lengths = example_sp_lengths[: len(example_sp_offsets)]
sp_offsets.append(example_sp_offsets)
sp_lengths.append(example_sp_lengths)
return sp_offsets, sp_lengths
def _adjust_mention_offsets_and_lengths(
self,
offsets: List[List[int]],
lengths: List[List[int]],
insertions: List[List[int]],
) -> Tuple[List[List[int]], List[List[int]]]:
new_offsets: List[List[int]] = []
new_lengths: List[List[int]] = []
for example_offsets, example_lengths, example_insertions in zip(
offsets, lengths, insertions
):
new_example_offsets: List[int] = []
new_example_lengths: List[int] = []
            # assume that mentions (offsets, lengths) are sorted by offset
insertion_idx = 0
current_shift = 0
for offset, length in zip(example_offsets, example_lengths):
while (
insertion_idx < len(example_insertions)
and example_insertions[insertion_idx] <= offset
):
current_shift += 1
insertion_idx += 1
new_offset = offset + current_shift
new_length = length
length_insertion_idx = insertion_idx
while (
length_insertion_idx < len(example_insertions)
and example_insertions[length_insertion_idx] < offset + length
):
new_length += 1
length_insertion_idx += 1
new_example_offsets.append(new_offset)
new_example_lengths.append(new_length)
new_offsets.append(new_example_offsets)
new_lengths.append(new_example_lengths)
return new_offsets, new_lengths
def _insert_spaces_to_texts(
self, texts: List[str]
) -> Tuple[List[str], List[List[int]]]:
all_texts: List[str] = []
all_insertions: List[List[int]] = []
for text in texts:
out_text, insertions = insert_spaces(text)
all_texts.append(out_text)
all_insertions.append(insertions)
return all_texts, all_insertions
def _align_mention_offsets_to_word_boundaries(
self,
mention_offsets: List[List[int]],
mention_lengths: List[List[int]],
word_boundaries: List[List[List[int]]],
) -> Tuple[List[List[int]], List[List[int]]]:
"""
        In some training examples the ground-truth offsets point to the
        middle of a word, e.g.:
```
Playlist in "#NuevaPlaylist ➡ Desempo"
mente in "simplemente retirarte"
```
        We align such offsets to word boundaries, so in the examples above
        `NuevaPlaylist` and `simplemente` are marked as mentions.
"""
new_mention_offsets: List[List[int]] = []
new_mention_lengths: List[List[int]] = []
for ex_mention_offsets, ex_mention_length, ex_word_boundaries in zip(
mention_offsets,
mention_lengths,
word_boundaries,
):
starts: List[int] = []
ends: List[int] = []
for wb in ex_word_boundaries:
starts.append(wb[0])
ends.append(wb[1])
ex_new_mention_offsets: List[int] = []
ex_new_mention_lengths: List[int] = []
for offset, length in zip(ex_mention_offsets, ex_mention_length):
start = align_start(offset, starts)
end = align_end(offset + length, ends)
ex_new_mention_offsets.append(start)
ex_new_mention_lengths.append(end - start)
new_mention_offsets.append(ex_new_mention_offsets)
new_mention_lengths.append(ex_new_mention_lengths)
return new_mention_offsets, new_mention_lengths
def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
texts = batch[self.texts_column]
assert torch.jit.isinstance(texts, List[str])
insertions: List[List[int]] = []
if self.insert_spaces:
texts, insertions = self._insert_spaces_to_texts(texts)
word_boundaries = self._calculate_alpha_num_boundaries(texts)
sp_tokens_with_indices: List[List[Tuple[int, int, int]]] = super().forward(texts)
sp_token_ids: List[List[int]] = [
[sp_token for sp_token, _, _ in tokens] for tokens in sp_tokens_with_indices
]
# append bos and eos tokens
sp_token_ids = [[self.bos_idx] + tokens + [self.eos_idx] for tokens in sp_token_ids]
sp_token_boundaries: List[List[List[int]]] = [
[[start, end] for _, start, end in tokens]
for tokens in sp_tokens_with_indices
]
seq_lens: List[int] = [
len(example_token_ids) for example_token_ids in sp_token_ids
]
tokens_mapping: List[List[List[int]]] = self._calculate_token_mapping(
sp_token_ids,
sp_token_boundaries,
word_boundaries,
)
output: Dict[str, Any] = {
self.token_ids_column: sp_token_ids,
self.seq_lens_column: seq_lens,
self.tokens_mapping_column: tokens_mapping,
self.sp_tokens_boundaries_column: sp_token_boundaries,
}
if self.insert_spaces:
output[self.insertions_column] = insertions
if self.mention_offsets_column in batch:
mention_offsets = batch[self.mention_offsets_column]
assert torch.jit.isinstance(mention_offsets, List[List[int]])
mention_lengths = batch[self.mention_lengths_column]
assert torch.jit.isinstance(mention_lengths, List[List[int]])
entities = batch[self.entities_column]
assert torch.jit.isinstance(entities, List[List[int]])
if self.insert_spaces:
(
mention_offsets,
mention_lengths,
) = self._adjust_mention_offsets_and_lengths(
mention_offsets, mention_lengths, insertions
)
if self.align_mention_offsets_to_word_boundaries:
(
mention_offsets,
mention_lengths,
) = self._align_mention_offsets_to_word_boundaries(
mention_offsets,
mention_lengths,
word_boundaries,
)
sp_offsets, sp_lengths = self._convert_mention_offsets(
sp_token_boundaries,
mention_offsets,
mention_lengths,
)
entities: List[List[int]] = [
example_entities[: len(example_mention_offsets)]
for example_entities, example_mention_offsets in zip(
entities, sp_offsets
)
]
mentions_seq_lens: List[int] = [
len(example_mention_offsets) for example_mention_offsets in sp_offsets
]
output[self.mention_offsets_column] = sp_offsets
output[self.mention_lengths_column] = sp_lengths
output[self.mentions_seq_lengths_column] = mentions_seq_lens
output[self.entities_column] = entities
return output
def forward(self, batch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
return self._collate(self.transform(batch))
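# Illustrative sketch (not part of the original file). Unlike JointELTransform,
# this transform consumes raw (untokenized) strings with character-level
# mention offsets and lengths. It needs a SentencePiece model on disk (the
# path argument below is a placeholder), so this is a sketch rather than a test.
def _demo_joint_el_xlmr_raw_text_transform(sp_model_path: str) -> None:
    transform = JointELXlmrRawTextTransform(sp_model_path=sp_model_path)
    batch: Dict[str, Any] = {
        "texts": ["Barack Obama visited Paris"],
        "mention_offsets": [[0, 21]],  # character offsets of mention starts
        "mention_lengths": [[12, 5]],  # mention lengths in characters
        "entities": [[101, 202]],      # placeholder entity ids
    }
    model_inputs = transform(batch)    # dict of padded torch.Tensor values
    print({name: tensor.shape for name, tensor in model_inputs.items()})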
|
BELA-main
|
bela/transforms/joint_el_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import faiss
import faiss.contrib.torch_utils
import hydra
import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from bela.conf import (
DataModuleConf,
ModelConf,
OptimConf,
TransformConf,
)
import datetime
import os
import numpy as np
logger = logging.getLogger(__name__)
class ClassificationMetrics(NamedTuple):
f1: float
precision: float
recall: float
support: int
tp: int
fp: int
fn: int
    # Bag-of-Entities metrics: targets and predictions are compared as sets
    # of entities, instead of requiring exact (position, entity) matches.
boe_f1: float
boe_precision: float
boe_recall: float
boe_support: int
boe_tp: int
boe_fp: int
boe_fn: int
class ClassificationHead(nn.Module):
def __init__(
self,
ctxt_output_dim=768,
):
super(ClassificationHead, self).__init__()
self.mlp = nn.Sequential(
# [mention, candidate, mention - candidate, mention * candidate, md_score, dis_score]
nn.Linear(4 * ctxt_output_dim + 2, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, 1),
)
def forward(self, mentions_repr, entities_repr, md_scores, dis_scores):
features = [
mentions_repr,
entities_repr,
mentions_repr - entities_repr,
mentions_repr * entities_repr,
md_scores,
dis_scores,
]
features = torch.cat(features, 1)
return self.mlp(features)
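# Shape sanity sketch (not part of the original file). The head consumes the
# concatenation [mention, entity, mention - entity, mention * entity,
# md_score, dis_score], i.e. 4 * ctxt_output_dim + 2 features per pair, and
# emits one logit per (mention, candidate entity) pair.
def _demo_classification_head() -> None:
    head = ClassificationHead(ctxt_output_dim=8)
    mentions_repr = torch.randn(5, 8)
    entities_repr = torch.randn(5, 8)
    md_scores = torch.rand(5, 1)
    dis_scores = torch.rand(5, 1)
    assert head(mentions_repr, entities_repr, md_scores, dis_scores).shape == (5, 1)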
class SaliencyClassificationHead(nn.Module):
def __init__(
self,
ctxt_output_dim=768,
):
super(SaliencyClassificationHead, self).__init__()
self.mlp = nn.Sequential(
nn.Linear(9 * ctxt_output_dim + 4, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, 1),
)
def forward(
self, cls_tokens_repr, mentions_repr, entities_repr, md_scores, dis_scores
):
cls_mention_dot_product = torch.sum(
cls_tokens_repr * mentions_repr, 1
).unsqueeze(-1)
cls_entity_dot_product = torch.sum(
cls_tokens_repr * entities_repr, 1
).unsqueeze(-1)
features = [
cls_tokens_repr,
mentions_repr,
entities_repr,
mentions_repr - entities_repr,
mentions_repr * entities_repr,
cls_tokens_repr - mentions_repr,
cls_tokens_repr * mentions_repr,
cls_tokens_repr - entities_repr,
cls_tokens_repr * entities_repr,
md_scores,
dis_scores,
cls_mention_dot_product,
cls_entity_dot_product,
]
features = torch.cat(features, 1)
return self.mlp(features)
class SpanEncoder(nn.Module):
def __init__(
self,
mention_aggregation="linear",
ctxt_output_dim=768,
cand_output_dim=768,
dropout=0.1,
):
super(SpanEncoder, self).__init__()
if mention_aggregation == "linear":
self.mention_mlp = nn.Linear(ctxt_output_dim * 2, cand_output_dim)
# elif mention_aggregation == "mlp":
# self.mention_mlp = nn.Sequential(
# nn.Linear(ctxt_output_dim, ctxt_output_dim),
# nn.ReLU(),
# nn.Dropout(dropout),
# nn.Linear(ctxt_output_dim, cand_output_dim),
# )
else:
raise NotImplementedError()
def forward(self, text_encodings, mention_offsets, mention_lengths):
idx = (
torch.arange(mention_offsets.shape[0])
.unsqueeze(1)
.repeat(1, mention_offsets.shape[1])
)
mention_starts = text_encodings[idx, mention_offsets]
mention_ends = text_encodings[
idx,
mention_lengths + mention_offsets - 1,
]
mention_emb = torch.cat([mention_starts, mention_ends], dim=2)
mention_encodings = self.mention_mlp(mention_emb)
return mention_encodings
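# Shape sanity sketch (not part of the original file). SpanEncoder gathers the
# encodings of each mention's first and last token and projects their
# concatenation back to cand_output_dim; padded mentions (length 0) simply
# produce unused rows.
def _demo_span_encoder() -> None:
    encoder = SpanEncoder(ctxt_output_dim=8, cand_output_dim=8)
    text_encodings = torch.randn(2, 10, 8)             # bs x seq_len x dim
    mention_offsets = torch.tensor([[1, 4], [2, 0]])   # bs x max_mentions
    mention_lengths = torch.tensor([[2, 3], [1, 0]])
    assert encoder(text_encodings, mention_offsets, mention_lengths).shape == (2, 2, 8)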
class MentionScoresHead(nn.Module):
def __init__(
self,
encoder_output_dim=768,
max_mention_length=10,
):
super(MentionScoresHead, self).__init__()
self.max_mention_length = max_mention_length
self.bound_classifier = nn.Linear(encoder_output_dim, 3)
def forward(self, text_encodings, mask_ctxt, tokens_mapping):
"""
        Returns scores for *inclusive* mention boundaries
"""
device = text_encodings.device
# (bs, seqlen, 3)
logits = self.bound_classifier(text_encodings)
# (bs, seqlen, 1); (bs, seqlen, 1); (bs, seqlen, 1)
# start_logprobs, end_logprobs, mention_logprobs = logits.split(1, dim=-1)
start_logprobs = logits[:, :, 0].squeeze(-1)
end_logprobs = logits[:, :, 1].squeeze(-1)
mention_logprobs = logits[:, :, 2].squeeze(-1)
# impossible to choose masked tokens as starts/ends of spans
start_logprobs[mask_ctxt != 1] = float("-inf")
end_logprobs[mask_ctxt != 1] = float("-inf")
mention_logprobs[mask_ctxt != 1] = float("-inf")
# take sum of log softmaxes:
# log p(mention) = log p(start_pos && end_pos) = log p(start_pos) + log p(end_pos)
# DIM: (bs, starts, ends)
mention_scores = start_logprobs.unsqueeze(2) + end_logprobs.unsqueeze(1)
# (bs, starts, ends)
mention_cum_scores = torch.zeros(
mention_scores.size(), dtype=mention_scores.dtype
).to(device)
# add ends
mention_logprobs_end_cumsum = torch.zeros(
mask_ctxt.size(0), dtype=mention_scores.dtype
).to(device)
for i in range(mask_ctxt.size(1)):
mention_logprobs_end_cumsum += mention_logprobs[:, i]
mention_cum_scores[:, :, i] += mention_logprobs_end_cumsum.unsqueeze(-1)
# subtract starts
mention_logprobs_start_cumsum = torch.zeros(
mask_ctxt.size(0), dtype=mention_scores.dtype
).to(device)
for i in range(mask_ctxt.size(1) - 1):
mention_logprobs_start_cumsum += mention_logprobs[:, i]
mention_cum_scores[
:, (i + 1), :
] -= mention_logprobs_start_cumsum.unsqueeze(-1)
# DIM: (bs, starts, ends)
mention_scores += mention_cum_scores
# DIM: (starts, ends, 2) -- tuples of [start_idx, end_idx]
mention_bounds = torch.stack(
[
torch.arange(mention_scores.size(1))
.unsqueeze(-1)
.expand(mention_scores.size(1), mention_scores.size(2)), # start idxs
torch.arange(mention_scores.size(1))
.unsqueeze(0)
.expand(mention_scores.size(1), mention_scores.size(2)), # end idxs
],
dim=-1,
).to(device)
# DIM: (starts, ends)
mention_sizes = (
mention_bounds[:, :, 1] - mention_bounds[:, :, 0] + 1
) # (+1 as ends are inclusive)
# Remove invalids (startpos > endpos, endpos > seqlen) and renormalize
# DIM: (bs, starts, ends)
# valid mention starts mask
select_indices = torch.cat(
[
torch.arange(tokens_mapping.shape[0])
.unsqueeze(1)
.repeat(1, tokens_mapping.shape[1])
.unsqueeze(-1),
tokens_mapping[:, :, 0].unsqueeze(-1).to(torch.device("cpu")),
],
-1,
).flatten(0, 1)
token_starts_mask = torch.zeros(mask_ctxt.size(), dtype=mask_ctxt.dtype)
token_starts_mask[select_indices[:, 0], select_indices[:, 1]] = 1
token_starts_mask[:, 0] = 0
# valid mention ends mask
select_indices = torch.cat(
[
torch.arange(tokens_mapping.shape[0])
.unsqueeze(1)
.repeat(1, tokens_mapping.shape[1])
.unsqueeze(-1),
(tokens_mapping[:, :, 1] - 1).unsqueeze(-1).to(torch.device("cpu")),
],
-1,
).flatten(0, 1)
token_ends_mask = torch.zeros(mask_ctxt.size(), dtype=mask_ctxt.dtype)
token_ends_mask[select_indices[:, 0], select_indices[:, 1]] = 1
token_ends_mask[:, 0] = 0
# valid mention starts*ends mask
valid_starts_ends_mask = torch.bmm(
token_starts_mask.unsqueeze(2), token_ends_mask.unsqueeze(1)
).to(device)
valid_mask = (
(mention_sizes.unsqueeze(0) > 0)
& torch.gt(mask_ctxt.unsqueeze(2), 0)
& torch.gt(valid_starts_ends_mask, 0)
)
# DIM: (bs, starts, ends)
        # (position 0, i.e. the BOS token, is never a valid span start or end)
mention_scores[~valid_mask] = float("-inf") # invalids have logprob=-inf (p=0)
# DIM: (bs, starts * ends)
mention_scores = mention_scores.view(mention_scores.size(0), -1)
# DIM: (bs, starts * ends, 2)
mention_bounds = mention_bounds.view(-1, 2)
mention_bounds = mention_bounds.unsqueeze(0).expand(
mention_scores.size(0), mention_scores.size(1), 2
)
if self.max_mention_length is not None:
mention_scores, mention_bounds = self.filter_by_mention_size(
mention_scores,
mention_bounds,
)
return mention_scores, mention_bounds
def batch_reshape_mask_left(
self,
input_t: torch.Tensor,
selected: torch.Tensor,
pad_idx: Union[int, float] = 0,
left_align_mask: Optional[torch.Tensor] = None,
):
"""
        Left-aligns all `selected` values in input_t, which is a batch of examples.
- input_t: >=2D tensor (N, M, *)
- selected: 2D torch.Bool tensor, 2 dims same size as first 2 dims of `input_t` (N, M)
- pad_idx represents the padding to be used in the output
- left_align_mask: if already precomputed, pass the alignment mask in
(mask on the output, corresponding to `selected` on the input)
Example:
input_t = [[1,2,3,4],[5,6,7,8]]
selected = [[0,1,0,1],[1,1,0,1]]
output = [[2,4,0],[5,6,8]]
"""
batch_num_selected = selected.sum(1)
max_num_selected = batch_num_selected.max()
# (bsz, 2)
repeat_freqs = torch.stack(
[batch_num_selected, max_num_selected - batch_num_selected], dim=-1
)
# (bsz x 2,)
repeat_freqs = repeat_freqs.view(-1)
if left_align_mask is None:
# (bsz, 2)
left_align_mask = (
torch.zeros(input_t.size(0), 2).to(input_t.device).to(torch.bool)
)
left_align_mask[:, 0] = 1
# (bsz x 2,): [1,0,1,0,...]
left_align_mask = left_align_mask.view(-1)
            # (bsz x max_num_selected,): [1 x repeat_freqs[0], 0 x (M - repeat_freqs[0]), 1 x repeat_freqs[1], 0 x (M - repeat_freqs[1]), ...]
left_align_mask = left_align_mask.repeat_interleave(repeat_freqs)
# (bsz, max_num_selected)
left_align_mask = left_align_mask.view(-1, max_num_selected)
# reshape to (bsz, max_num_selected, *)
input_reshape = (
torch.empty(left_align_mask.size() + input_t.size()[2:])
.to(input_t.device, input_t.dtype)
.fill_(pad_idx)
)
input_reshape[left_align_mask] = input_t[selected]
# (bsz, max_num_selected, *); (bsz, max_num_selected)
return input_reshape, left_align_mask
def prune_ctxt_mentions(
self,
mention_logits: torch.Tensor,
mention_bounds: torch.Tensor,
num_cand_mentions: int,
threshold: float,
):
"""
Prunes mentions based on mention scores/logits (by either
        `threshold` or `num_cand_mentions`, whichever yields fewer candidates)
Inputs:
mention_logits: torch.FloatTensor (bsz, num_total_mentions)
            mention_bounds: torch.IntTensor (bsz, num_total_mentions, 2)
num_cand_mentions: int
threshold: float
Returns:
torch.FloatTensor(bsz, max_num_pred_mentions): top mention scores/logits
torch.IntTensor(bsz, max_num_pred_mentions, 2): top mention boundaries
torch.BoolTensor(bsz, max_num_pred_mentions): mask on top mentions
torch.BoolTensor(bsz, total_possible_mentions): mask for reshaping from total possible mentions -> max # pred mentions
"""
# (bsz, num_cand_mentions); (bsz, num_cand_mentions)
num_cand_mentions = min(num_cand_mentions, mention_logits.shape[1])
top_mention_logits, mention_pos = mention_logits.topk(
num_cand_mentions, sorted=True
)
# (bsz, num_cand_mentions, 2)
# [:,:,0]: index of batch
# [:,:,1]: index into top mention in mention_bounds
mention_pos = torch.stack(
[
torch.arange(mention_pos.size(0))
.to(mention_pos.device)
.unsqueeze(-1)
.expand_as(mention_pos),
mention_pos,
],
dim=-1,
)
# (bsz, num_cand_mentions)
top_mention_pos_mask = torch.sigmoid(top_mention_logits) > threshold
# (total_possible_mentions, 2)
# tuples of [index of batch, index into mention_bounds] of what mentions to include
mention_pos = mention_pos[
top_mention_pos_mask
| (
# 2nd part of OR: if nothing is > threshold, use topK that are > -inf
((top_mention_pos_mask.sum(1) == 0).unsqueeze(-1))
& (top_mention_logits > float("-inf"))
)
]
mention_pos = mention_pos.view(-1, 2)
# (bsz, total_possible_mentions)
# mask of possible logits
mention_pos_mask = torch.zeros(mention_logits.size(), dtype=torch.bool).to(
mention_pos.device
)
mention_pos_mask[mention_pos[:, 0], mention_pos[:, 1]] = 1
# (bsz, max_num_pred_mentions, 2)
chosen_mention_bounds, chosen_mention_mask = self.batch_reshape_mask_left(
mention_bounds, mention_pos_mask, pad_idx=0
)
# (bsz, max_num_pred_mentions)
chosen_mention_logits, _ = self.batch_reshape_mask_left(
mention_logits,
mention_pos_mask,
pad_idx=float("-inf"),
left_align_mask=chosen_mention_mask,
)
return (
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
)
def filter_by_mention_size(
self, mention_scores: torch.Tensor, mention_bounds: torch.Tensor
):
"""
Filter all mentions > maximum mention length
mention_scores: torch.FloatTensor (bsz, num_mentions)
mention_bounds: torch.LongTensor (bsz, num_mentions, 2)
"""
# (bsz, num_mentions)
mention_bounds_mask = (
mention_bounds[:, :, 1] - mention_bounds[:, :, 0] <= self.max_mention_length
)
# (bsz, num_filtered_mentions)
mention_scores = mention_scores[mention_bounds_mask]
mention_scores = mention_scores.view(mention_bounds_mask.size(0), -1)
# (bsz, num_filtered_mentions, 2)
mention_bounds = mention_bounds[mention_bounds_mask]
mention_bounds = mention_bounds.view(mention_bounds_mask.size(0), -1, 2)
return mention_scores, mention_bounds
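# Illustrative sketch (not part of the original file) of pruning candidate
# mentions by score. The logits and inclusive [start, end] bounds below are
# made up but have the shapes produced by MentionScoresHead.forward.
def _demo_prune_ctxt_mentions() -> None:
    head = MentionScoresHead(encoder_output_dim=8, max_mention_length=4)
    mention_logits = torch.tensor([[2.0, -1.0, 0.5, float("-inf")]])
    mention_bounds = torch.tensor([[[1, 1], [1, 2], [2, 2], [3, 3]]])
    logits, bounds, mask, pos_mask = head.prune_ctxt_mentions(
        mention_logits, mention_bounds, num_cand_mentions=2, threshold=0.5
    )
    # Spans whose sigmoid(score) clears the threshold are kept ([1, 1] and
    # [2, 2] here), left-aligned and padded.
    print(logits, bounds, mask, pos_mask)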
class JointELTask(LightningModule):
def __init__(
self,
transform: TransformConf,
model: ModelConf,
datamodule: DataModuleConf,
optim: OptimConf,
embeddings_path: str,
faiss_index_path: Optional[str] = None,
n_retrieve_candidates: int = 10,
        eval_compure_recall_at: Tuple[int, ...] = (1, 10, 100),
warmup_steps: int = 0,
load_from_checkpoint: Optional[str] = None,
only_train_disambiguation: bool = False,
train_el_classifier: bool = True,
train_saliency: bool = True,
md_threshold: float = 0.2,
el_threshold: float = 0.4,
saliency_threshold: float = 0.4,
use_gpu_index: bool = False,
):
super().__init__()
# encoder setup
self.encoder_conf = model
self.optim_conf = optim
self.embeddings_path = embeddings_path
self.faiss_index_path = faiss_index_path
self.n_retrieve_candidates = n_retrieve_candidates
self.eval_compure_recall_at = eval_compure_recall_at
self.warmup_steps = warmup_steps
self.load_from_checkpoint = load_from_checkpoint
self.disambiguation_loss = nn.CrossEntropyLoss()
self.md_loss = nn.BCEWithLogitsLoss()
self.el_loss = nn.BCEWithLogitsLoss()
self.saliency_loss = nn.BCEWithLogitsLoss()
self.only_train_disambiguation = only_train_disambiguation
self.train_el_classifier = train_el_classifier
self.train_saliency = train_saliency
self.md_threshold = md_threshold
self.el_threshold = el_threshold
self.saliency_threshold = saliency_threshold
self.use_gpu_index = use_gpu_index
@staticmethod
def _get_encoder_state(state, encoder_name):
encoder_state = OrderedDict()
for key, value in state["state_dict"].items():
if key.startswith(encoder_name):
encoder_state[key[len(encoder_name) + 1 :]] = value
return encoder_state
def setup_gpu_index(self):
gpu_id = self.local_rank
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = gpu_id
flat_config.useFloat16 = True
res = faiss.StandardGpuResources()
self.faiss_index = faiss.GpuIndexFlatIP(res, self.embedding_dim, flat_config)
self.faiss_index.add(self.embeddings)
def setup(self, stage: str):
if stage == "test":
return
        # reset the call_configure_sharded_model_hook attribute so that we can configure the model below
self.call_configure_sharded_model_hook = False
self.embeddings = torch.load(self.embeddings_path)
self.embedding_dim = len(self.embeddings[0])
self.embeddings.requires_grad = False
self.encoder = hydra.utils.instantiate(
self.encoder_conf,
)
self.project_encoder_op = nn.Identity()
if self.encoder.embedding_dim != self.embedding_dim:
self.project_encoder_op = nn.Sequential(
nn.Linear(self.encoder.embedding_dim, self.embedding_dim),
nn.LayerNorm(self.embedding_dim),
)
self.span_encoder = SpanEncoder(
ctxt_output_dim=self.embedding_dim,
cand_output_dim=self.embedding_dim,
)
self.mention_encoder = MentionScoresHead(
encoder_output_dim=self.embedding_dim,
)
self.el_encoder = ClassificationHead(
ctxt_output_dim=self.embedding_dim,
)
if self.train_saliency:
self.saliency_encoder = SaliencyClassificationHead(
ctxt_output_dim=self.embedding_dim,
)
if self.load_from_checkpoint is not None:
logger.info(f"Load encoders state from {self.load_from_checkpoint}")
with open(self.load_from_checkpoint, "rb") as f:
checkpoint = torch.load(f, map_location=torch.device("cpu"))
encoder_state = self._get_encoder_state(checkpoint, "encoder")
self.encoder.load_state_dict(encoder_state)
span_encoder_state = self._get_encoder_state(checkpoint, "span_encoder")
self.span_encoder.load_state_dict(span_encoder_state)
project_encoder_op_state = self._get_encoder_state(
checkpoint, "project_encoder_op"
)
if len(project_encoder_op_state) > 0:
self.project_encoder_op.load_state_dict(project_encoder_op_state)
mention_encoder_state = self._get_encoder_state(
checkpoint, "mention_encoder"
)
if len(mention_encoder_state) > 0:
self.mention_encoder.load_state_dict(mention_encoder_state)
el_encoder_state = self._get_encoder_state(checkpoint, "el_encoder")
if len(el_encoder_state) > 0:
self.el_encoder.load_state_dict(el_encoder_state)
saliency_encoder_state = self._get_encoder_state(
checkpoint, "saliency_encoder"
)
if len(saliency_encoder_state) > 0 and self.train_saliency:
self.saliency_encoder.load_state_dict(saliency_encoder_state)
self.optimizer = hydra.utils.instantiate(self.optim_conf, self.parameters())
if self.use_gpu_index:
logger.info(f"Setup GPU index")
self.setup_gpu_index()
# self.embeddings = None
else:
logger.info(f"Setup CPU index")
assert self.faiss_index_path is not None
self.faiss_index = faiss.read_index(self.faiss_index_path)
def sim_score(self, mentions_repr, entities_repr):
# bs x emb_dim , bs x emb_dim
scores = torch.sum(mentions_repr * entities_repr, 1)
return scores
def forward(
self,
text_inputs,
attention_mask,
mention_offsets,
mention_lengths,
):
# encode query and contexts
_, last_layer = self.encoder(text_inputs, attention_mask)
text_encodings = last_layer
text_encodings = self.project_encoder_op(text_encodings)
mentions_repr = self.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
return text_encodings, mentions_repr
def configure_optimizers(self):
return self.optimizer
def _disambiguation_training_step(
self, mentions_repr, mention_offsets, mention_lengths, entities_ids
):
device = mentions_repr.get_device()
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
flat_entities_ids = entities_ids[mention_lengths != 0]
if flat_mentions_repr.shape[0] == 0:
return None
# obtain positive entities representations
if self.use_gpu_index:
entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in flat_entities_ids.tolist()
]
).to(device)
else:
entities_repr = self.embeddings[flat_entities_ids.to("cpu")].to(device)
# compute scores for positive entities
pos_scores = self.sim_score(flat_mentions_repr, entities_repr)
# retrieve candidates indices
if self.use_gpu_index:
(
_,
neg_cand_indices,
neg_cand_repr,
) = self.faiss_index.search_and_reconstruct(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
self.n_retrieve_candidates,
)
neg_cand_indices = torch.from_numpy(neg_cand_indices).to(device)
neg_cand_repr = torch.from_numpy(neg_cand_repr).to(device)
else:
(
_,
neg_cand_indices,
) = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
self.n_retrieve_candidates,
)
# get candidates embeddings
neg_cand_repr = (
self.embeddings[neg_cand_indices.flatten()]
.reshape(
neg_cand_indices.shape[0], # bs
neg_cand_indices.shape[1], # n_retrieve_candidates
self.embeddings.shape[1], # emb dim
)
.to(device)
)
neg_cand_indices = torch.from_numpy(neg_cand_indices).to(device)
# compute scores (bs x n_retrieve_candidates)
neg_cand_scores = torch.bmm(
flat_mentions_repr.unsqueeze(1), neg_cand_repr.transpose(1, 2)
).squeeze(1)
        # mask out the positive entities among the retrieved candidates (score -inf)
neg_cand_scores[
neg_cand_indices.eq(
flat_entities_ids.unsqueeze(1).repeat([1, self.n_retrieve_candidates])
)
] = float("-inf")
# append positive scores to neg scores (bs x (1 + n_retrieve_candidates))
scores = torch.hstack([pos_scores.unsqueeze(1), neg_cand_scores])
        # construct targets
targets = torch.tensor([0] * neg_cand_scores.shape[0]).to(device)
loss = self.disambiguation_loss(scores, targets)
return loss
def _md_training_step(
self,
text_encodings,
text_pad_mask,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
):
device = text_encodings.get_device()
mention_logits, mention_bounds = self.mention_encoder(
text_encodings,
text_pad_mask,
tokens_mapping,
)
gold_mention_ends = gold_mention_offsets + gold_mention_lengths - 1
gold_mention_bounds = torch.cat(
[gold_mention_offsets.unsqueeze(-1), gold_mention_ends.unsqueeze(-1)], -1
)
gold_mention_bounds[gold_mention_lengths == 0] = -1
gold_mention_pos_idx = (
(
mention_bounds.unsqueeze(1)
- gold_mention_bounds.unsqueeze(
2
) # (bs, num_mentions, start_pos * end_pos, 2)
)
.abs()
.sum(-1)
== 0
).nonzero()
# (bs, total_possible_spans)
gold_mention_binary = torch.zeros(
mention_logits.size(), dtype=mention_logits.dtype
).to(device)
gold_mention_binary[gold_mention_pos_idx[:, 0], gold_mention_pos_idx[:, 2]] = 1
# prune masked spans
mask = mention_logits != float("-inf")
masked_mention_logits = mention_logits[mask]
masked_gold_mention_binary = gold_mention_binary[mask]
return (
self.md_loss(masked_mention_logits, masked_gold_mention_binary),
mention_logits,
mention_bounds,
)
def _el_training_step(
self,
text_encodings,
mention_logits,
mention_bounds,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
):
"""
Train "rejection" head.
Inputs:
text_encodings: last layer output of text encoder
mention_logits: mention scores produced by mention detection head
mention_bounds: mention bounds (start, end (inclusive)) by MD head
gold_mention_offsets: ground truth mention offsets
gold_mention_lengths: ground truth mention lengths
entities_ids: entity ids for ground truth mentions
tokens_mapping: sentencepiece to text token mapping
Returns:
el_loss: sum of entity linking loss over all predicted mentions
"""
device = text_encodings.get_device()
# get predicted mention_offsets and mention_bounds by MD model
(
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
) = self.mention_encoder.prune_ctxt_mentions(
mention_logits,
mention_bounds,
num_cand_mentions=50,
threshold=self.md_threshold,
)
mention_offsets = chosen_mention_bounds[:, :, 0]
mention_lengths = (
chosen_mention_bounds[:, :, 1] - chosen_mention_bounds[:, :, 0] + 1
)
# get mention representations for predicted mentions
mentions_repr = self.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
mention_lengths[mention_offsets == 0] = 0
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
flat_mentions_scores = torch.sigmoid(
chosen_mention_logits[mention_lengths != 0]
)
flat_mentions_repr = flat_mentions_repr[flat_mentions_scores > 0]
# cand_scores, cand_indices = self.faiss_index.search(
# flat_mentions_repr.detach().cpu().numpy(), 1
# )
# cand_scores = torch.from_numpy(cand_scores)
# cand_indices = torch.from_numpy(cand_indices)
cand_scores, cand_indices = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
1,
)
if self.use_gpu_index:
cand_scores = torch.from_numpy(cand_scores)
cand_indices = torch.from_numpy(cand_indices)
# iterate over predicted and gold mentions to create targets for
# predicted mentions
targets = []
for (
e_mention_offsets,
e_mention_lengths,
e_gold_mention_offsets,
e_gold_mention_lengths,
e_entities,
) in zip(
mention_offsets.detach().cpu().tolist(),
mention_lengths.detach().cpu().tolist(),
gold_mention_offsets.cpu().tolist(),
gold_mention_lengths.cpu().tolist(),
entities_ids.cpu().tolist(),
):
e_gold_targets = {
(offset, length): ent
for offset, length, ent in zip(
e_gold_mention_offsets, e_gold_mention_lengths, e_entities
)
}
e_targets = [
e_gold_targets.get((offset, length), -1)
for offset, length in zip(e_mention_offsets, e_mention_lengths)
]
targets.append(e_targets)
targets = torch.tensor(targets, device=device)
flat_targets = targets[mention_lengths != 0][flat_mentions_scores > 0]
md_scores = flat_mentions_scores[flat_mentions_scores > 0].unsqueeze(-1)
# flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
if self.use_gpu_index:
flat_entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in cand_indices.squeeze(1).tolist()
]
).to(device)
else:
flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
cand_scores = cand_scores.to(device)
cand_indices = cand_indices.to(device)
predictions = self.el_encoder(
flat_mentions_repr, flat_entities_repr, md_scores, cand_scores
).squeeze(1)
binary_targets = (flat_targets == cand_indices.squeeze(1)).double()
el_loss = self.el_loss(predictions, binary_targets)
return el_loss
def training_step(self, batch, batch_idx):
"""
        Joint training step: receives a batch of texts, each with (possibly multiple) gold mentions and their entity labels.
"""
text_inputs = batch["input_ids"] # bs x mention_len
text_pad_mask = batch["attention_mask"]
gold_mention_offsets = batch["mention_offsets"] # bs x max_mentions_num
gold_mention_lengths = batch["mention_lengths"] # bs x max_mentions_num
entities_ids = batch["entities"] # bs x max_mentions_num
tokens_mapping = batch["tokens_mapping"] # bs x max_tokens_in_input x 2
# mention representations (bs x max_mentions_num x embedding_dim)
text_encodings, mentions_repr = self(
text_inputs, text_pad_mask, gold_mention_offsets, gold_mention_lengths
)
dis_loss = self._disambiguation_training_step(
mentions_repr,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
)
if dis_loss is not None:
self.log("dis_loss", dis_loss, prog_bar=True)
loss = dis_loss
if not self.only_train_disambiguation:
md_loss, mention_logits, mention_bounds = self._md_training_step(
text_encodings,
text_pad_mask,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
)
self.log("md_loss", md_loss, prog_bar=True)
if loss is not None:
loss += md_loss
else:
loss = md_loss
if self.train_el_classifier:
el_loss = self._el_training_step(
text_encodings,
mention_logits,
mention_bounds,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
)
self.log("el_loss", el_loss, prog_bar=True)
loss += el_loss
self.log("train_loss", loss, prog_bar=True)
assert not torch.isnan(loss)
return loss
def _disambiguation_eval_step(
self,
mentions_repr,
mention_offsets,
mention_lengths,
entities_ids,
):
device = mentions_repr.device
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
flat_entities_ids = entities_ids[mention_lengths != 0]
# obtain positive entities representations
# entities_repr = self.embeddings[flat_entities_ids.to("cpu")].to(device)
if self.use_gpu_index:
entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in flat_entities_ids.tolist()
]
).to(device)
else:
entities_repr = self.embeddings[flat_entities_ids.to("cpu")].to(device)
# compute scores for positive entities
pos_scores = self.sim_score(flat_mentions_repr, entities_repr)
# candidates to retrieve
n_retrieve_candidates = max(self.eval_compure_recall_at)
# retrieve negative candidates ids and scores
neg_cand_scores, neg_cand_indices = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
n_retrieve_candidates,
)
neg_cand_scores = torch.from_numpy(neg_cand_scores).to(device)
neg_cand_indices = torch.from_numpy(neg_cand_indices).to(device)
        # mask out the positive entities among the retrieved candidates (score -inf)
neg_cand_scores[
neg_cand_indices.eq(
flat_entities_ids.unsqueeze(1).repeat([1, n_retrieve_candidates])
)
] = float("-inf")
# append positive scores to neg scores
scores = torch.hstack([pos_scores.unsqueeze(1), neg_cand_scores])
        # construct targets
targets = torch.tensor([0] * neg_cand_scores.shape[0]).to(device)
loss = self.disambiguation_loss(scores, targets)
# compute recall at (1, 10, 100)
flat_entities_ids = flat_entities_ids.cpu().tolist()
neg_cand_indices = neg_cand_indices.cpu().tolist()
recalls = []
for k in self.eval_compure_recall_at:
recall = sum(
entity_id in cand_entity_ids[:k]
for entity_id, cand_entity_ids in zip(
flat_entities_ids, neg_cand_indices
)
)
recalls.append(recall)
return (
recalls,
len(flat_entities_ids),
loss,
)
def _joint_eval_step(
self,
text_inputs,
text_pad_mask,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
):
device = text_inputs.device
# encode query and contexts
_, last_layer = self.encoder(text_inputs)
text_encodings = last_layer
text_encodings = self.project_encoder_op(text_encodings)
mention_logits, mention_bounds = self.mention_encoder(
text_encodings, text_pad_mask, tokens_mapping
)
(
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
) = self.mention_encoder.prune_ctxt_mentions(
mention_logits,
mention_bounds,
num_cand_mentions=50,
threshold=self.md_threshold,
)
mention_offsets = chosen_mention_bounds[:, :, 0]
mention_lengths = (
chosen_mention_bounds[:, :, 1] - chosen_mention_bounds[:, :, 0] + 1
)
mentions_repr = self.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
mention_lengths[mention_offsets == 0] = 0
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
mentions_scores = torch.sigmoid(chosen_mention_logits)
# flat_mentions_repr = flat_mentions_repr[flat_mentions_scores > 0]
# retrieve candidates top-1 ids and scores
cand_scores, cand_indices = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32), 1
)
if self.train_el_classifier:
# flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
if self.use_gpu_index:
flat_entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in cand_indices.squeeze(1).tolist()
]
).to(device)
else:
flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
flat_mentions_scores = mentions_scores[mention_lengths != 0].unsqueeze(-1)
cand_scores = torch.from_numpy(cand_scores).to(device)
el_scores = torch.sigmoid(
self.el_encoder(
flat_mentions_repr,
flat_entities_repr,
flat_mentions_scores,
cand_scores,
)
).squeeze(1)
gold_mention_offsets = gold_mention_offsets.cpu().tolist()
gold_mention_lengths = gold_mention_lengths.cpu().tolist()
entities_ids = entities_ids.cpu().tolist()
el_targets = []
for offsets, lengths, example_ent_ids in zip(
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
):
el_targets.append(
{
(offset, length): ent_id
for offset, length, ent_id in zip(offsets, lengths, example_ent_ids)
if length != 0
}
)
mention_offsets = mention_offsets.detach().cpu().tolist()
mention_lengths = mention_lengths.detach().cpu().tolist()
mentions_scores = mentions_scores.detach().cpu().tolist()
el_predictions = []
cand_idx = 0
for offsets, lengths, md_scores in zip(
mention_offsets, mention_lengths, mentions_scores
):
example_predictions = {}
for offset, length, md_score in zip(offsets, lengths, md_scores):
if length != 0:
if md_score >= self.md_threshold:
if (
not self.train_el_classifier
or el_scores[cand_idx] >= self.el_threshold
):
example_predictions[(offset, length)] = cand_indices[
cand_idx
][0]
cand_idx += 1
el_predictions.append(example_predictions)
return el_targets, el_predictions
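    # Note on the output format of _joint_eval_step above: both el_targets and
    # el_predictions are per-example dicts keyed by (mention_offset,
    # mention_length) spans and valued by entity ids. Targets come from the
    # gold annotations, while predictions keep only retrieved top-1 candidates
    # whose mention-detection and (optionally) EL scores pass the thresholds.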
def _eval_step(self, batch, batch_idx):
text_inputs = batch["input_ids"] # bs x mention_len
text_pad_mask = batch["attention_mask"]
mention_offsets = batch["mention_offsets"] # bs x max_mentions_num
mention_lengths = batch["mention_lengths"] # bs x max_mentions_num
entities_ids = batch["entities"] # bs x max_mentions_num
tokens_mapping = batch["tokens_mapping"]
if self.only_train_disambiguation:
text_encodings, mentions_repr = self(
text_inputs, text_pad_mask, mention_offsets, mention_lengths
)
return self._disambiguation_eval_step(
mentions_repr,
mention_offsets,
mention_lengths,
entities_ids,
)
return self._joint_eval_step(
text_inputs,
text_pad_mask,
mention_offsets,
mention_lengths,
entities_ids,
tokens_mapping,
)
def _compute_disambiguation_metrics(self, outputs, log_prefix):
total_recalls = [0] * len(self.eval_compure_recall_at)
total_ent_count = 0
total_loss = 0
for recalls, count, loss in outputs:
for idx in range(len(total_recalls)):
total_recalls[idx] += recalls[idx]
total_ent_count += count
total_loss += loss
metrics = {
log_prefix + "_ent_count": total_ent_count,
log_prefix + "_loss": total_loss,
}
for idx, recall_at in enumerate(self.eval_compure_recall_at):
metrics[log_prefix + f"_recall_at_{recall_at}"] = (
total_recalls[idx] / total_ent_count
)
return metrics
@staticmethod
def calculate_classification_metrics(targets, predictions):
tp, fp, support = 0, 0, 0
boe_tp, boe_fp, boe_support = 0, 0, 0
for example_targets, example_predictions in zip(targets, predictions):
for pos, ent in example_targets.items():
support += 1
if pos in example_predictions and example_predictions[pos] == ent:
tp += 1
for pos, ent in example_predictions.items():
if pos not in example_targets or example_targets[pos] != ent:
fp += 1
example_targets_set = set(example_targets.values())
example_predictions_set = set(example_predictions.values())
for ent in example_targets_set:
boe_support += 1
if ent in example_predictions_set:
boe_tp += 1
for ent in example_predictions_set:
if ent not in example_targets_set:
boe_fp += 1
def compute_f1_p_r(tp, fp, fn):
precision = tp / (tp + fp) if (tp + fp) > 0 else 0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0
f1 = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else 0
return f1, precision, recall
fn = support - tp
boe_fn = boe_support - boe_tp
f1, precision, recall = compute_f1_p_r(tp, fp, fn)
boe_f1, boe_precision, boe_recall = compute_f1_p_r(boe_tp, boe_fp, boe_fn)
return ClassificationMetrics(
f1=f1,
precision=precision,
recall=recall,
support=support,
tp=tp,
fp=fp,
fn=fn,
boe_f1=boe_f1,
boe_precision=boe_precision,
boe_recall=boe_recall,
boe_support=boe_support,
boe_tp=boe_tp,
boe_fp=boe_fp,
boe_fn=boe_fn,
)
def _compute_el_metrics(self, outputs, log_prefix):
el_targets = []
el_predictions = []
for (
batch_el_targets,
batch_el_predictions,
) in outputs:
el_targets.extend(batch_el_targets)
el_predictions.extend(batch_el_predictions)
el_metrics = self.calculate_classification_metrics(el_targets, el_predictions)
metrics = {
log_prefix + "_f1": el_metrics.f1,
log_prefix + "_precision": el_metrics.precision,
log_prefix + "_recall": el_metrics.recall,
log_prefix + "_support": el_metrics.support,
log_prefix + "_tp": el_metrics.tp,
log_prefix + "_fp": el_metrics.fp,
log_prefix + "_fn": el_metrics.fn,
log_prefix + "_boe_f1": el_metrics.boe_f1,
log_prefix + "_boe_precision": el_metrics.boe_precision,
log_prefix + "_boe_recall": el_metrics.boe_recall,
log_prefix + "_boe_support": el_metrics.boe_support,
log_prefix + "_boe_tp": el_metrics.boe_tp,
log_prefix + "_boe_fp": el_metrics.boe_fp,
log_prefix + "_boe_fn": el_metrics.boe_fn,
}
return metrics
def _eval_epoch_end(self, outputs, log_prefix="valid"):
if self.only_train_disambiguation:
metrics = self._compute_disambiguation_metrics(outputs, log_prefix)
else:
metrics = self._compute_el_metrics(outputs, log_prefix)
print("EVAL:")
print(metrics)
self.log_dict(metrics, on_epoch=True, sync_dist=True)
def validation_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def validation_epoch_end(self, valid_outputs):
self._eval_epoch_end(valid_outputs)
def test_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def test_epoch_end(self, test_outputs):
self._eval_epoch_end(test_outputs, "test")
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""
This hook will be called before loading state_dict from a checkpoint.
setup("fit") will build the model before loading state_dict
Args:
checkpoint: A dictionary with variables from the checkpoint.
"""
self.setup("fit")
|
BELA-main
|
bela/task/joint_el_task.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import yaml
from hydra.experimental import compose, initialize_config_module
import hydra
import torch
from tqdm import tqdm
import json
import faiss
import logging
from typing import Union, List, Dict, Any, Tuple
logger = logging.getLogger(__name__)
def load_file(path: Union[str, Path]) -> List[Dict[str, Any]]:
all_data = []
with open(path, 'rt') as fd:
for line in tqdm(fd):
data = json.loads(line)
all_data.append(data)
return all_data
def convert_sp_to_char_offsets(
text: str,
sp_offsets: List[int],
sp_lengths: List[int],
sp_tokens_boundaries: List[List[int]],
) -> Tuple[List[int], List[int]]:
"""
    Convert sentencepiece offsets and lengths to character-level offsets and
    lengths for a given `text`.
"""
char_offsets: List[int] = []
char_lengths: List[int] = []
text_utf8_chars: List[str] = [char for char in text]
for sp_offset, sp_length in zip(sp_offsets, sp_lengths):
        # sp_offsets include the cls_token, while the boundaries don't
if sp_offset == 0:
continue
sp_offset = sp_offset - 1
char_offset = sp_tokens_boundaries[sp_offset][0]
char_end = sp_tokens_boundaries[sp_offset + sp_length - 1][1]
# sp token boundaries include whitespaces, so remove them
while text_utf8_chars[char_offset].isspace():
char_offset += 1
assert char_offset < len(text_utf8_chars)
char_offsets.append(char_offset)
char_lengths.append(char_end - char_offset)
return char_offsets, char_lengths
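# Worked example for convert_sp_to_char_offsets (hypothetical values, chosen
# only to illustrate the indexing): for text = "Hello world" with
# sp_tokens_boundaries = [[0, 5], [5, 11]], a mention given as sp_offsets = [2]
# and sp_lengths = [1] points at boundary index 1 (the offset counts the
# leading cls token), i.e. characters 5..11 " world"; after skipping the
# leading whitespace the function returns char_offsets = [6] and
# char_lengths = [5], which covers "world".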
class ModelEval:
def __init__(
self,
checkpoint_path,
config_name="joint_el_mel",
embeddings_path=None,
ent_catalogue_idx_path=None
):
self.device = torch.device("cuda:0")
logger.info("Create task")
with initialize_config_module("bela/conf"):
cfg = compose(config_name=config_name)
cfg.task.load_from_checkpoint = checkpoint_path
cfg.task.embeddings_path = embeddings_path or cfg.task.embeddings_path
cfg.datamodule.ent_catalogue_idx_path = ent_catalogue_idx_path or cfg.datamodule.ent_catalogue_idx_path
cfg.datamodule.train_path = None
cfg.datamodule.val_path = None
cfg.datamodule.test_path = None
self.checkpoint_path = checkpoint_path
self.transform = hydra.utils.instantiate(cfg.task.transform)
datamodule = hydra.utils.instantiate(cfg.datamodule, transform=self.transform)
self.task = hydra.utils.instantiate(cfg.task, datamodule=datamodule, _recursive_=False)
self.task.setup("train")
self.task = self.task.eval()
self.task = self.task.to(self.device)
self.embeddings = self.task.embeddings
self.faiss_index = self.task.faiss_index
logger.info("Create ent index")
self.ent_idx = []
for ent in datamodule.ent_catalogue.idx:
self.ent_idx.append(ent)
def create_gpu_index(self, gpu_id=0):
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = gpu_id
flat_config.useFloat16 = True
res = faiss.StandardGpuResources()
        self.faiss_index = faiss.GpuIndexFlatIP(res, self.embeddings.shape[1], flat_config)
self.faiss_index.add(self.embeddings)
def lookup(
self,
query: torch.Tensor,
):
scores, indices = self.faiss_index.search(query, k=1)
return scores.squeeze(-1).to(self.device), indices.squeeze(-1).to(self.device)
def process_batch(self, texts):
batch: Dict[str, Any] = {"texts": texts}
model_inputs = self.transform(batch)
token_ids = model_inputs["input_ids"].to(self.device)
text_pad_mask = model_inputs["attention_mask"].to(self.device)
tokens_mapping = model_inputs["tokens_mapping"].to(self.device)
sp_tokens_boundaries = model_inputs["sp_tokens_boundaries"].tolist()
with torch.no_grad():
_, last_layer = self.task.encoder(token_ids)
text_encodings = last_layer
text_encodings = self.task.project_encoder_op(text_encodings)
mention_logits, mention_bounds = self.task.mention_encoder(
text_encodings, text_pad_mask, tokens_mapping
)
(
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
) = self.task.mention_encoder.prune_ctxt_mentions(
mention_logits,
mention_bounds,
num_cand_mentions=50,
threshold=self.task.md_threshold,
)
mention_offsets = chosen_mention_bounds[:, :, 0]
mention_lengths = (
chosen_mention_bounds[:, :, 1] - chosen_mention_bounds[:, :, 0] + 1
)
mention_lengths[mention_offsets == 0] = 0
mentions_repr = self.task.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
mentions_scores = torch.sigmoid(chosen_mention_logits)
# retrieve candidates top-1 ids and scores
cand_scores, cand_indices = self.lookup(
flat_mentions_repr.detach()
)
entities_repr = self.embeddings[cand_indices.to(self.embeddings.device)].to(self.device)
chosen_mention_limits: List[int] = (
chosen_mention_mask.int().sum(-1).detach().cpu().tolist()
)
flat_mentions_scores = mentions_scores[mention_lengths != 0].unsqueeze(-1)
cand_scores = cand_scores.unsqueeze(-1)
el_scores = torch.sigmoid(
self.task.el_encoder(
flat_mentions_repr,
entities_repr,
flat_mentions_scores,
cand_scores,
)
).squeeze(1)
predictions = []
cand_idx = 0
example_idx = 0
for offsets, lengths, md_scores in zip(
mention_offsets, mention_lengths, mentions_scores
):
ex_sp_offsets = []
ex_sp_lengths = []
ex_entities = []
ex_md_scores = []
ex_el_scores = []
for offset, length, md_score in zip(offsets, lengths, md_scores):
if length != 0:
if md_score >= self.task.md_threshold:
ex_sp_offsets.append(offset.detach().cpu().item())
ex_sp_lengths.append(length.detach().cpu().item())
ex_entities.append(self.ent_idx[cand_indices[cand_idx].detach().cpu().item()])
ex_md_scores.append(md_score.item())
ex_el_scores.append(el_scores[cand_idx].item())
cand_idx += 1
char_offsets, char_lengths = convert_sp_to_char_offsets(
texts[example_idx],
ex_sp_offsets,
ex_sp_lengths,
sp_tokens_boundaries[example_idx],
)
predictions.append(
{
"offsets": char_offsets,
"lengths": char_lengths,
"entities": ex_entities,
"md_scores": ex_md_scores,
"el_scores": ex_el_scores,
}
)
example_idx += 1
return predictions
def process_disambiguation_batch(self, texts, mention_offsets, mention_lengths, entities):
batch: Dict[str, Any] = {
"texts": texts,
"mention_offsets": mention_offsets,
"mention_lengths": mention_lengths,
"entities": entities,
}
model_inputs = self.transform(batch)
token_ids = model_inputs["input_ids"].to(self.device)
mention_offsets = model_inputs["mention_offsets"]
mention_lengths = model_inputs["mention_lengths"]
tokens_mapping = model_inputs["tokens_mapping"].to(self.device)
sp_tokens_boundaries = model_inputs["sp_tokens_boundaries"].tolist()
with torch.no_grad():
_, last_layer = self.task.encoder(token_ids)
text_encodings = last_layer
text_encodings = self.task.project_encoder_op(text_encodings)
mentions_repr = self.task.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
# retrieve candidates top-1 ids and scores
cand_scores, cand_indices = self.lookup(
flat_mentions_repr.detach()
)
predictions = []
cand_idx = 0
example_idx = 0
for offsets, lengths in zip(
mention_offsets, mention_lengths,
):
ex_sp_offsets = []
ex_sp_lengths = []
ex_entities = []
ex_dis_scores = []
for offset, length in zip(offsets, lengths):
if length != 0:
ex_sp_offsets.append(offset.detach().cpu().item())
ex_sp_lengths.append(length.detach().cpu().item())
ex_entities.append(self.ent_idx[cand_indices[cand_idx].detach().cpu().item()])
ex_dis_scores.append(cand_scores[cand_idx].detach().cpu().item())
cand_idx += 1
char_offsets, char_lengths = convert_sp_to_char_offsets(
texts[example_idx],
ex_sp_offsets,
ex_sp_lengths,
sp_tokens_boundaries[example_idx],
)
predictions.append({
"offsets": char_offsets,
"lengths": char_lengths,
"entities": ex_entities,
"scores": ex_dis_scores
})
example_idx+= 1
return predictions
def get_predictions(self, test_data, batch_size=256):
all_predictions = []
        for batch_start in tqdm(range(0, len(test_data), batch_size)):
            batch = test_data[batch_start:batch_start + batch_size]
texts = [example['original_text'] for example in batch]
predictions = self.process_batch(texts)
all_predictions.extend(predictions)
return all_predictions
def get_disambiguation_predictions(self, test_data, batch_size=256):
all_predictions = []
        for batch_start in tqdm(range(0, len(test_data), batch_size)):
            batch = test_data[batch_start:batch_start + batch_size]
            texts = [example['original_text'] for example in batch]
            mention_offsets = [[offset for _, _, _, _, offset, _ in example['gt_entities']] for example in batch]
            mention_lengths = [[length for _, _, _, _, _, length in example['gt_entities']] for example in batch]
            entities = [[0 for _, _, _, _, _, _ in example['gt_entities']] for example in batch]
predictions = self.process_disambiguation_batch(texts, mention_offsets, mention_lengths, entities)
all_predictions.extend(predictions)
return all_predictions
@staticmethod
def compute_scores(data, predictions, md_threshold=0.2, el_threshold=0.05):
tp, fp, support = 0, 0, 0
tp_boe, fp_boe, support_boe = 0, 0, 0
predictions_per_example = []
for example, example_predictions in zip(data, predictions):
example_targets = {
(offset,length):ent_id
for _,_,ent_id,_,offset,length in example['gt_entities']
}
example_predictions = {
(offset, length):ent_id
for offset, length, ent_id, md_score, el_score in zip(
example_predictions['offsets'],
example_predictions['lengths'],
example_predictions['entities'],
example_predictions['md_scores'],
example_predictions['el_scores'],
)
if (el_score > el_threshold and md_score > md_threshold)
}
predictions_per_example.append((len(example_targets), len(example_predictions)))
for pos, ent in example_targets.items():
support += 1
if pos in example_predictions and example_predictions[pos] == ent:
tp += 1
for pos, ent in example_predictions.items():
if pos not in example_targets or example_targets[pos] != ent:
fp += 1
example_targets_set = set(example_targets.values())
example_predictions_set = set(example_predictions.values())
for ent in example_targets_set:
support_boe += 1
if ent in example_predictions_set:
tp_boe += 1
for ent in example_predictions_set:
if ent not in example_targets_set:
fp_boe += 1
def safe_division(a, b):
if b == 0:
return 0
else:
return a / b
def compute_f1_p_r(tp, fp, fn):
precision = safe_division(tp, (tp + fp))
recall = safe_division(tp, (tp + fn))
f1 = safe_division(2 * tp, (2 * tp + fp + fn))
return f1, precision, recall
fn = support - tp
fn_boe = support_boe - tp_boe
return compute_f1_p_r(tp, fp, fn), compute_f1_p_r(tp_boe, fp_boe, fn_boe)
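# Minimal usage sketch (the paths below are hypothetical placeholders, not
# files shipped with this repo): load a checkpoint, run end-to-end predictions
# on a JSONL file, and score them with compute_scores.
if __name__ == "__main__":
    model_eval = ModelEval(
        checkpoint_path="./models/model.ckpt",
        config_name="joint_el_mel",
        embeddings_path="./models/embeddings.pt",
        ent_catalogue_idx_path="./models/index.txt",
    )
    test_data = load_file("./data/test.jsonl")
    predictions = model_eval.get_predictions(test_data, batch_size=32)
    (f1, precision, recall), _ = ModelEval.compute_scores(test_data, predictions)
    print(f"F1 = {f1:.4f}, precision = {precision:.4f}, recall = {recall:.4f}")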
|
BELA-main
|
bela/evaluation/model_eval.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, Any
# @manual "//github/facebookresearch/hydra:hydra"
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
defaults = [
"_self_",
{"task": "joint_el_task"},
# Model
# {"task/model": "xlmr"},
# Transform
# {"task/transform": "joint_el_xlmr_transform"},
# Optim
# {"task/optim": "adamw"},
# Data
# {"datamodule": "joint_el_datamodule"},
# Trainer
# {"trainer": "gpu_1_host"},
{"checkpoint_callback": "default"},
]
@dataclass
class MainConfig:
defaults: List[Any] = field(default_factory=lambda: defaults)
task: Any = MISSING
datamodule: Any = MISSING
trainer: Any = MISSING
test_only: bool = False
checkpoint_callback: Any = MISSING
cs = ConfigStore.instance()
cs.store(name="config", node=MainConfig)
|
BELA-main
|
bela/conf/config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from . import config
@dataclass
class TransformConf:
pass
@dataclass
class DataModuleConf:
pass
@dataclass
class OptimConf:
pass
@dataclass
class ModelConf:
pass
|
BELA-main
|
bela/conf/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from itertools import product
from tqdm import tqdm
import numpy as np
from bela.evaluation.model_eval import ModelEval, load_file
from bela.utils.prediction_utils import get_predictions_using_windows
if __name__ == "__main__":
# Finetuned on aida
checkpoint_path = "/checkpoints/movb/bela/aida/lightning_logs/version_0/checkpoints/checkpoint_1.ckpt"
model_eval = ModelEval(checkpoint_path, config_name="joint_el_mel_new_index")
results = {}
print(f"{model_eval.checkpoint_path=}")
for md_threshold, el_threshold in tqdm(list(product(np.arange(0, 0.6, 0.2), repeat=2))):
model_eval.task.md_threshold = md_threshold
model_eval.task.el_threshold = el_threshold
print(f"{model_eval.task.md_threshold=}")
print(f"{model_eval.task.el_threshold=}")
test_data_path = "/fsx/louismartin/bela/retrieved_from_aws_backup/ndecao/TACKBP2015/train_bela_format_all_languages.jsonl"
print(f"Processing {test_data_path}")
test_data = load_file(test_data_path)
predictions = get_predictions_using_windows(model_eval, test_data, window_length=256)
(f1, precision, recall), (f1_boe, precision_boe, recall_boe) = ModelEval.compute_scores(test_data, predictions)
print(f"F1 = {f1:.4f}, precision = {precision:.4f}, recall = {recall:.4f}")
print(f"F1 boe = {f1_boe:.4f}, precision = {precision_boe:.4f}, recall = {recall_boe:.4f}")
results[(md_threshold, el_threshold)] = (f1, precision, recall), (f1_boe, precision_boe, recall_boe)
print(sorted(results.items(), key=lambda x: x[1][0][0], reverse=True))
pickle_path = Path.home() / "tmp/grid_search_thresholds.pkl"
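    # pickle_path is defined above but never used in this snippet; a plausible
    # completion (an assumption, not taken from the original script) would be:
    #     import pickle
    #     with open(pickle_path, "wb") as f:
    #         pickle.dump(results, f)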
|
BELA-main
|
scripts/grid_search_thresholds.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
warnings.filterwarnings('ignore')
import yaml
from hydra.experimental import compose, initialize_config_module
import hydra
import torch
from tqdm import tqdm
import json
import faiss
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Optional, List, Dict, Any, Tuple
from bela.transforms.spm_transform import SPMTransform
from bela.evaluation.model_eval import ModelEval, load_file
from bela.utils.prediction_utils import get_predictions_using_windows, get_sp_transform
def evaluate_model_e2e(checkpoint_path, datasets, md_threshold = 0.2, el_threshold = 0.4, embeddings_path=None, ent_catalogue_idx_path=None):
print(f"Loading model from checkpoint {checkpoint_path}")
model_eval = ModelEval(
checkpoint_path=checkpoint_path,
config_name="joint_el_mel_new",
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path
)
model_eval.task.md_threshold = md_threshold
model_eval.task.el_threshold = el_threshold
for test_data_path in datasets:
print(f"Processing {test_data_path}")
test_data = load_file(test_data_path)
for sample in test_data:
if "data_example_id" in sample:
sample["document_id"] = sample["data_example_id"]
predictions = get_predictions_using_windows(model_eval, test_data, window_length=254, window_overlap=10, do_merge_predictions=True)
(f1, precision, recall), (f1_boe, precision_boe, recall_boe) = ModelEval.compute_scores(test_data, predictions)
print(f"F1 = {f1:.4f}, precision = {precision:.4f}, recall = {recall:.4f}")
def convert_examples_for_disambiguation(test_data, transform, skip_unknown_ent_ids=False, ent_idx=None):
old_max_seq_len = transform.max_seq_len
transform.max_seq_len = 10000
new_examples = []
max_mention_token_pos_in_text = 192
skipped_ent_ids = 0
for example in tqdm(test_data):
text = example['original_text']
outputs = transform(dict(texts=[text]))
sp_token_boundaries = outputs['sp_tokens_boundaries'][0]
        for _, _, ent_id, _, offset, length in example['gt_entities']:
if skip_unknown_ent_ids and ent_idx is not None and ent_id not in ent_idx:
skipped_ent_ids += 1
continue
token_pos = 0
while token_pos < len(sp_token_boundaries) and offset >= sp_token_boundaries[token_pos][1]:
token_pos += 1
new_text = text
new_offset = offset
if token_pos > max_mention_token_pos_in_text:
shift = sp_token_boundaries[token_pos-max_mention_token_pos_in_text][0].item()
new_text = new_text[shift:]
new_offset = new_offset - shift
assert text[offset:offset+length] == new_text[new_offset:new_offset+length]
new_example = {
'original_text': new_text,
'gt_entities': [[0,0,ent_id,'wiki',new_offset,length]],
}
new_examples.append(new_example)
transform.max_seq_len = old_max_seq_len
return new_examples, skipped_ent_ids
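# Note on convert_examples_for_disambiguation above: it turns every gold
# mention into its own single-mention example and, when the mention starts
# beyond max_mention_token_pos_in_text sentencepiece tokens, shifts the text
# window left so the mention still falls inside the encoder context; the
# assert checks that the mention surface form is preserved after the shift.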
def metrics_disambiguation(test_data, predictions):
support = 0
correct = 0
for example_idx, (example, prediction) in tqdm(enumerate(zip(test_data, predictions))):
if len(prediction['entities']) == 0:
continue
target = example['gt_entities'][0][2]
prediction = prediction['entities'][0]
correct += (target == prediction)
support += 1
accuracy = correct/support
return accuracy, support
def evaluate_model_dis(checkpoint_path, datasets, embeddings_path=None, ent_catalogue_idx_path=None):
print(f"Loading model from checkpoint {checkpoint_path}")
model_eval = ModelEval(
checkpoint_path=checkpoint_path,
config_name="joint_el_mel_new",
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path
)
for test_data_path in datasets:
print(f"Processing {test_data_path}")
test_data = load_file(test_data_path)
        test_data_for_disambiguation, skipped = convert_examples_for_disambiguation(test_data, model_eval.transform)
        predictions = model_eval.get_disambiguation_predictions(test_data_for_disambiguation)
        accuracy, support = metrics_disambiguation(test_data_for_disambiguation, predictions)
        print(f"Accuracy {accuracy}, support {support}, skipped {skipped}")
embeddings_path = "./models/embeddings.pt"
ent_catalogue_idx_path = "./models/index.txt"
print("End-to-end EL performance on Mewsli-9’-test (under-labeled for end-to-end linking)")
checkpoint_path = "./models/model_mewsli.ckpt"
datasets = [
'./data/mewsli-9-splitted/ar.jsonl_test',
'./data/mewsli-9-splitted/de.jsonl_test',
'./data/mewsli-9-splitted/en.jsonl_test',
'./data/mewsli-9-splitted/es.jsonl_test',
'./data/mewsli-9-splitted/fa.jsonl_test',
'./data/mewsli-9-splitted/ja.jsonl_test',
'./data/mewsli-9-splitted/sr.jsonl_test',
'./data/mewsli-9-splitted/ta.jsonl_test',
'./data/mewsli-9-splitted/tr.jsonl_test',
]
evaluate_model_e2e(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
print("End-to-end EL performance on Mewsli-9’-test (labeled for end-to-end linking)")
checkpoint_path = "./models/model_e2e.ckpt"
datasets = [
'./data/mewsli-9-labelled/ar_labelled.jsonl',
'./data/mewsli-9-labelled/de_labelled.jsonl',
'./data/mewsli-9-labelled/en_labelled.jsonl',
'./data/mewsli-9-labelled/es_labelled.jsonl',
'./data/mewsli-9-labelled/fa_labelled.jsonl',
'./data/mewsli-9-labelled/ja_labelled.jsonl',
'./data/mewsli-9-labelled/sr_labelled.jsonl',
'./data/mewsli-9-labelled/ta_labelled.jsonl',
'./data/mewsli-9-labelled/tr_labelled.jsonl',
]
evaluate_model_e2e(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
print("End-to-end results on AIDA")
checkpoint_path = "./models/model_aida.ckpt"
datasets = [
'./data/aida/aida_testb.jsonl_wikidata',
]
evaluate_model_e2e(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
print("ED accuracy on Mewsli-9")
checkpoint_path = "./models/model_wiki.ckpt"
datasets = [
'./data/mewsli-9/ar.jsonl',
'./data/mewsli-9/de.jsonl',
'./data/mewsli-9/en.jsonl',
'./data/mewsli-9/es.jsonl',
'./data/mewsli-9/fa.jsonl',
'./data/mewsli-9/ja.jsonl',
'./data/mewsli-9/sr.jsonl',
'./data/mewsli-9/ta.jsonl',
'./data/mewsli-9/tr.jsonl',
]
evaluate_model_dis(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
|
BELA-main
|
scripts/evaluate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import pickle
import re
import pandas
import jsonlines
from mgenre.utils import chunk_it, get_wikidata_ids
from tqdm.auto import tqdm, trange
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
type=str,
default="/checkpoint/ndecao/xlwikifier-wikidata/data",
)
parser.add_argument(
"--output_dir",
type=str,
default="/checkpoint/ndecao/TR2016",
)
parser.add_argument(
"--base_wikidata",
type=str,
default="/checkpoint/ndecao/wikidata",
)
parser.add_argument(
"-d",
"--debug",
help="Print lots of debugging statements",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
"-v",
"--verbose",
help="Be verbose",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args, _ = parser.parse_known_args()
logging.basicConfig(level=logging.DEBUG)
filename = os.path.join(args.base_wikidata, "lang_title2wikidataID.pkl")
logging.info("Loading {}".format(filename))
with open(filename, "rb") as f:
lang_title2wikidataID = pickle.load(f)
filename = os.path.join(args.base_wikidata, "lang_redirect2title.pkl")
logging.info("Loading {}".format(filename))
with open(filename, "rb") as f:
lang_redirect2title = pickle.load(f)
filename = os.path.join(args.base_wikidata, "label_or_alias2wikidataID.pkl")
logging.info("Loading {}".format(filename))
with open(filename, "rb") as f:
label_or_alias2wikidataID = pickle.load(f)
for lang in os.listdir(args.input_dir):
logging.info("Converting {}".format(lang))
for split in ("test", "train"):
kilt_dataset = []
for filename in tqdm(
set(
".".join(e.split(".")[:-1])
for e in os.listdir(os.path.join(args.input_dir, lang, split))
)
):
with open(
os.path.join(args.input_dir, lang, split, filename + ".txt")
) as f:
doc = f.read()
with open(
os.path.join(args.input_dir, lang, split, filename + ".mentions")
) as f:
mentions = f.readlines()
for i, mention in enumerate(mentions):
start, end, _, title, is_hard = mention.strip().split("\t")
start, end, is_hard = int(start), int(end), bool(int(is_hard))
wikidataIDs = get_wikidata_ids(
title.replace("_", " "),
lang,
lang_title2wikidataID,
lang_redirect2title,
label_or_alias2wikidataID,
)[0]
meta = {
"left_context": doc[:start].strip(),
"mention": doc[start:end].strip(),
"right_context": doc[end:].strip(),
}
item = {
"id": "TR2016-{}-{}-{}".format(lang, filename, i),
"input": (
meta["left_context"]
+ " [START] "
+ meta["mention"]
+ " [END] "
+ meta["right_context"]
),
"output": [{"answer": list(wikidataIDs)}],
"meta": meta,
"is_hard": is_hard,
}
kilt_dataset.append(item)
filename = os.path.join(
args.output_dir, "{}-kilt-{}.jsonl".format(lang, split)
)
logging.info("Saving {}".format(filename))
with jsonlines.open(filename, "w") as f:
f.write_all(kilt_dataset)
kilt_dataset = [e for e in kilt_dataset if e["is_hard"]]
filename = os.path.join(
args.output_dir, "{}-hard.jsonl".format(filename.split(".")[0])
)
logging.info("Saving {}".format(filename))
with jsonlines.open(filename, "w") as f:
f.write_all(kilt_dataset)
|
BELA-main
|
preprocessing_scripts/preprocess_TR2016.py
|
BELA-main
|
mblink/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import hydra
from mblink.conf.config import MainConfig
from omegaconf import OmegaConf
from pytorch_lightning.trainer import Trainer
@hydra.main(config_path="conf", config_name="config")
def main(cfg: MainConfig):
print(OmegaConf.to_yaml(cfg))
os.environ["NCCL_NSOCKS_PERTHREAD"] = "4"
os.environ["NCCL_SOCKET_NTHREADS"] = "2"
if cfg.get("debug_mode"):
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
os.environ["PL_SKIP_CPU_COPY_ON_DDP_TEARDOWN"] = "1"
task = hydra.utils.instantiate(cfg.task, _recursive_=False)
assert cfg.task.model.model_path == cfg.task.transform.model_path
transform = hydra.utils.instantiate(cfg.task.transform)
datamodule = hydra.utils.instantiate(cfg.datamodule, transform=transform)
checkpoint_callback = hydra.utils.instantiate(cfg.checkpoint_callback)
trainer = Trainer(**cfg.trainer, callbacks=[checkpoint_callback])
if cfg.test_only:
ckpt_path = cfg.task.load_from_checkpoint
trainer.test(
model=task,
ckpt_path=ckpt_path,
verbose=True,
datamodule=datamodule,
)
else:
trainer.fit(task, datamodule=datamodule)
print(f"*** Best model path is {checkpoint_callback.best_model_path}")
trainer.test(
model=None,
ckpt_path="best",
verbose=True,
datamodule=datamodule,
)
if __name__ == "__main__":
main()
|
BELA-main
|
mblink/main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import mmap
from typing import List
import torch
from pytorch_lightning import LightningDataModule
from mblink.utils.utils import (
EntityCatalogueType,
EntityCatalogue,
ElDatasetType,
MultilangEntityCatalogue,
NegativesStrategy,
order_entities,
)
from mblink.transforms.blink_transform import BlinkTransform
logger = logging.getLogger()
class ElMatchaDataset(torch.utils.data.Dataset):
"""
A memory mapped dataset for EL in Matcha format
Each example in this dataset contains several mentions.
    We also filter out mentions that are not present in the entity catalogue.
"""
def __init__(
self, path, ent_catalogue, negatives=False, negatives_strategy="higher"
):
self.ent_catalogue = ent_catalogue
self.negatives = negatives
self.negatives_strategy = NegativesStrategy(negatives_strategy)
self.file = open(path, mode="r")
self.mm = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
self.offsets = []
self.count = 0
logger.info(f"Build mmap index for {path}")
line = self.mm.readline()
offset = 0
while line:
data = json.loads(line)
for gt_ent_idx, gt_entity in enumerate(data["gt_entities"]):
ent = gt_entity[2]
if ent in self.ent_catalogue:
self.offsets.append((offset, gt_ent_idx))
self.count += 1
offset = self.mm.tell()
line = self.mm.readline()
def __len__(self):
return self.count
def __getitem__(self, index):
offset, gt_ent_idx = self.offsets[index]
self.mm.seek(offset)
line = self.mm.readline()
data = json.loads(line)
_, _, gt_entity, _, offset, length = data["gt_entities"][gt_ent_idx]
entity_index, entity_tokens = self.ent_catalogue[gt_entity]
text = data['original_text']
result = {
"context_left": " ".join(text[:offset]),
"mention": " ".join(text[offset : offset + length]),
"context_right": " ".join(text[offset + length :]),
"entity_id": gt_entity,
"entity_index": entity_index,
"entity_tokens": entity_tokens,
}
if self.negatives:
assert "gt_hard_negatives" in data
neg_entities_ids = []
neg_entities_indexes = []
neg_entities_tokens = []
for ent in data["gt_hard_negatives"][gt_ent_idx]:
if (
ent == gt_entity
and self.negatives_strategy == NegativesStrategy.HIGHER
):
break
entity_index, entity_tokens = self.ent_catalogue[ent]
neg_entities_ids.append(ent)
neg_entities_indexes.append(entity_index)
neg_entities_tokens.append(entity_tokens)
result["neg_entities_ids"] = neg_entities_ids
result["neg_entities_indexes"] = neg_entities_indexes
result["neg_entities_tokens"] = neg_entities_tokens
return result
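# Illustrative Matcha-format record (field values are hypothetical; only the
# structure is implied by ElMatchaDataset.__getitem__ above, which unpacks
# each gt_entities entry as (_, _, entity_id, _, offset, length) with
# word-level offsets into original_text):
#   {"original_text": ["In", "London", "the", "capital", "of", "England"],
#    "gt_entities": [[0, 0, "London", "wiki", 1, 1]]}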
class ElBlinkDataset(torch.utils.data.Dataset):
"""
A memory mapped dataset for EL in BLINK format
Each example in this dataset contains one mention.
    We also filter out mentions that are not present in the entity catalogue.
"""
def __init__(
self, path, ent_catalogue, negatives=False, negatives_strategy="higher"
):
self.ent_catalogue = ent_catalogue
self.file = open(path, mode="r")
self.mm = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
self.offsets = []
self.count = 0
logger.info(f"Build mmap index for {path}")
line = self.mm.readline()
offset = 0
while line:
data = json.loads(line)
if data["entity_id"] in self.ent_catalogue:
self.offsets.append(offset)
self.count += 1
offset = self.mm.tell()
line = self.mm.readline()
def __len__(self):
return self.count
def __getitem__(self, index):
offset = self.offsets[index]
self.mm.seek(offset)
line = self.mm.readline()
data = json.loads(line)
entity_id = data["entity_id"]
entity_index, entity_tokens = self.ent_catalogue[entity_id]
return {
"context_left": data["context_left"],
"mention": data["mention"],
"context_right": data["context_right"],
"entity_id": entity_id,
"entity_index": entity_index,
"entity_tokens": entity_tokens,
}
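# Illustrative BLINK-format record (hypothetical values; the keys mirror what
# ElBlinkDataset.__getitem__ above reads): one mention per record with
# pre-split contexts, e.g.
#   {"context_left": "In", "mention": "London",
#    "context_right": "the capital of England", "entity_id": "Q84"}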
class ElBiEncoderDataModule(LightningDataModule):
"""
Read data from EL datatset and prepare mention/entity pairs tensors
"""
def __init__(
self,
transform: BlinkTransform,
# Dataset args
train_path: str,
val_path: str,
test_path: str,
ent_catalogue_path: str,
ent_catalogue_idx_path: str,
dataset_type: str = "matcha",
ent_catalogue_type: str = "simple",
batch_size: int = 2,
negatives: bool = False,
negatives_strategy: str = "higher",
max_negative_entities_in_batch: int = 0,
drop_last: bool = False, # drop last batch if len(dataset) not multiple of batch_size
num_workers: int = 0, # increasing this bugs out right now
*args,
**kwargs,
):
super().__init__()
self.batch_size = batch_size
self.negatives = negatives
self.max_negative_entities_in_batch = max_negative_entities_in_batch
self.drop_last = drop_last
self.num_workers = num_workers
self.transform = transform
ent_catalogue_type = EntityCatalogueType(ent_catalogue_type)
if ent_catalogue_type == EntityCatalogueType.SIMPLE:
self.ent_catalogue = EntityCatalogue(
ent_catalogue_path, ent_catalogue_idx_path
)
elif ent_catalogue_type == EntityCatalogueType.MULTI:
self.ent_catalogue = MultilangEntityCatalogue(
ent_catalogue_path, ent_catalogue_idx_path
)
else:
raise NotImplementedError(
f"Unknown ent_catalogue_type {ent_catalogue_type}"
)
dataset_type = ElDatasetType(dataset_type)
if dataset_type == ElDatasetType.MATCHA:
dataset_cls = ElMatchaDataset
elif dataset_type == ElDatasetType.BLINK:
dataset_cls = ElBlinkDataset
else:
raise NotImplementedError(f"Unknown dataset_type {dataset_type}")
self.datasets = {
"train": dataset_cls(
train_path,
self.ent_catalogue,
negatives=negatives,
negatives_strategy=negatives_strategy,
),
"valid": dataset_cls(val_path, self.ent_catalogue),
"test": dataset_cls(test_path, self.ent_catalogue),
}
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["train"],
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_train,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["valid"],
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["test"],
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
)
def collate_eval(self, batch):
return self.collate(batch, False)
def collate_train(self, batch):
return self.collate(batch, True)
def collate(self, batch, is_train):
"""
Prepare mention, entity tokens and target tensors
"""
if self.negatives and is_train:
(
left_context,
mention,
right_context,
_,
entity_ids,
entity_token_ids,
_,
neg_entities_ids,
neg_entities_tokens,
) = zip(*[item.values() for item in batch])
else:
left_context, mention, right_context, _, entity_ids, entity_token_ids = zip(
*[item.values() for item in batch]
)
neg_entities_ids = None
neg_entities_tokens = None
entity_token_ids, entity_ids, targets = order_entities(
entity_token_ids,
entity_ids,
neg_entities_ids,
neg_entities_tokens,
self.max_negative_entities_in_batch,
)
pad_length = (
len(batch) + self.max_negative_entities_in_batch - len(entity_token_ids)
)
entity_tensor_mask = [1] * len(entity_token_ids) + [0] * pad_length
entity_token_ids += [
[self.transform.bos_idx, self.transform.eos_idx]
] * pad_length
entity_ids += [0] * pad_length
mention_tensors, entity_tensors = self.transform(
{
"left_context": left_context,
"mention": mention,
"right_context": right_context,
"token_ids": entity_token_ids,
}
)
entity_ids = torch.tensor(entity_ids, dtype=torch.long)
targets = torch.tensor(targets, dtype=torch.long)
entity_tensor_mask = torch.tensor(entity_tensor_mask, dtype=torch.long)
return {
"mentions": mention_tensors,
"entities": entity_tensors,
"entity_ids": entity_ids,
"targets": targets,
"entity_tensor_mask": entity_tensor_mask,
}
|
BELA-main
|
mblink/datamodule/blink_datamodule.py
|
BELA-main
|
mblink/tests/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import tempfile
import random
import torch
import h5py
import numpy as np
import torch
from mblink.datamodule.blink_datamodule import (
ElBlinkDataset,
ElMatchaDataset,
ElBiEncoderDataModule,
EntityCatalogue,
MultilangEntityCatalogue,
)
from mblink.transforms.blink_transform import (
BlinkTransform,
)
from mblink.utils.utils import assert_equal_tensor_dict
class TestBiEncoderELDataModule(unittest.TestCase):
def setUp(self):
random.seed(0)
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_matcha.jsonl")
self.ent_catalogue_path = os.path.join(self.base_dir, "el_catalogue.h5")
self.ent_catalogue_idx_path = os.path.join(self.base_dir, "el_catalogue.idx")
self.transform = BlinkTransform(
model_path="bert-large-uncased",
max_mention_len=12,
max_entity_len=64,
add_eos_bos_to_entity=True,
)
self.tokens = {
"London": [
2414,
2003,
1996,
3007,
1998,
2922,
2103,
1997,
2563,
1998,
1996,
2142,
2983,
],
"Chelsea F.C.": [
9295,
2374,
2252,
2003,
2019,
2394,
2658,
2374,
2252,
2241,
1999,
21703,
1010,
2414,
1012,
],
}
def test_ent_catalogue(self):
ent_catalogue = EntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
self.assertIn("London", ent_catalogue)
self.assertIn("Chelsea F.C.", ent_catalogue)
self.assertNotIn("Moscow", ent_catalogue)
idx, data = ent_catalogue["London"]
self.assertEqual(idx, 0)
self.assertSequenceEqual(data, self.tokens["London"])
idx, data = ent_catalogue["Chelsea F.C."]
self.assertEqual(idx, 1)
self.assertSequenceEqual(data, self.tokens["Chelsea F.C."])
def test_el_matcha_dataset(self):
ent_catalogue = EntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
ds = ElMatchaDataset(path=self.data_path, ent_catalogue=ent_catalogue)
self.assertEqual(len(ds), 3)
self.assertEqual(
ds[0],
{
"context_left": "",
"mention": "Chelsea Football Club",
"context_right": "is an English professional football club based in Fulham London",
"entity_id": "Chelsea F.C.",
"entity_index": 1,
"entity_tokens": self.tokens["Chelsea F.C."],
},
)
self.assertEqual(
ds[1],
{
"context_left": "Chelsea Football Club is an English professional football club based in Fulham",
"mention": "London",
"context_right": "",
"entity_id": "London",
"entity_index": 0,
"entity_tokens": self.tokens["London"],
},
)
self.assertEqual(
ds[2],
{
"context_left": "In",
"mention": "London",
"context_right": "the capital of England and the United Kingdom",
"entity_id": "London",
"entity_index": 0,
"entity_tokens": self.tokens["London"],
},
)
def test_el_bi_encoder_data_module(self):
dm = ElBiEncoderDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
batch_size=2,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 2)
expected_batches = [
{
"mentions": {
"input_ids": torch.tensor(
[
[
101,
1,
9295,
2374,
2252,
2,
2003,
2019,
2394,
2658,
2374,
102,
],
[
101,
2394,
2658,
2374,
2252,
2241,
1999,
21703,
1,
2414,
2,
102,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
101,
9295,
2374,
2252,
2003,
2019,
2394,
2658,
2374,
2252,
2241,
1999,
21703,
1010,
2414,
1012,
102,
],
[
101,
2414,
2003,
1996,
3007,
1998,
2922,
2103,
1997,
2563,
1998,
1996,
2142,
2983,
102,
0,
0,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
]
),
},
"entity_ids": torch.tensor([1, 0]),
"targets": torch.tensor([0, 1]),
"entity_tensor_mask": torch.tensor([1, 1]),
},
{
"mentions": {
"input_ids": torch.tensor(
[
[
101,
1999,
1,
2414,
2,
1996,
3007,
1997,
2563,
1998,
1996,
102,
]
]
),
"attention_mask": torch.tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
101,
2414,
2003,
1996,
3007,
1998,
2922,
2103,
1997,
2563,
1998,
1996,
2142,
2983,
102,
]
]
),
"attention_mask": torch.tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
),
},
"entity_ids": torch.tensor([0]),
"targets": torch.tensor([0]),
"entity_tensor_mask": torch.tensor([1]),
},
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
class TestBiEncoderELDataModuleWithXlmrTransform(unittest.TestCase):
def setUp(self):
random.seed(0)
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_matcha.jsonl")
self.ent_catalogue_path = os.path.join(self.base_dir, "el_xlmr_catalogue.h5")
self.ent_catalogue_idx_path = os.path.join(self.base_dir, "el_catalogue.idx")
self.transform = BlinkTransform(
model_path="xlm-roberta-base",
mention_start_token=-2,
mention_end_token=-3,
max_mention_len=12,
max_entity_len=32,
)
def test_el_bi_encoder_data_module_with_xlmr_transform(self):
dm = ElBiEncoderDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
batch_size=2,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 2)
expected_batches = [
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
250000,
44517,
98809,
7687,
249999,
83,
142,
14941,
23182,
101740,
2,
],
[
0,
23182,
101740,
11938,
35509,
23,
88437,
3915,
250000,
9020,
249999,
2,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
44517,
563,
5,
441,
5,
250000,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
4,
9020,
5,
215624,
297,
23,
66007,
4,
70,
11938,
98438,
2,
],
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
],
]
),
"attention_mask": torch.tensor(
[
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
],
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
],
]
),
},
"entity_ids": torch.tensor([1, 0]),
"targets": torch.tensor([0, 1]),
"entity_tensor_mask": torch.tensor([1, 1]),
},
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
360,
250000,
9020,
249999,
70,
10323,
111,
30715,
136,
70,
2,
]
]
),
"attention_mask": torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
]
]
),
"attention_mask": torch.tensor(
[
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
]
]
),
},
"entity_ids": torch.tensor([0]),
"targets": torch.tensor([0]),
"entity_tensor_mask": torch.tensor([1]),
},
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
def test_el_bi_encoder_data_module_with_hard_negatives_with_xlmr_transform(self):
dm = ElBiEncoderDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
batch_size=2,
negatives=True,
max_negative_entities_in_batch=5,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 2)
expected_batches = [
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
250000,
44517,
98809,
7687,
249999,
83,
142,
14941,
23182,
101740,
2,
],
[
0,
23182,
101740,
11938,
35509,
23,
88437,
3915,
250000,
9020,
249999,
2,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
44517,
563,
5,
441,
5,
250000,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
4,
9020,
5,
215624,
297,
23,
66007,
4,
70,
11938,
98438,
2,
],
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
],
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
]
),
"attention_mask": torch.tensor(
[
[1] * 32,
[1] * 32,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
]
),
},
"entity_ids": torch.tensor([1, 0, 0, 0, 0, 0, 0]),
"targets": torch.tensor([0, 1]),
"entity_tensor_mask": torch.tensor([1, 1, 0, 0, 0, 0, 0]),
},
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
360,
250000,
9020,
249999,
70,
10323,
111,
30715,
136,
70,
2,
]
]
),
"attention_mask": torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
],
[
0,
44517,
563,
5,
441,
5,
250000,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
4,
9020,
5,
215624,
297,
23,
66007,
4,
70,
11938,
98438,
2,
],
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
]
),
"attention_mask": torch.tensor(
[
[1] * 32,
[1] * 32,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
]
),
},
"entity_ids": torch.tensor([0, 1, 0, 0, 0, 0]),
"targets": torch.tensor([0]),
"entity_tensor_mask": torch.tensor([1, 1, 0, 0, 0, 0]),
},
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
class TestMultilangELDataModule(unittest.TestCase):
def setUp(self):
random.seed(0)
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_blink.jsonl")
self.ent_catalogue_idx_path = os.path.join(
self.base_dir, "el_multi_catalogue.idx"
)
fid, self.ent_catalogue_path = tempfile.mkstemp()
os.close(fid)
self._create_ent_data(self.ent_catalogue_path)
def tearDown(self):
if os.path.isfile(self.ent_catalogue_path):
os.remove(self.ent_catalogue_path)
@staticmethod
def _create_ent_data(file_name):
with h5py.File(file_name, "w") as fd:
fd["en"] = np.array(
[
[3, 101, 25550, 102, 0, 0],
[3, 101, 16765, 102, 0, 0],
[5, 101, 12109, 10104, 14822, 102],
[3, 101, 10829, 102, 0, 0],
]
)
fd["pt"] = np.array(
[
[3, 101, 12264, 102, 0, 0],
[5, 101, 14734, 47630, 27171, 102],
]
)
fd["ru"] = np.array([[5, 101, 59049, 118, 11323, 102]])
def test_multilang_ent_catalogue(self):
ent_catalogue = MultilangEntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
self.assertIn("Q5146", ent_catalogue)
self.assertIn("Q155", ent_catalogue)
self.assertIn("Q8678", ent_catalogue)
self.assertIn("Q84", ent_catalogue)
self.assertNotIn("London", ent_catalogue)
idx0, data = ent_catalogue["Q5146"]
self.assertSequenceEqual(data, [101, 14734, 47630, 27171, 102])
idx1, data = ent_catalogue["Q155"]
self.assertSequenceEqual(data, [101, 16765, 102])
idx2, data = ent_catalogue["Q8678"]
self.assertSequenceEqual(data, [101, 59049, 118, 11323, 102])
idx3, data = ent_catalogue["Q84"]
self.assertSequenceEqual(data, [101, 10829, 102])
# assert all keys have unique idx numbers
self.assertEqual(sorted([idx0, idx1, idx2, idx3]), [0, 1, 2, 3])
def test_el_blink_dataset(self):
ent_catalogue = MultilangEntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
ds = ElBlinkDataset(path=self.data_path, ent_catalogue=ent_catalogue)
self.assertEqual(len(ds), 7)
self.assertEqual(
ds[0],
{
"context_left": "Guanabara K\u00f6rfezi (",
"mention": "Portekizce",
"context_right": ": ve de Rio de Janeiro eyaletinde.",
"entity_id": "Q5146",
"entity_index": ent_catalogue["Q5146"][0],
"entity_tokens": ent_catalogue["Q5146"][1],
},
)
self.assertEqual(
ds[6],
{
"context_left": "Serpenti Galerisi (\u0130ngilizce: Serpentine Gallery),",
"mention": "Londra",
"context_right": "\u015fehrindeki Hyde Park\u2019\u0131n bir par\u00e7as\u0131 olan Kensington Gardens.",
"entity_id": "Q84",
"entity_index": ent_catalogue["Q84"][0],
"entity_tokens": ent_catalogue["Q84"][1],
},
)
def test_el_multilang_datamodule(self):
transform = BlinkTransform(
model_path="xlm-roberta-base",
mention_start_token=-2,
mention_end_token=-3,
max_mention_len=12,
max_entity_len=32,
)
dm = ElBiEncoderDataModule(
transform=transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
dataset_type="blink",
ent_catalogue_type="multi",
batch_size=2,
mention_start_token=1,
mention_end_token=2,
ent_sep_token=3,
mention_context_length=12,
separate_segments=True,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 4)
if __name__ == '__main__':
unittest.main()
|
BELA-main
|
mblink/tests/test_datamodules.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.models.hf_encoder import HFEncoder
from bela.transforms.joint_el_transform import JointELTransform
class TestHFEncoder(unittest.TestCase):
def test_xlmr_encoder(self):
transform = JointELTransform()
model = HFEncoder(model_path="xlm-roberta-base")
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
output = model(
input_ids=model_inputs["input_ids"],
attention_mask=model_inputs["attention_mask"],
)
if __name__ == '__main__':
unittest.main()
|
BELA-main
|
mblink/tests/test_models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.transforms.joint_el_transform import JointELTransform
class TestJointELXlmrTransforms(unittest.TestCase):
def test_blink_mention_xlmr_transform(self):
transform = JointELTransform()
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
expected_model_inputs = {
"input_ids": torch.tensor(
[
[0, 31384, 8781, 7986, 1672, 5120, 8884, 136, 5755, 2],
[0, 47958, 19715, 711, 2, 1, 1, 1, 1, 1],
[0, 96085, 43340, 1129, 2765, 22, 21, 65526, 2, 1],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
]
),
"mention_offsets": torch.tensor([[5, 8], [2, 0], [1, 7]]),
"mention_lengths": torch.tensor([[2, 1], [1, 0], [2, 1]]),
"entities": torch.tensor([[1, 2], [3, 0], [102041, 267832]]),
"tokens_mapping": torch.tensor(
[
[[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9]],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 5],
[5, 6],
[6, 7],
[7, 8],
[0, 1],
[0, 1],
],
]
),
}
for key, value in expected_model_inputs.items():
self.assertTrue(
torch.all(model_inputs[key].eq(value)), f"{key} not equal"
)
if __name__ == '__main__':
unittest.main()
|
BELA-main
|
mblink/tests/test_transforms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from enum import Enum
from typing import List
import torch
import h5py
logger = logging.getLogger()
class EntityCatalogueType(Enum):
SIMPLE = "simple"
MULTI = "multi"
class ElDatasetType(Enum):
BLINK = "blink"
MATCHA = "matcha"
class NegativesStrategy(Enum):
HIGHER = "higher"
ALL = "all"
def assert_equal_tensor_dict(test_case, result, expected):
"""
    Compare tensors/values in the dict and fail the test if they are not equal.
    The dict can contain multiple levels of nesting.
"""
for key, value in expected.items():
if isinstance(value, dict):
assert_equal_tensor_dict(test_case, result[key], value)
else:
if isinstance(value, torch.Tensor):
test_case.assertTrue(
torch.equal(result[key], value), f"{key} is not equal"
)
else:
test_case.assertEqual(result[key], value, f"{key} is not equal")
def get_seq_lengths(batch: List[List[int]]):
return [len(example) for example in batch]
class EntityCatalogue:
def __init__(self, local_path, idx_path):
self.data_file = h5py.File(local_path, "r")
self.data = self.data_file["data"]
logger.info(f"Reading entity catalogue index {idx_path}")
self.idx = {}
with open(idx_path, "rt") as fd:
for idx, line in enumerate(fd):
ent_id = line.strip()
self.idx[ent_id] = idx
def __len__(self):
return len(self.idx)
def __getitem__(self, entity_id):
ent_index = self.idx[entity_id]
value = self.data[ent_index].tolist()
value = value[1 : value[0] + 1]
return ent_index, value
def __contains__(self, entity_id):
return entity_id in self.idx
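# Storage layout assumed by EntityCatalogue.__getitem__ above: each row of the
# h5 "data" dataset is a fixed-width, length-prefixed token array, e.g. a row
# [3, 101, 25550, 102, 0, 0] holds 3 valid tokens [101, 25550, 102] followed
# by padding, so value[1 : value[0] + 1] strips the prefix and the padding.
# (The example row mirrors the fixtures in mblink/tests.)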
class MultilangEntityCatalogue:
"""
Entity catalogue where each entity id has descriptions in different languages
    The index is a JSON file whose keys are entity ids. Each value is a dict
    mapping a language id to a triplet (title, count, index): title is the
    Wikipedia title of the entity in that language, count is the number of
    mentions of the entity in that language, and index is the position of the
    entity tokens in the tokens array.
Index example:
{
...
"Q17": {
"en": ["Japan", 230, 10],
"ru": ["Япония", 111, 55]
}
...
}
    The tokens file is an h5py file where the dataset keys are language ids and the
    stored arrays are entity tokens.
"""
def __init__(self, local_path, idx_path):
self.data = h5py.File(local_path, "r")
logger.info(f"Reading entity catalogue index {idx_path}")
with open(idx_path, "rt") as fd:
self.idx = json.load(fd)
# assign unique index number to each entity
for idx, ent_value in enumerate(self.idx.values()):
ent_value["idx"] = idx
def __len__(self):
return len(self.idx)
def __getitem__(self, entity_id):
ent_lang_map = self.idx[entity_id]
# now choose language with most mentions
selected_lang = None
max_count = -1
for lang, val in ent_lang_map.items():
if lang == "idx":
continue
_, count, _ = val
if count > max_count:
max_count = count
selected_lang = lang
assert selected_lang is not None
ent_index = ent_lang_map[selected_lang][2]
value = self.data[selected_lang][ent_index].tolist()
value = value[1 : value[0] + 1]
return ent_lang_map["idx"], value
def __contains__(self, entity_id):
return entity_id in self.idx
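# Illustrative sketch (not part of the original module): shows how the index json and
# the h5py tokens file described above fit together. The file names, token values and
# the `_multilang_catalogue_example` helper are made up for this example.
def _multilang_catalogue_example(tmp_dir):
    import os
    import numpy as np
    tokens_path = os.path.join(tmp_dir, "tokens.h5")
    index_path = os.path.join(tmp_dir, "index.json")
    # Each row stores the token count in position 0, followed by the tokens.
    with h5py.File(tokens_path, "w") as f:
        f.create_dataset("en", data=np.array([[3, 101, 102, 103, 0]]))
        f.create_dataset("ru", data=np.array([[2, 201, 202, 0, 0]]))
    # "Q17" has an English description (row 0 of the "en" dataset, 230 mentions)
    # and a Russian one (row 0 of the "ru" dataset, 111 mentions).
    with open(index_path, "w") as f:
        json.dump({"Q17": {"en": ["Japan", 230, 0], "ru": ["Япония", 111, 0]}}, f)
    catalogue = MultilangEntityCatalogue(tokens_path, index_path)
    # English wins because it has more mentions; the returned tokens are [101, 102, 103].
    ent_idx, tokens = catalogue["Q17"]
    return ent_idx, tokens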
def order_entities(
entities_data,
entity_ids,
neg_entities_ids=None,
neg_entities_tokens=None,
max_negative_entities_in_batch=None,
):
"""
This function removes duplicated entities in the entities batch and
constructs the targets.
In bi-encoder model we train on in-batch random and hard negatives. In this
case each mention should have one positive entity class in enttiteis batch.
But it could happen there are two or more mentions in the batch that
referes to the same entitty (this entity would be in the batch 2 and more
times). In this case we could predict class correctly and calculate loss.
To resolve this problem we filter entities and left only one example of
each in the batch.
Returns:
filteres_entities - filtered entities tokens
filtered_entity_ids - filtered entities_ids
targets - array, where each i-th element is a position in embedding's
matrix of entity embedding of i-th corresponding mention.
"""
    ent_indexes_map = {}
    targets = []
    filtered_entities = []
    filtered_entity_ids = []
    for ent_id, ent_data in zip(entity_ids, entities_data):
        if ent_id in ent_indexes_map:
            targets.append(ent_indexes_map[ent_id])
        else:
            ent_idx = len(ent_indexes_map)
            targets.append(ent_idx)
            ent_indexes_map[ent_id] = ent_idx
            filtered_entities.append(ent_data)
            filtered_entity_ids.append(ent_id)
    # Append `max_negative_entities_in_batch` entities to the end of the batch
    neg_entities_ids = neg_entities_ids or []
    neg_entities_tokens = neg_entities_tokens or []
    neg_filtered_entities = []
    neg_filtered_entity_ids = []
    for item_neg_entities_ids, item_neg_entities_tokens in zip(
        neg_entities_ids,
        neg_entities_tokens,
    ):
        for neg_entity_id, neg_entity_tokens in zip(
            item_neg_entities_ids, item_neg_entities_tokens
        ):
            if neg_entity_id not in ent_indexes_map:
                ent_idx = len(ent_indexes_map)
                ent_indexes_map[neg_entity_id] = ent_idx
                neg_filtered_entities.append(neg_entity_tokens)
                neg_filtered_entity_ids.append(neg_entity_id)
    if max_negative_entities_in_batch is not None:
        neg_filtered_entities = neg_filtered_entities[:max_negative_entities_in_batch]
        neg_filtered_entity_ids = neg_filtered_entity_ids[
            :max_negative_entities_in_batch
        ]
    filtered_entities.extend(neg_filtered_entities)
    filtered_entity_ids.extend(neg_filtered_entity_ids)
    return filtered_entities, filtered_entity_ids, targets
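# Illustrative sketch (not part of the original module): a tiny worked example of
# order_entities. The token lists, entity ids and the `_order_entities_example`
# helper are made up; the point is that the duplicated entity id 7 is kept once
# while both of its mentions point at the same target position.
def _order_entities_example():
    entities_data = [[11, 12], [21, 22], [11, 12]]
    entity_ids = [7, 9, 7]
    filtered_tokens, filtered_ids, targets = order_entities(entities_data, entity_ids)
    assert filtered_ids == [7, 9]
    assert targets == [0, 1, 0]
    return filtered_tokens, filtered_ids, targets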
|
BELA-main
|
mblink/utils/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from transformers import AutoModel
from torch import nn
class HFEncoder(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
projection_dim: Optional[int] = None,
output_dropout: Optional[float] = 0.0,
):
super().__init__()
self.transformer = AutoModel.from_pretrained(model_path)
self.embedding_dim = self.transformer.encoder.config.hidden_size
self.project = nn.Identity() # to make torchscript happy
if projection_dim:
self.project = nn.Sequential(
nn.Linear(self.embedding_dim, projection_dim), nn.LayerNorm(projection_dim)
)
self.output_dropout = nn.Dropout(output_dropout)
def forward(self, input_ids, attention_mask=None):
output = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
last_layer = output["last_hidden_state"]
sentence_rep = self.project(last_layer[:, 0, :])
return self.output_dropout(sentence_rep), last_layer
|
BELA-main
|
mblink/models/hf_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from transformers import AutoTokenizer
class HFTransform(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
max_seq_len: int = 256,
add_special_tokens: bool = True,
return_offsets_mapping: bool = True,
):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.sep_token = self.tokenizer.sep_token
self.max_seq_len = max_seq_len
self.add_special_tokens = add_special_tokens
self.return_offsets_mapping = return_offsets_mapping
self.pad_token_id = self.tokenizer.pad_token_id
def forward(self, texts):
return self.tokenizer(
texts,
return_tensors=None,
padding=False,
truncation=True,
max_length=self.max_seq_len,
add_special_tokens=self.add_special_tokens,
return_offsets_mapping=self.return_offsets_mapping,
)["input_ids"]
|
BELA-main
|
mblink/transforms/hf_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from mblink.transforms.hf_transform import HFTransform
from mblink.utils.utils import (
EntityCatalogueType,
EntityCatalogue,
MultilangEntityCatalogue,
NegativesStrategy,
order_entities,
)
@torch.jit.script
def pad_2d(
batch: List[List[int]], seq_lens: List[int], pad_idx: int, max_len: int = -1
) -> List[List[int]]:
pad_to_length = max(seq_lens)
if max_len > 0:
pad_to_length = min(pad_to_length, max_len)
for sentence in batch:
padding = pad_to_length - len(sentence)
if padding >= 0:
for _ in range(padding):
sentence.append(pad_idx)
else:
for _ in range(-padding):
sentence.pop()
return batch
def prepare_mention(
context_left: List[int],
mention_tokens: List[int],
context_right: List[int],
max_mention_length: int,
mention_start_token: int,
mention_end_token: int,
bos_idx: int,
eos_idx: int,
):
context_left: List[int] = context_left[1:-1]
mention_tokens: List[int] = mention_tokens[1:-1]
context_right: List[int] = context_right[1:-1]
mention_tokens = mention_tokens[: max_mention_length - 4]
mention_tokens = [mention_start_token] + mention_tokens + [mention_end_token]
left_quota = (max_mention_length - len(mention_tokens)) // 2 - 1
right_quota = max_mention_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
empty_tokens: List[int] = []
context_left = empty_tokens if left_quota == 0 else context_left[-left_quota:]
context_right = context_right[:right_quota]
context_left = [bos_idx] + context_left
context_right = context_right + [eos_idx]
context_tokens = context_left + mention_tokens + context_right
return context_tokens
# class BlinkMentionRobertaTransform(HFTransform):
# def __init__(
# self,
# mention_start_token: int = -2,
# mention_end_token: int = -3,
# model_path: Optional[str] = None,
# max_seq_len: int = 64,
# ):
# super().__init__(
# model_path=model_path,
# max_seq_len=max_seq_len,
# )
# vocab_length = len(self.tokenizer.vocab)
# self.bos_idx = self.tokenizer.bos_token_id
# self.eos_idx = self.tokenizer.eos_token_id
# self.mention_start_token = (vocab_length + mention_start_token) % vocab_length
# self.mention_end_token = (vocab_length + mention_end_token) % vocab_length
# self.max_mention_length = max_seq_len
# def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
# left_context = batch["left_context"]
# torch.jit.isinstance(left_context, List[str])
# right_context = batch["right_context"]
# torch.jit.isinstance(right_context, List[str])
# mention = batch["mention"]
# torch.jit.isinstance(mention, List[str])
# left_token_ids: List[List[int]] = self.tokenizer(left_context)["input_ids"]
# mention_token_ids: List[List[int]] = self.tokenizer(mention)["input_ids"]
# right_token_ids: List[List[int]] = self.tokenizer(right_context)["input_ids"]
# token_ids: List[List[int]] = []
# attention_masks: List[List[int]] = []
# seq_lens: List[int] = []
# for lc_token_ids, m_token_ids, rc_token_ids, in zip(
# left_token_ids,
# mention_token_ids,
# right_token_ids,
# ):
# sentence_token_ids = prepare_mention(
# lc_token_ids,
# m_token_ids,
# rc_token_ids,
# self.max_mention_length,
# self.mention_start_token,
# self.mention_end_token,
# self.bos_idx,
# self.eos_idx,
# )
# token_ids.append(sentence_token_ids)
# attention_mask = [1] * len(sentence_token_ids)
# attention_masks.append(attention_mask)
# seq_lens.append(len(sentence_token_ids))
# attention_masks = pad_2d(
# attention_masks,
# seq_lens,
# pad_idx = 0,
# )
# return {
# "input_ids": token_ids,
# "attention_mask": attention_masks,
# }
# class BlinkEntityPretokenizedTransform(HFTransform):
# def __init__(
# self,
# model_path: Optional[str] = None,
# max_seq_len: int = 64,
# ):
# super().__init__(
# model_path=model_path,
# max_seq_len=max_seq_len,
# )
# self.max_entity_length = max_seq_len
# def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
# token_ids = batch["token_ids"]
# torch.jit.isinstance(token_ids, List[List[int]])
# result_token_ids: List[List[int]] = []
# seq_lens: List[int] = []
# attention_masks: List[List[int]] = []
# for token_ids_per_sequence in token_ids:
# if len(token_ids_per_sequence) > self.max_entity_length:
# eos_token = token_ids_per_sequence[-1]
# token_ids_per_sequence = token_ids_per_sequence[
# : self.max_entity_length
# ]
# token_ids_per_sequence[-1] = eos_token
# result_token_ids.append(token_ids_per_sequence)
# seq_len = len(token_ids_per_sequence)
# attention_mask = [1] * len(token_ids_per_sequence)
# attention_masks.append(attention_mask)
# seq_lens.append(seq_len)
# attention_masks = pad_2d(
# attention_masks,
# seq_lens,
# pad_idx = 0,
# )
# return {
# "input_ids": result_token_ids,
# "attention_mask": attention_masks,
# }
# class BlinkTransform(nn.Module):
# def __init__(
# self,
# model_path: Optional[str] = None,
# mention_start_token: int = -2,
# mention_end_token: int = -3,
# max_mention_len: int = 64,
# max_entity_len: int = 64,
# ):
# super().__init__()
# self.mention_transform = BlinkMentionRobertaTransform(
# mention_start_token=mention_start_token,
# mention_end_token=mention_end_token,
# model_path=model_path,
# max_seq_len=max_mention_len,
# )
# self.entity_transform = BlinkEntityPretokenizedTransform(
# model_path=model_path,
# max_seq_len=max_entity_len,
# )
# def forward(
# self, batch: Dict[str, Any]
# ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
# return self.mention_transform(batch), self.entity_transform(batch)
# @property
# def bos_idx(self):
# return self.mention_transform.bos_idx
# @property
# def eos_idx(self):
# return self.mention_transform.eos_idx
class BlinkTransform(HFTransform):
def __init__(
self,
model_path: str = "bert-base-uncased",
mention_start_token: int = 1,
mention_end_token: int = 2,
max_mention_len: int = 32,
max_entity_len: int = 64,
add_eos_bos_to_entity: bool = False,
):
super().__init__(
model_path=model_path,
)
vocab_length = self.tokenizer.vocab_size
self.mention_start_token = (vocab_length + mention_start_token) % vocab_length
self.mention_end_token = (vocab_length + mention_end_token) % vocab_length
self.max_mention_len = max_mention_len
self.max_entity_len = max_entity_len
self.add_eos_bos_to_entity = add_eos_bos_to_entity
def _transform_mention(
self,
left_context: List[str],
mention: List[str],
right_context: List[str],
) -> List[List[int]]:
token_ids: List[List[int]] = []
for sentence_lc, sentence_mention, sentence_rc, in zip(
left_context,
mention,
right_context,
):
lc_token_ids = self.tokenizer.encode(sentence_lc)
mention_token_ids = self.tokenizer.encode(sentence_mention)
rc_token_ids = self.tokenizer.encode(sentence_rc)
sentence_token_ids = prepare_mention(
lc_token_ids,
mention_token_ids,
rc_token_ids,
self.max_mention_len,
self.mention_start_token,
self.mention_end_token,
self.tokenizer.cls_token_id,
self.tokenizer.sep_token_id,
)
token_ids.append(sentence_token_ids)
return token_ids
def _transform_entity(
self,
entity_token_ids: List[List[int]],
) -> List[List[int]]:
result_token_ids: List[List[int]] = []
for token_ids in entity_token_ids:
if self.add_eos_bos_to_entity:
token_ids = [self.bos_idx] + token_ids + [self.eos_idx]
if len(token_ids) > self.max_entity_len:
token_ids = token_ids[: self.max_entity_len]
token_ids[-1] = self.eos_idx
result_token_ids.append(token_ids)
return result_token_ids
def _to_tensor(self, token_ids, attention_mask_pad_idx=0):
seq_lens = [len(seq) for seq in token_ids]
input_ids = pad_2d(
token_ids,
seq_lens,
pad_idx = self.pad_token_id,
)
attention_mask = [[1]*seq_len for seq_len in seq_lens]
attention_mask = pad_2d(
attention_mask,
seq_lens,
pad_idx = attention_mask_pad_idx,
)
return {
'input_ids': torch.tensor(input_ids),
'attention_mask': torch.tensor(attention_mask),
}
def forward(
self, batch: Dict[str, Any]
) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
left_context = batch["left_context"]
torch.jit.isinstance(left_context, List[str])
mention = batch["mention"]
torch.jit.isinstance(mention, List[str])
right_context = batch["right_context"]
torch.jit.isinstance(right_context, List[str])
entity_token_ids = batch["token_ids"]
torch.jit.isinstance(entity_token_ids, List[List[int]])
mention_token_ids = self._transform_mention(
left_context,
mention,
right_context,
)
mention_tensors = self._to_tensor(
mention_token_ids,
)
entity_token_ids = self._transform_entity(entity_token_ids)
entity_tensors = self._to_tensor(
entity_token_ids,
)
return (mention_tensors, entity_tensors)
@property
def bos_idx(self):
return self.tokenizer.cls_token_id
@property
def eos_idx(self):
return self.tokenizer.sep_token_id
|
BELA-main
|
mblink/transforms/blink_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Optional
from pytorch_lightning.strategies import DDPShardedStrategy, DDPStrategy
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
fp16_compress_hook,
)
import hydra
import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from mblink.conf import (
DataModuleConf,
ModelConf,
OptimConf,
TransformConf,
)
logger = logging.getLogger(__name__)
class InBatchTripletLoss(nn.Module):
# Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
# Blog post: https://omoindrot.github.io/triplet-loss
def __init__(self, margin: float = 1.0):
super().__init__()
self.margin = margin
def forward(self, scores: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Build the triplet loss over a matrix of computed scores
        For each mention, the score of the correct entity should be greater than
        the scores of all other entities in the batch by at least the margin.
Args:
scores: n_mentions x n_entities matrix of distances between mentions and entities
targets: vector of indices of correct entity for each mention (n_mentions)
"""
one_hot_targets = torch.zeros(scores.shape).bool()
one_hot_targets[torch.arange(targets.shape[0]), targets] = True
pos_scores = scores[one_hot_targets].unsqueeze(1).repeat(1, scores.shape[1] - 1)
neg_scores = scores[~one_hot_targets].reshape(
scores.shape[0], scores.shape[1] - 1
)
loss = torch.relu(self.margin + neg_scores - pos_scores).mean()
return loss
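# Illustrative sketch (not part of the original module): a hand-checked call of
# InBatchTripletLoss on a 2 x 3 score matrix. The numbers and the
# `_in_batch_triplet_loss_example` helper are made up.
def _in_batch_triplet_loss_example():
    loss_fn = InBatchTripletLoss(margin=1.0)
    # Row i holds mention i's similarity to the three in-batch entities.
    scores = torch.tensor([[5.0, 1.0, 0.0], [0.0, 4.0, 3.5]])
    targets = torch.tensor([0, 1])
    # Only the 4.0 vs 3.5 pair violates the margin (by 0.5), so the mean over the
    # four (mention, negative) pairs is 0.5 / 4 = 0.125.
    return loss_fn(scores, targets)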
class InBatchMarginLoss(nn.Module):
"""
    Pushes positive scores above the margin and negative scores below 0.
    The loss is calculated as max(0, margin - positive scores) +
    max(0, negative scores).
"""
def __init__(self, margin: float = 100.0, pos_weight=1.0, use_mean=True):
super().__init__()
self.margin = margin
self.pos_weight = pos_weight
self.reduce_op = torch.mean if use_mean else torch.sum
def forward(self, scores: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
one_hot_targets = torch.zeros(scores.shape).bool()
one_hot_targets[torch.arange(targets.shape[0]), targets] = True
pos_scores = scores[one_hot_targets]
neg_scores = scores[~one_hot_targets]
        if self.pos_weight is None:
            pos_weight = scores.shape[1] - 1
        else:
            pos_weight = self.pos_weight
loss = self.reduce_op(
pos_weight * torch.relu(self.margin - pos_scores)
) + self.reduce_op(torch.relu(neg_scores))
return loss
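# Illustrative sketch (not part of the original module): a hand-checked call of
# InBatchMarginLoss with margin 10. The numbers and the
# `_in_batch_margin_loss_example` helper are made up.
def _in_batch_margin_loss_example():
    # With pos_weight=None the positive term is weighted by n_entities - 1 (= 1 here).
    loss_fn = InBatchMarginLoss(margin=10.0, pos_weight=None)
    scores = torch.tensor([[12.0, -1.0], [8.0, 0.5]])
    targets = torch.tensor([0, 1])
    # Mention 0 satisfies both constraints (12 > 10 and -1 < 0); mention 1 contributes
    # relu(10 - 0.5) to the positive term and relu(8) to the negative term, giving
    # mean([0, 9.5]) + mean([0, 8]) = 8.75.
    return loss_fn(scores, targets)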
class CombinedLoss(nn.Module):
def __init__(self, first: nn.Module, second: nn.Module, second_weight=1.0):
super().__init__()
self.first = first
self.second = second
self.second_weight = second_weight
def forward(self, scores: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
return self.first(scores, targets) + self.second_weight * self.second(
scores, targets
)
class ElBiEncoderTask(LightningModule):
def __init__(
self,
transform: TransformConf,
model: ModelConf,
datamodule: DataModuleConf,
optim: OptimConf,
in_batch_eval: bool = True, # use only in batch contexts for validation
warmup_steps: int = 0,
filter_entities: bool = True,
loss: str = "cross_entropy",
triplet_loss_margin: float = 1.0,
margin_loss_margin: float = 100.0,
margin_loss_pos_weight: Optional[float] = None,
margin_loss_weight: float = 1.0,
margin_loss_mean: bool = True,
load_from_checkpoint: Optional[str] = None,
fp16_grads: bool = False,
):
super().__init__()
# encoder setup
self.mention_encoder_conf = model
self.entity_encoder_conf = model
self.optim_conf = optim
self.in_batch_eval = in_batch_eval
self.warmup_steps = warmup_steps
self.filter_entities = filter_entities
self.load_from_checkpoint = load_from_checkpoint
self.fp16_grads = fp16_grads
if loss == "cross_entropy":
self.loss = nn.CrossEntropyLoss()
elif loss == "triplet":
self.loss = InBatchTripletLoss(margin=triplet_loss_margin)
elif loss == "margin":
self.loss = CombinedLoss(
nn.CrossEntropyLoss(),
InBatchMarginLoss(
margin=margin_loss_margin,
pos_weight=margin_loss_pos_weight,
                    use_mean=margin_loss_mean,
),
margin_loss_weight,
)
else:
raise ValueError(f"Unsupported loss {loss}")
@staticmethod
def _get_encoder_state(state, encoder_name):
encoder_state = OrderedDict()
for key, value in state["state_dict"].items():
if key.startswith(encoder_name):
encoder_state[key[len(encoder_name) + 1 :]] = value
return encoder_state
def setup(self, stage: str):
if stage == "test":
return
        # reset call_configure_sharded_model_hook so that we can configure the model here
self.call_configure_sharded_model_hook = False
self.mention_encoder = hydra.utils.instantiate(
self.mention_encoder_conf,
_recursive_=False,
)
self.entity_encoder = hydra.utils.instantiate(
self.entity_encoder_conf,
_recursive_=False,
)
if self.load_from_checkpoint is not None:
logger.info(f"Load encoders state from {self.load_from_checkpoint}")
with open(self.load_from_checkpoint, "rb") as f:
checkpoint = torch.load(f, map_location=torch.device("cpu"))
entity_encoder_state = self._get_encoder_state(checkpoint, "entity_encoder")
self.entity_encoder.load_state_dict(entity_encoder_state)
mention_encoder_state = self._get_encoder_state(
checkpoint, "mention_encoder"
)
self.mention_encoder.load_state_dict(mention_encoder_state)
self.optimizer = hydra.utils.instantiate(
self.optim_conf, self.parameters(), _recursive_=False
)
def on_pretrain_routine_start(self):
if self.fp16_grads:
self.trainer.strategy._model.register_comm_hook(None, fp16_compress_hook)
def sim_score(self, mentions_repr, entities_repr):
scores = torch.matmul(mentions_repr, torch.transpose(entities_repr, 0, 1))
return scores
def forward(
self,
mentions_ids,
entities_ids,
):
# encode query and contexts
mentions_repr, _ = self.mention_encoder(mentions_ids) # bs x d
entities_repr, _ = self.entity_encoder(entities_ids) # bs x d
return mentions_repr, entities_repr
def configure_optimizers(self):
return self.optimizer
def training_step(self, batch, batch_idx):
"""
        Training step: receives a batch of mentions and their corresponding entities.
"""
mentions = batch["mentions"] # bs x mention_len
entities = batch["entities"] # bs x entity len
entity_ids = batch["entity_ids"] # bs
targets = batch["targets"] # bs
mask = batch["entity_tensor_mask"] # bs
mentions_repr, entities_repr = self(mentions['input_ids'], entities['input_ids'])
if isinstance(self.trainer.strategy, (DDPStrategy, DDPShardedStrategy)):
mentions_to_send = mentions_repr.detach()
entities_to_send = entities_repr.detach()
all_mentions_repr = self.all_gather(mentions_to_send) # num_workers x bs
all_entities_repr = self.all_gather(entities_to_send)
all_targets = self.all_gather(targets)
# we are not filtering duplicated entities now
all_entity_ids = self.all_gather(entity_ids)
all_mask = self.all_gather(mask)
# offset = 0
all_mentions_list = []
all_entities_list = []
all_entity_ids_list = []
all_targets_list = []
            # Add the current device's representations first.
            # This is needed so that representations computed on this device
            # are not filtered out.
all_mentions_list.append(mentions_repr)
entities_repr = entities_repr[mask.bool()]
all_entities_list.append(entities_repr)
all_entity_ids_list.append(entity_ids[mask.bool()].tolist())
all_targets_list.append(targets)
# offset += entities_repr.size(0)
for i in range(all_targets.size(0)):
if i != self.global_rank:
all_mentions_list.append(all_mentions_repr[i])
all_entities_list.append(all_entities_repr[i][all_mask[i].bool()])
all_entity_ids_list.append(
all_entity_ids[i][all_mask[i].bool()].tolist()
)
# all_targets[i] += offset
all_targets_list.append(all_targets[i])
# offset += all_entities_repr[i].size(0)
mentions_repr = torch.cat(all_mentions_list, dim=0) # total_ctx x dim
# entities_repr = torch.cat(all_entities_list, dim=0) # total_query x dim
# targets = torch.cat(all_targets_list, dim=0)
if self.filter_entities:
entities_repr, targets = self._filter_entities_and_targets(
all_entities_list,
all_entity_ids_list,
all_targets_list,
)
else:
entities_repr = torch.cat(all_entities_list, dim=0)
targets = torch.cat(all_targets_list, dim=0)
# entity_ids = torch.flatten(entity_ids)
else:
entities_repr = entities_repr[mask.bool()]
scores = self.sim_score(mentions_repr, entities_repr)
loss = self.loss(scores, targets)
self.log("train_loss", loss, prog_bar=True)
return loss
def _filter_entities_and_targets(
self, all_entities_list, all_entity_ids_list, all_targets_list
):
filtered_entities_repr = []
filtered_targets = []
ent_indexes_map = {}
for entities_repr, entity_ids, targets, in zip(
all_entities_list,
all_entity_ids_list,
all_targets_list,
):
for entity_repr, ent_id in zip(entities_repr, entity_ids):
if ent_id not in ent_indexes_map:
ent_idx = len(ent_indexes_map)
ent_indexes_map[ent_id] = ent_idx
filtered_entities_repr.append(entity_repr)
for target in targets.tolist():
filtered_targets.append(ent_indexes_map[entity_ids[target]])
filtered_entities_repr = torch.stack(filtered_entities_repr, dim=0)
filtered_targets = torch.tensor(
filtered_targets,
dtype=torch.long,
device=filtered_entities_repr.get_device(),
)
return filtered_entities_repr, filtered_targets
def _eval_step(self, batch, batch_idx):
mentions = batch["mentions"] # bs x mention_len
entities = batch["entities"] # bs x entity len
entity_ids = batch["entity_ids"] # bs
targets = batch["targets"] # bs
mask = batch["entity_tensor_mask"] # bs
mentions_repr, entities_repr = self(mentions['input_ids'], entities['input_ids'])
entities_repr = entities_repr[mask.bool()]
scores = self.sim_score(mentions_repr, entities_repr) # bs x ctx_cnt
loss = self.loss(scores, targets)
return (
self.compute_rank_metrics(scores, targets),
mentions_repr,
entities_repr,
targets,
entity_ids,
loss,
)
def compute_rank_metrics(self, scores, target_labels):
# Compute total un_normalized avg_ranks, mrr
values, indices = torch.sort(scores, dim=1, descending=True)
rank = 0
mrr = 0.0
for i, idx in enumerate(target_labels):
gold_idx = (indices[i] == idx).nonzero()
rank += gold_idx.item() + 1
mrr += 1 / (gold_idx.item() + 1)
return rank, mrr
def _eval_epoch_end(self, outputs, log_prefix="valid"):
total_avg_rank, total_ent_count, total_count = 0, 0, 0
total_mrr = 0
total_loss = 0
if self.in_batch_eval:
for metrics, mentions_repr, entities_repr, _, _, loss in outputs:
rank, mrr = metrics
total_avg_rank += rank
total_mrr += mrr
total_ent_count += entities_repr.size(0)
total_count += mentions_repr.size(0)
total_loss += loss
total_ent_count = total_ent_count / len(outputs)
else:
# TODO: collect entities representations over all batches
            raise NotImplementedError("Only in-batch eval implemented!")
metrics = {
log_prefix + "_avg_rank": total_avg_rank / total_count,
log_prefix + "_mrr": total_mrr / total_count,
log_prefix + "_ent_count": total_ent_count,
log_prefix + "_loss": total_loss,
}
self.log_dict(metrics, on_epoch=True, sync_dist=True)
def validation_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def validation_epoch_end(self, valid_outputs):
self._eval_epoch_end(valid_outputs)
def test_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def test_epoch_end(self, test_outputs):
self._eval_epoch_end(test_outputs, "test")
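# Illustrative sketch (not part of the original module): the same rank / MRR
# bookkeeping as ElBiEncoderTask.compute_rank_metrics, written as a free function so
# it can be sanity-checked on a toy score matrix. The numbers and the
# `_rank_metrics_example` helper are made up.
def _rank_metrics_example():
    scores = torch.tensor([[0.9, 0.1, 0.2], [0.3, 0.8, 0.7]])
    target_labels = torch.tensor([0, 2])
    _, indices = torch.sort(scores, dim=1, descending=True)
    rank, mrr = 0, 0.0
    for i, idx in enumerate(target_labels):
        gold_idx = (indices[i] == idx).nonzero()
        rank += gold_idx.item() + 1
        mrr += 1 / (gold_idx.item() + 1)
    # Mention 0's gold entity is ranked 1st and mention 1's is ranked 2nd, so the
    # un-normalized totals are rank = 1 + 2 = 3 and mrr = 1 + 0.5 = 1.5.
    return rank, mrr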
|
BELA-main
|
mblink/task/blink_task.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, Any
# @manual "//github/facebookresearch/hydra:hydra"
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
defaults = [
"_self_",
{"task": "blink_task"},
{"checkpoint_callback": "default"},
]
@dataclass
class MainConfig:
defaults: List[Any] = field(default_factory=lambda: defaults)
task: Any = MISSING
datamodule: Any = MISSING
trainer: Any = MISSING
test_only: bool = False
checkpoint_callback: Any = MISSING
cs = ConfigStore.instance()
cs.store(name="config", node=MainConfig)
|
BELA-main
|
mblink/conf/config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
@dataclass
class TransformConf:
pass
@dataclass
class DataModuleConf:
pass
@dataclass
class OptimConf:
pass
@dataclass
class ModelConf:
pass
|
BELA-main
|
mblink/conf/__init__.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
import os, sys
from syntactic_testsets.utils import load_vocab
def lstm_probs(output, gold, w2idx):
data = []
for scores, g in zip(output, gold):
scores = scores.split()
form, form_alt = g.split("\t")[6:8]
prob_correct = float(scores[w2idx[form]])
prob_wrong = float(scores[w2idx[form_alt]])
data.append(prob_correct)
data.append(prob_wrong)
return data
lang = sys.argv[1]
model = sys.argv[2]
path_repo = "../data"
path_test_data = path_repo + "/agreement/" + lang + "/generated"
path_output = path_repo + "/agreement/" + lang + "/generated.output_"
path_lm_data = path_repo + "/lm/" + lang
gold = open(path_test_data + ".gold").readlines()
sents = open(path_test_data + ".text").readlines()
data = pd.read_csv(path_test_data + ".tab",sep="\t")
vocab = load_vocab(path_lm_data + "/vocab.txt")
# getting softmax outputs and the probabilities for pairs of test forms
#print("Assembling probabilities for the choice forms")
outputs = {}
probs = pd.DataFrame([])
if os.path.isfile(path_output + model):
#print(model)
output = open(path_output + model).readlines()
#print(len(output))
data[model] = lstm_probs(output, gold, vocab)
### If you want to save table with target singular and plural form probabilities uncomment these lines and change the path ###
#path_result = path_repo + "/results/" + lang + "/" + model + ".tab"
#print("The target singular and plural form probabilities are saved in", path_result)
#data.to_csv(path_result, sep="\t", index=False)
#### Computing accuracy for the model (and frequency baseline) ####
if "freq" in data:
models = [model, "freq"]
else:
models = [model]
fields = ["pattern","constr_id","sent_id","n_attr","punct","len_prefix","len_context","sent","correct_number","type"]
wide_data = data[fields + ["class"] + models].pivot_table(columns=("class"), values=models, index=fields)
for model in models:
correct = wide_data.loc[:, (model, "correct")]
wrong = wide_data.loc[:, (model, "wrong")]
wide_data[(model, "acc")] = (correct > wrong)*100
t = wide_data.reset_index()
a = t.groupby("type").agg({(m,"acc"):"mean" for m in models})
print("Accuracy overall\n", a)
a = pd.concat([t[t.type=="original"].groupby("pattern").agg({(m, "acc"): "mean" for m in models}).rename(columns={'acc': 'orig'}),
t[t.type=="generated"].groupby("pattern").agg({(m, "acc"): "mean" for m in models}).rename(columns={'acc': 'gen'})], axis=1)
print()
print("Accuracy by pattern\n", a)
|
colorlessgreenRNNs-main
|
src/results.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
lm_parser = argparse.ArgumentParser(add_help=False)
lm_parser.add_argument('--data', type=str,
help='location of the data corpus')
lm_parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
lm_parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
lm_parser.add_argument('--nhid', type=int, default=200,
help='number of hidden units per layer')
lm_parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
lm_parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
lm_parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
lm_parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
lm_parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
lm_parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
lm_parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
lm_parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
lm_parser.add_argument('--seed', type=int, default=1111,
help='random seed')
lm_parser.add_argument('--cuda', action='store_true',
help='use CUDA')
lm_parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
lm_parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
lm_parser.add_argument('--log', type=str, default='log.txt',
help='path to logging file')
|
colorlessgreenRNNs-main
|
src/language_models/lm_argparser.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
colorlessgreenRNNs-main
|
src/language_models/__init__.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.utils.data.dataloader
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder.
ntoken: vocab size
    ninp: embedding size
"""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
#print(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (weight.new(self.nlayers, bsz, self.nhid).zero_(),
weight.new(self.nlayers, bsz, self.nhid).zero_())
else:
return weight.new(self.nlayers, bsz, self.nhid).zero_()
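# Illustrative sketch (not part of the original module): one forward pass of RNNModel
# on random indices to show the expected tensor shapes. All sizes and the
# `_rnn_model_example` helper are made up.
def _rnn_model_example():
    import torch
    ntoken, ninp, nhid, nlayers = 100, 16, 32, 2
    seq_len, bsz = 5, 3
    model = RNNModel('LSTM', ntoken, ninp, nhid, nlayers, dropout=0.0)
    hidden = model.init_hidden(bsz)
    data = torch.randint(0, ntoken, (seq_len, bsz))
    decoded, hidden = model(data, hidden)
    # decoded: seq_len x bsz x ntoken; hidden: (h, c), each nlayers x bsz x nhid
    return decoded.shape, hidden[0].shape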
|
colorlessgreenRNNs-main
|
src/language_models/model.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
def repackage_hidden(h):
"""Detaches hidden states from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def get_batch(source, i, seq_length):
seq_len = min(seq_length, len(source) - 1 - i)
data = source[i:i+seq_len]
# predict the sequences shifted by one word
target = source[i+1:i+1+seq_len].view(-1)
return data, target
def batchify(data, bsz, cuda):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
if cuda:
data = data.cuda()
return data
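# Illustrative sketch (not part of the original module): how batchify and get_batch
# reshape a flat token stream. The 10-token toy corpus and the `_batchify_example`
# helper are made up.
def _batchify_example():
    corpus = torch.arange(10)  # tokens 0..9
    data = batchify(corpus, bsz=2, cuda=False)
    # data is 5 x 2: column 0 holds tokens 0..4, column 1 holds tokens 5..9
    inp, target = get_batch(data, i=0, seq_length=3)
    # inp is the first 3 rows of data; target is the same window shifted by one
    # position and flattened
    return data, inp, target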
|
colorlessgreenRNNs-main
|
src/language_models/utils.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import argparse
from utils import batchify, get_batch, repackage_hidden
import torch
import torch.nn as nn
from dictionary_corpus import Dictionary, Corpus, tokenize
parser = argparse.ArgumentParser(description='Evaluate perplexity of the dataset, ignoring the <unk> words')
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--test', type=str, default=None,
help='Indicate your test file if different from data/test.txt')
parser.add_argument('--checkpoint', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
args = parser.parse_args()
def evaluate(data_source):
model.eval()
total_loss = 0
total_len = 0
ntokens = len(dictionary)
hidden = model.init_hidden(eval_batch_size)
unk_idx = dictionary.word2idx["<unk>"]
if args.cuda:
out_type = torch.cuda.LongTensor()
else:
out_type = torch.LongTensor()
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args.bptt)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
subset = targets != unk_idx
targets = targets[subset]
output_flat = output_flat[torch.arange(0, output_flat.size(0), out=out_type)[subset]]
total_len += targets.size(0)
total_loss += targets.size(0) * nn.CrossEntropyLoss()(output_flat, targets).item()
hidden = repackage_hidden(hidden)
return total_loss / total_len
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
eval_batch_size = 32
if args.test:
dictionary = Dictionary(args.data)
test = tokenize(dictionary, args.test)
print("Size, OOV", test.size(0), sum(test == dictionary.word2idx["<unk>"]))
test_data = batchify(test, eval_batch_size, args.cuda)
ntokens = len(dictionary)
else:
corpus = Corpus(args.data)
print("Size, OOV", corpus.test.size(0), sum(corpus.test == corpus.dictionary.word2idx["<unk>"]))
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
dictionary = corpus.dictionary
# Load the best saved model.
with open(args.checkpoint, 'rb') as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location=lambda storage, loc: storage)
print("Evaluation on non-unk tokens")
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('Test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
|
colorlessgreenRNNs-main
|
src/language_models/evaluate_test_perplexity.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dictionary_corpus import Corpus, Dictionary, tokenize
from utils import batchify
import lm_argparser
parser = argparse.ArgumentParser(parents=[lm_argparser.lm_parser],
description="Training and testing ngram LSTM model")
parser.add_argument('--train', action='store_true', default=False,
help='enable training regime')
parser.add_argument('--test', action='store_true', default=False,
help='enable testing regime')
parser.add_argument('--test_path', type=str,
help='path to test file, gold file and vocab file output')
parser.add_argument('--suffix', type=str,
help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
logging.FileHandler(args.log)])
logging.info(args)
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder.
ntoken: vocab size
    ninp: embedding size
"""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
#print("hidden", hidden, hidden[0].size())
# take last output of the sequence
output = output[-1]
#print(output)
#decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
#print(output.size())
decoded = self.decoder(output.view(-1, output.size(1)))
#print(output.view(output.size(0)*output.size(1), output.size(2)))
#print(decoded)
return decoded
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (weight.new(self.nlayers, bsz, self.nhid).zero_(),
weight.new(self.nlayers, bsz, self.nhid).zero_())
else:
return weight.new(self.nlayers, bsz, self.nhid).zero_()
def get_batch(source, i, seq_length):
seq_len = min(seq_length, len(source) - 1 - i)
#print("Sequence length", seq_len)
#print(source)
data = source[i:i+seq_len]
#print(data)
    #> predict the single word that follows the sequence
target = source[i+seq_len].view(-1)
#print(target)
return data, target
def create_target_mask(test_file, gold_file, index_col):
sents = open(test_file, "r", encoding="utf8").readlines()
golds = open(gold_file, "r", encoding="utf8").readlines()
    #TODO optimize by initializing an np.array of the needed size and doing indexing
targets = []
for sent, gold in zip(sents, golds):
# constr_id, sent_id, word_id, pos, morph
target_idx = int(gold.split()[index_col])
len_s = len(sent.split(" "))
t_s = [0] * len_s
t_s[target_idx] = 1
#print(sent.split(" ")[target_idx])
targets.extend(t_s)
return np.array(targets)
def evaluate_perplexity(data_source, exclude_oov=False):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
len_data = 0
unk_idx = corpus.dictionary.word2idx["<unk>"]
if args.cuda:
torch_range = torch.cuda.LongTensor()
else:
torch_range = torch.LongTensor()
with torch.no_grad():
for i in range(0, data_source.size(0) - 1):
hidden = model.init_hidden(eval_batch_size)
data, targets = get_batch(data_source, i, args.bptt)
#> output has size seq_length x batch_size x vocab_size
output = model(data, hidden)
output_flat = output.view(-1, ntokens)
# excluding OOV
if exclude_oov:
subset = targets != unk_idx
subset = subset.data
targets = targets[subset]
output_flat = output_flat[torch.arange(0, output_flat.size(0), out=torch_range)[subset]]
total_loss += targets.size(0) * nn.CrossEntropyLoss()(output_flat, targets).data
len_data += targets.size(0)
return total_loss[0] / len_data
def evaluate_on_mask(data_source, mask):
model.eval()
idx2word = dictionary.idx2word
for i in range(0, data_source.size(0) - 1):
hidden = model.init_hidden(eval_batch_size)
        data, targets = get_batch(data_source, i, args.bptt)
        _, targets_mask = get_batch(mask, i, args.bptt)
#print(targets_mask.size())
#> output has size seq_length x batch_size x vocab_size
output = model(data, hidden)
output_flat = output.view(-1, ntokens)
log_probs = F.log_softmax(output_flat)
# print("Log probs size", log_probs.size())
# print("Target size", targets.size())
log_probs_np = log_probs.data.cpu().numpy()
subset = targets_mask.data.numpy().astype(bool)
for scores, correct_label in zip(log_probs_np[subset], targets.data.cpu().numpy()[subset]):
print(idx2word[correct_label], scores[correct_label])
f_output.write("\t".join(str(s) for s in scores) + "\n")
#return total_loss[0] /len(data_source)
###############################################################################
# Training code
###############################################################################
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
criterion = nn.CrossEntropyLoss()
for batch, i in enumerate(range(0, train_data.size(0) - 1)):
#> i is the starting index of the batch
#> batch is the number of the batch
#> data is a tensor of size seq_length x batch_size, where each element is an index from input vocabulary
#> targets is a vector of length seq_length x batch_size
data, targets = get_batch(train_data, i, args.bptt)
hidden = model.init_hidden(args.batch_size)
model.zero_grad()
output = model(data, hidden)
#> output.view(-1, ntokens) transforms a tensor to a longer tensor of size
#> (seq_length x batch_size) x output_vocab_size
#> which matches targets in length
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
for p in model.parameters():
p.data.add_(-lr, p.grad.data)
total_loss += loss.data
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
#logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
# 'loss {:5.2f} | ppl {:8.2f}'.format(
# epoch, batch, len(train_data) // args.bptt, lr,
# elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f}'.format(epoch, batch, len(train_data), lr,
elapsed * 1000 / args.log_interval, cur_loss))
total_loss = 0
start_time = time.time()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
if args.train:
logging.info("Loading data")
corpus = Corpus(args.data)
# logging.info(corpus.train)
ntokens = len(corpus.dictionary)
logging.info("Vocab size %d", ntokens)
logging.info("Batchying..")
eval_batch_size = 256
train_data = batchify(corpus.train, args.batch_size, args.cuda)
# logging.info("Train data size", train_data.size())
val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
logging.info("Building the model")
# model = torch.nn.DataParallel(model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied),
# dim=1)
model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
# Loop over epochs.
lr = args.lr
best_val_loss = None
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate_perplexity(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
    with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on valid data with OOV excluded
test_loss = evaluate_perplexity(val_data, exclude_oov=True)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
logging.info('=' * 89)
#####################################
# Testing #
#####################################
if args.test:
dictionary = Dictionary(args.data)
    with open(args.save, 'rb') as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
model.cuda()
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location=lambda storage, loc: storage)
model.cpu()
model.eval()
eval_batch_size = 1
ntokens = len(dictionary)
#print("Vocab size", ntokens)
#print("TESTING")
# depends on generation script (constantly modified) - the column where the target word index is written
index_col = 3
mask = create_target_mask(args.test_path + ".text", args.test_path + ".gold", index_col)
mask_data = batchify(torch.LongTensor(mask), eval_batch_size, False)
test_data = batchify(tokenize(dictionary, args.test_path + ".text"), eval_batch_size, args.cuda)
f_output = open(args.test_path + ".output_" + args.suffix, 'w')
evaluate_on_mask(test_data, mask_data)
f_output.close()
|
colorlessgreenRNNs-main
|
src/language_models/ngram_lstm.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import dictionary_corpus
from utils import repackage_hidden, batchify, get_batch
import numpy as np
parser = argparse.ArgumentParser(description='Mask-based evaluation: extracts softmax vectors for specified words')
parser.add_argument('--data', type=str,
help='location of the data corpus for LM training')
parser.add_argument('--checkpoint', type=str,
help='model checkpoint to use')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--path', type=str, help='path to test file (text) gold file (indices of words to evaluate)')
parser.add_argument('--suffix', type=str, help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
def evaluate(data_source, mask):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, seq_len):
# keep continuous hidden state across all sentences in the input file
data, targets = get_batch(data_source, i, seq_len)
_, targets_mask = get_batch(mask, i, seq_len)
output, hidden = model(data, hidden)
output_flat = output.view(-1, vocab_size)
total_loss += len(data) * nn.CrossEntropyLoss()(output_flat, targets)
output_candidates_probs(output_flat, targets, targets_mask)
hidden = repackage_hidden(hidden)
return total_loss.item() / (len(data_source) - 1)
def output_candidates_probs(output_flat, targets, mask):
log_probs = F.log_softmax(output_flat, dim=1)
log_probs_np = log_probs.cpu().numpy()
subset = mask.cpu().numpy().astype(bool)
for scores, correct_label in zip(log_probs_np[subset], targets.cpu().numpy()[subset]):
#print(idx2word[correct_label], scores[correct_label])
f_output.write("\t".join(str(s) for s in scores) + "\n")
def create_target_mask(test_file, gold_file, index_col):
sents = open(test_file, "r").readlines()
golds = open(gold_file, "r").readlines()
    #TODO optimize by initializing an np.array of the needed size and doing indexing
targets = []
for sent, gold in zip(sents, golds):
# constr_id, sent_id, word_id, pos, morph
target_idx = int(gold.split()[index_col])
len_s = len(sent.split(" "))
t_s = [0] * len_s
t_s[target_idx] = 1
#print(sent.split(" ")[target_idx])
targets.extend(t_s)
return np.array(targets)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
with open(args.checkpoint, 'rb') as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location = lambda storage, loc: storage)
model.eval()
if args.cuda:
model.cuda()
else:
model.cpu()
eval_batch_size = 1
seq_len = 20
dictionary = dictionary_corpus.Dictionary(args.data)
vocab_size = len(dictionary)
#print("Vocab size", vocab_size)
print("Computing probabilities for target words")
# assuming the mask file contains one number per line indicating the index of the target word
index_col = 0
mask = create_target_mask(args.path + ".text", args.path + ".eval", index_col)
mask_data = batchify(torch.LongTensor(mask), eval_batch_size, args.cuda)
test_data = batchify(dictionary_corpus.tokenize(dictionary, args.path + ".text"), eval_batch_size, args.cuda)
f_output = open(args.path + ".output_" + args.suffix, 'w')
evaluate(test_data, mask_data)
print("Probabilities saved to", args.path + ".output_" + args.suffix)
f_output.close()
|
colorlessgreenRNNs-main
|
src/language_models/evaluate_target_word.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import torch
import torch.nn as nn
from dictionary_corpus import Corpus
import model
from lm_argparser import lm_parser
from utils import repackage_hidden, get_batch, batchify
parser = argparse.ArgumentParser(parents=[lm_parser],
description="Basic training and evaluation for RNN LM")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
logging.FileHandler(args.log)])
logging.info(args)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
logging.info("Loading data")
start = time.time()
corpus = Corpus(args.data)
logging.info("( %.2f )" % (time.time() - start))
ntokens = len(corpus.dictionary)
logging.info("Vocab size %d", ntokens)
logging.info("Batchying..")
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size, args.cuda)
val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
criterion = nn.CrossEntropyLoss()
###############################################################################
# Build the model
###############################################################################
logging.info("Building the model")
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
###############################################################################
# Training code
###############################################################################
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args.bptt)
#> output has size seq_length x batch_size x vocab_size
output, hidden = model(data, hidden)
#> output_flat has size num_targets x vocab_size (batches are stacked together)
#> ! important, otherwise softmax computation (e.g. with F.softmax()) is incorrect
output_flat = output.view(-1, ntokens)
#output_candidates_info(output_flat.data, targets.data)
total_loss += len(data) * nn.CrossEntropyLoss()(output_flat, targets).item()
hidden = repackage_hidden(hidden)
return total_loss / (len(data_source) - 1)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i, args.bptt)
        # truncated BPTT
hidden = repackage_hidden(hidden)
model.zero_grad()
output, hidden = model(data, hidden)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
        # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        for p in model.parameters():
            p.data.add_(p.grad.data, alpha=-lr)
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = evaluate(test_data)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
logging.info('=' * 89)
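# A minimal sketch (illustrative only) of the manual SGD step in train() expressed with a
# torch.optim.SGD optimizer without momentum; this script keeps the in-place update so it
# can anneal `lr` directly between epochs:
#
#     optimizer = torch.optim.SGD(model.parameters(), lr=lr)
#     optimizer.zero_grad()
#     loss.backward()
#     torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
#     optimizer.step()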
|
colorlessgreenRNNs-main
|
src/language_models/main.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
from collections import defaultdict
import logging
class Dictionary(object):
def __init__(self, path):
self.word2idx = {}
self.idx2word = []
self.word2freq = defaultdict(int)
vocab_path = os.path.join(path, 'vocab.txt')
        try:
            with open(vocab_path, encoding="utf8") as f_vocab:
                vocab = f_vocab.read()
            self.word2idx = {w: i for i, w in enumerate(vocab.split())}
            self.idx2word = vocab.split()
            self.vocab_file_exists = True
        except FileNotFoundError:
            logging.info("Vocab file not found, creating new vocab file.")
            self.create_vocab(os.path.join(path, 'train.txt'))
            with open(vocab_path, "w", encoding="utf8") as f_vocab:
                f_vocab.write("\n".join(self.idx2word))
def add_word(self, word):
self.word2freq[word] += 1
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
#return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
def create_vocab(self, path):
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split()
for word in words:
self.add_word(word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary(path)
self.train = tokenize(self.dictionary, os.path.join(path, 'train.txt'))
self.valid = tokenize(self.dictionary, os.path.join(path, 'valid.txt'))
self.test = tokenize(self.dictionary, os.path.join(path, 'test.txt'))
def tokenize(dictionary, path):
"""Tokenizes a text file for training or testing to a sequence of indices format
We assume that training and test data has <eos> symbols """
assert os.path.exists(path)
with open(path, 'r', encoding="utf8") as f:
ntokens = 0
for line in f:
words = line.split()
ntokens += len(words)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
ids = torch.LongTensor(ntokens)
token = 0
for line in f:
words = line.split()
for word in words:
if word in dictionary.word2idx:
ids[token] = dictionary.word2idx[word]
else:
ids[token] = dictionary.word2idx["<unk>"]
token += 1
return ids
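# A minimal usage sketch (with a hypothetical data directory) of the classes above: the
# directory must contain train.txt, valid.txt and test.txt, and, assuming the vocabulary
# contains an <unk> entry, every out-of-vocabulary token is mapped to it by tokenize():
#
#     corpus = Corpus("data/wiki")
#     unk_id = corpus.dictionary.word2idx["<unk>"]
#     word_id = corpus.dictionary.word2idx.get("colorless", unk_id)
#     print(len(corpus.dictionary), corpus.train.size(0))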
|
colorlessgreenRNNs-main
|
src/language_models/dictionary_corpus.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import subprocess
def query_KenLM(lm_file, file_name, kenlm_path="/private/home/gulordava/kenlm/build/bin/"):
"""
:param lm_file: language model
:param file_name: file with (partial) sentences to test
:return: a list of probabilities of the last word of each sentence
"""
command = kenlm_path + "query " + lm_file + ' < ' + file_name + " -n"
KenLM_query = subprocess.getstatusoutput(command)[1]
lines = KenLM_query.split("\n")
skip = ["This binary file contains probing hash tables.",
"Loading the LM will be faster if you build a binary file."]
if any(s in lines[0] for s in skip):
lines = lines[1:]
result_probs = []
for line in lines:
# last ngram is Total + OOV
try:
result_probs.append(float(line.split('\t')[-2].split(" ")[2]))
        except (IndexError, ValueError):
print(line)
return result_probs, lines
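# A minimal usage sketch (hypothetical file and model names): each line of prefixes.txt is
# a (partial) sentence, and the returned values are the KenLM log10 scores of the last word
# of each line, parsed from the tab-separated per-word fields of the `query` output:
#
#     probs, raw_lines = query_KenLM("wiki.binary", "prefixes.txt",
#                                    kenlm_path="kenlm/build/bin/")
#     print(probs[:5])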
|
colorlessgreenRNNs-main
|
src/syntactic_testsets/evaluate_utils.py
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import tree_module as tm
import argparse
import itertools
from collections import defaultdict
import numpy as np
from generate_utils import is_good_form
from utils import load_vocab, ltm_to_word, read_paradigms
def safe_log(x):
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(x > 0.0001, np.log(x), 0.0)
def cond_entropy(xy):
# normalise
xy = xy / np.sum(xy)
x_ = np.sum(xy, axis=1)
y_ = np.sum(xy, axis=0)
x_y = xy / y_
# print(x_y)
y_x = xy / x_.reshape(x_.shape[0], 1)
# print(y_x)
# Entropies: H(x|y) H(y|x) H(x) H(y)
return np.sum(-xy * safe_log(x_y)), np.sum(-xy * safe_log(y_x)), np.sum(-x_ * safe_log(x_)), np.sum(
-y_ * safe_log(y_))
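# A small worked example for cond_entropy (natural-log entropies): for the count matrix
#
#     a = np.array([[1.0, 0.0],
#                   [0.0, 1.0]])
#
# rows and columns determine each other, so H(x|y) = H(y|x) = 0 and
# H(x) = H(y) = log(2) ~ 0.693; the mutual information used below is then
# MI = H(x) - H(x|y) = log(2).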
def pos_structure(nodes, arc):
""" Get a sequence of pos tags for nodes which are direct children of the arc head or the arc child
nodes - the list of nodes of the context Y, between the head and the child (X, Z) of the arc
"""
return tuple([n.pos for n in nodes if n.head_id in [arc.head.index, arc.child.index]])
def inside(tree, a):
if a.child.index < a.head.index:
nodes = tree.nodes[a.child.index: a.head.index - 1]
l = a.child
r = a.head
else:
nodes = tree.nodes[a.head.index: a.child.index - 1]
l = a.head
r = a.child
return nodes, l, r
def features(morph, feature_list):
#Definite=Def|Gender=Masc|Number=Sing|PronType=Art Tense=Past|VerbForm=Part
if not feature_list:
return morph
all_feats = morph.split("|")
feat_values = tuple(f for f in all_feats if f.split("=")[0] in feature_list)
return "|".join(feat_values)
def morph_contexts_frequencies(trees, feature_list):
"""
Collect frequencies for X Y Z tuples, where Y is a context defined by its surface structure
and X and Z are connected by a dependency
:param trees: dependency trees
:return: two dictionaries for left and right dependencies
"""
d_left = defaultdict(lambda: defaultdict(int))
d_right = defaultdict(lambda: defaultdict(int))
for t in trees:
for a in t.arcs:
if 3 < a.length() < 15 and t.is_projective_arc(a):
# print("\n".join(str(n) for n in t.nodes))
nodes, l, r = inside(t, a)
substring = (l.pos,) + pos_structure(nodes, a) + (r.pos,)
# print(substring)
if substring:
if features(l.morph, feature_list) == "" or features(r.morph, feature_list) == "":
continue
#substring = substring + (a.dep_label,)
if a.dir == tm.Arc.LEFT:
d_left[substring][(features(l.morph, feature_list), features(r.morph, feature_list))] += 1
if a.dir == tm.Arc.RIGHT:
d_right[substring][(features(l.morph, feature_list), features(r.morph, feature_list))] += 1
return d_left, d_right
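# The two dictionaries returned above map a POS context Y to counts of (left, right)
# feature pairs; e.g. (illustrative) d_left[('NOUN', 'ADJ', 'VERB')][('Number=Sing',
# 'Number=Sing')] counts how many projective arcs of length 4-14 with that POS shape
# and direction LEFT connect words with those feature values.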
def find_good_patterns(context_dict, freq_threshold):
"""
:param context_dict: is a dictionary of type { Y context : {(X, Z) : count} }
for X Y Z sequences where X and Z could be of any type (tags, morph)
:param freq_threshold: for filtering out too infrequent patterns
:return: list of patterns - tuples (context, left1, left2) == (Y, X1, X2)
(where X1 and X2 occur with different Zs)
"""
patterns = []
for context in context_dict:
left_right_pairs = context_dict[context].keys()
if len(left_right_pairs) == 0:
continue
left, right = zip(*left_right_pairs)
left_v = set(left)
d = context_dict[context]
if len(left_v) < 2:
continue
for l1, l2 in itertools.combinations(left_v, 2):
right_v = [r for (l, r) in left_right_pairs if l in (l1, l2)]
if len(right_v) < 2:
continue
a = np.zeros((2, len(right_v)))
for i, x in enumerate((l1, l2)):
for j, y in enumerate(right_v):
a[(i, j)] = d[(x, y)]
l_r, r_l, l_e, r_e = cond_entropy(a)
mi = l_e - l_r
count_l1 = 0
count_l2 = 0
for l, r in d:
if l == l1:
count_l1 += d[(l, r)]
if l == l2:
count_l2 += d[(l, r)]
#print(l_r, r_l, l_e, r_e, mi)
if l_r < 0.001 and count_l1 > freq_threshold and count_l2 > freq_threshold:
patterns.append((context, l1, l2))
print(context, l_r, mi)
print(l1, l2, count_l1, count_l2)
#for l, r in d:
# if l in (l1, l2) and d[(l, r)] > 0 :
# print(l, r, d[(l, r)])
return patterns
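# An illustrative pattern returned above (with feature_list = ["Number"]) could look like
#     (('NOUN', 'VERB', 'ADP', 'NOUN'), 'Number=Sing', 'Number=Plur')
# i.e. a POS context Y together with the two alternative left-hand feature values X1, X2.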
def grep_morph_pattern(trees, context, l_values, dep_dir, feature_list=None):
"""
:param context: Y
:param l_values: l_values are relevant X values
:param dep_dir:
:return: generator of (context-type, l, r, tree, Y nodes) tuples
"""
if feature_list is None:
feature_list = ['Number']
for t in trees:
for a in t.arcs:
if 3 < a.length() < 15 and t.is_projective_arc(a):
if a.child.pos == "PUNCT" or a.head.pos == "PUNCT":
continue
#print("\n".join(str(n) for n in t.nodes))
nodes, l, r = inside(t, a)
if a.dir != dep_dir:
continue
if not any(m in l.morph for m in l_values):
#print(features(l.morph), l_values)
continue
if features(r.morph, feature_list) != features(l.morph, feature_list):
continue
substring = (l.pos,) + pos_structure(nodes, a) + (r.pos,)
if substring == context:
#print(substring, context)
yield context, l, r, t, nodes
def main():
parser = argparse.ArgumentParser(
description='Extracting dependency-based long-distance agreement patterns')
parser.add_argument('--treebank', type=str, required=True,
help='Path of the input treebank file (in a column format)')
parser.add_argument('--output', type=str, required=True,
help="Path for the output files")
parser.add_argument('--features', type=str, default="Number",
help="A list of morphological features which will be used, in Number|Case|Gender format")
parser.add_argument('--freq', type=int, default=5, help="minimal frequency")
parser.add_argument('--vocab', type=str, required=False, help="LM vocab - to compute which sentences have OOV")
    parser.add_argument('--paradigms', type=str, required=False,
                        help="File with morphological paradigms - to compute "
                             "which sentences have both target pairs")
args = parser.parse_args()
if args.vocab:
vocab = load_vocab(args.vocab)
else:
vocab = []
print("Loading trees")
trees = tm.load_trees_from_conll(args.treebank)
    # needed for original UD treebanks (e.g. Italian) which contain spans, e.g. 10-12,
    # annotating multimorphemic words as several nodes in the tree
for t in trees:
t.remerge_segmented_morphemes()
if args.features:
args.features = args.features.split("|")
print("Features", args.features)
print("Extracting contexts")
context_left_deps, context_right_deps = morph_contexts_frequencies(trees, args.features)
# filtering very infrequent cases
filter_threshold = 1
    context_left_deps = defaultdict(lambda: defaultdict(int), {
        c: defaultdict(int, {lr: freq for lr, freq in d.items() if freq > filter_threshold})
        for c, d in context_left_deps.items()})
    context_right_deps = defaultdict(lambda: defaultdict(int), {
        c: defaultdict(int, {lr: freq for lr, freq in d.items() if freq > filter_threshold})
        for c, d in context_right_deps.items()})
print("Finding good patterns")
good_patterns_left = find_good_patterns(context_left_deps, args.freq)
good_patterns_right = find_good_patterns(context_right_deps, args.freq)
f_out = open(args.output + "/patterns.txt", "w")
print("Saving patterns and sentences matching them")
ltm_paradigms = ltm_to_word(read_paradigms(args.paradigms))
for p in good_patterns_left:
f_out.write("L\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
print("L\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
f_out_grep = open(args.output + "/L_" + "_".join(x for x in p[0]), "w")
for context, l, r, t, nodes in grep_morph_pattern(trees, p[0], p[1:], tm.Arc.LEFT, args.features):
#print(l.morph + " " + r.morph + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word)
in_vocab = all([n.word in vocab for n in nodes + [l, r]])
in_paradigms = is_good_form(r.word, r.word, r.morph, r.lemma, r.pos, vocab, ltm_paradigms)
f_out_grep.write(features(l.morph, args.features) + " " + features(r.morph, args.features) +
"\t" + str(in_vocab) + str(in_paradigms) + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word + "\n")
f_out_grep.close()
for p in good_patterns_right:
f_out.write("R\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
print("R\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
f_out_grep = open(args.output + "/R_" + "_".join(x for x in p[0]), "w")
for context, l, r, t, nodes in grep_morph_pattern(trees, p[0], p[1:], tm.Arc.RIGHT, args.features):
#print(l.morph + " " + r.morph + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word)
in_vocab = all([n.word in vocab for n in nodes + [l, r]])
in_paradigms = is_good_form(r.word, r.word, r.morph, r.lemma, r.pos, vocab, ltm_paradigms)
f_out_grep.write(features(l.morph, args.features)+ " " + features(r.morph, args.features) +
"\t" + str(in_vocab) + str(in_paradigms) + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word + "\n")
f_out_grep.close()
f_out.close()
if __name__ == "__main__":
main()
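# A minimal invocation sketch (illustrative paths):
#
#     python extract_dependency_patterns.py \
#         --treebank data/it_isdt-ud-train.conllu \
#         --output output/it \
#         --features Number \
#         --vocab data/it_vocab.txt \
#         --paradigms data/it_paradigms.txt
#
# It writes <output>/patterns.txt plus one L_* / R_* file per extracted pattern, listing
# the matching sentences with their in-vocabulary / in-paradigm flags.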
|
colorlessgreenRNNs-main
|
src/syntactic_testsets/extract_dependency_patterns.py
|