python_code
| repo_name
| file_path
|
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_bpe
from fairseq.data.encoders.byte_utils import (
SPACE,
SPACE_ESCAPE,
byte_encode,
smart_byte_decode,
)
@register_bpe("bytes")
class Bytes(object):
def __init__(self, *unused):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
encoded = byte_encode(x)
escaped = encoded.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
return smart_byte_decode(unescaped)
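# Illustrative sketch (not part of the original file): a minimal round trip
# through the Bytes codec, assuming the fairseq imports above resolve. encode()
# escapes spaces with SPACE_ESCAPE and space-separates the byte characters;
# decode() reverses both steps.
def _demo_bytes_roundtrip():
    text = "hello world"
    encoded = Bytes.encode(text)  # "h e l l o \u2581 w o r l d"
    assert Bytes.decode(encoded) == text
    return encoded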
|
bart_ls-main
|
fairseq-py/fairseq/data/encoders/bytes.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
WHITESPACE_NORMALIZER = re.compile(r"\s+")
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
# excluding non-breaking space (160) here
PRINTABLE_LATIN = set(
list(range(32, 126 + 1)) + list(range(161, 172 + 1)) + list(range(174, 255 + 1))
)
BYTE_TO_BCHAR = {
b: chr(b) if b in PRINTABLE_LATIN else chr(256 + b) for b in range(256)
}
BCHAR_TO_BYTE = {bc: b for b, bc in BYTE_TO_BCHAR.items()}
def byte_encode(x: str) -> str:
normalized = WHITESPACE_NORMALIZER.sub(SPACE, x)
return "".join([BYTE_TO_BCHAR[b] for b in normalized.encode("utf-8")])
def byte_decode(x: str) -> str:
try:
return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode("utf-8")
except ValueError:
return ""
def smart_byte_decode(x: str) -> str:
output = byte_decode(x)
if output == "":
# Decoding failed: use DP to find the best recovery (max number of valid chars)
n_bytes = len(x)
f = [0 for _ in range(n_bytes + 1)]
pt = [0 for _ in range(n_bytes + 1)]
for i in range(1, n_bytes + 1):
f[i], pt[i] = f[i - 1], i - 1
for j in range(1, min(4, i) + 1):
if f[i - j] + 1 > f[i] and len(byte_decode(x[i - j : i])) > 0:
f[i], pt[i] = f[i - j] + 1, i - j
cur_pt = n_bytes
while cur_pt > 0:
if f[cur_pt] == f[pt[cur_pt]] + 1:
output = byte_decode(x[pt[cur_pt] : cur_pt]) + output
cur_pt = pt[cur_pt]
return output
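# Illustrative sketch (not part of the original file): byte_encode() maps every
# UTF-8 byte to one printable character, so multi-byte characters expand into
# several byte-characters, and smart_byte_decode() can still recover the valid
# characters when the sequence is damaged mid-character.
def _demo_byte_recovery():
    enc = byte_encode("héllo")  # 'é' becomes two byte-characters
    assert byte_decode(enc) == "héllo"
    broken = enc[:1] + enc[2:]  # drop the lead byte of the 'é' sequence
    assert byte_decode(broken) == ""  # plain decoding gives up
    assert smart_byte_decode(broken) == "hllo"  # DP keeps the valid characters
    return broken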
|
bart_ls-main
|
fairseq-py/fairseq/data/encoders/byte_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
class SpecialConversationSymbols:
BOC = "<c>"
EOC = "</c>"
BOS = "<s>"
EOS = "</s>"
BOS0 = "<s0>"
EOS0 = "</s0>"
BOS1 = "<s1>"
EOS1 = "</s1>"
PAD = "<pad>"
UNK = "<unk>"
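# Illustrative example (not part of the original file): a two-speaker thread is
# serialized with these symbols roughly as
#   <c> <s0> hello there </s0> <s1> hi! </s1> <s0> how are you? </s0> </c>
# where <s0>/<s1> are assigned in order of first appearance (see
# fb_conversation_dataset.py below).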
|
bart_ls-main
|
fairseq-py/fairseq/data/fb_conversations/fb_special_symbols.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bart_ls-main
|
fairseq-py/fairseq/data/fb_conversations/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import traceback
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
from fairseq.data import FairseqDataset, FairseqIterableDataset, data_utils, encoders
from fairseq.data.fb_conversations.fb_special_symbols import SpecialConversationSymbols
from fairseq.data.fb_hive_dataset import HiveDataset
logger = logging.getLogger("fairseq.fb_conversation_dataset")
def _should_include(key: str, split_range: Tuple[float, float]) -> bool:
"""
Hashes key to decimal between 0 and 1 and returns whether it falls
within the supplied range.
"""
max_precision_order = 10000
decimal_hash = (hash(key) % max_precision_order) / max_precision_order
return split_range[0] < decimal_hash <= split_range[1]
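# Illustrative example (not part of the original file): with split_range=(0.0, 0.8)
# roughly 80% of thread keys hash into the kept range, while (0.8, 1.0) selects the
# complementary ~20%, giving a cheap, deterministic train/valid split keyed on the
# conversation id.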
def _tokenize_and_reformat_conversations(item, dictionary, encoder) -> Dict[str, Any]:
"""
Given an input (*item*) of the form:
(
'123:124', <- thread key
[[
1558359573 <- timestamp
1, <- user ID
'hello there', <- message body
], ...],
3, <- message count
'2019-06-30' <- partition
)
this will return:
{
'id': 123124,
'source': tensor([4, 6, 31373, 612, 7, ..., 5])
}
"""
if item is None:
return item
# Verify data is in expected format
assert len(item) == 4
assert (
isinstance(item[0], str)
and isinstance(item[1], list)
and isinstance(item[2], int)
)
def _reformat_msg(msg, sender_list, encoder):
sender = msg[1]
body = encoder.encode(msg[2])
body = dictionary.encode_line(
body,
add_if_not_exist=False,
append_eos=False,
)
# Add sender to shared list if not there already
if sender not in sender_list:
sender_list.append(sender)
# Make new sender ID based on index in list, so first person to
# talk will be s0
user_id_short = str(sender_list.index(sender))
bos = dictionary.index("<s{user_id_short}>".format(**locals()))
eos = dictionary.index("</s{user_id_short}>".format(**locals()))
return torch.cat([torch.IntTensor([bos]), body, torch.IntTensor([eos])])
try:
# Convert text thread key into an int ('1:2' -> 12)
id = int("".join(item[0].split(":")))
# Join all the messages into a single tensor separated by sender tags
user_list = []
convo_tensor = torch.cat(
[torch.IntTensor(_reformat_msg(m, user_list, encoder)) for m in item[1]]
)
# Create final tensor by bookending conversation tags
boc = dictionary.index(SpecialConversationSymbols.BOC)
eoc = dictionary.index(SpecialConversationSymbols.EOC)
item = {
"id": id,
"source": torch.cat(
[torch.IntTensor([boc]), convo_tensor, torch.IntTensor([eoc])]
),
}
except Exception as e:
logger.error("Exception: {}\n{}".format(e, traceback.format_exc()))
return None
return item
def _torchify(item, dictionary) -> Dict[str, Any]:
"""
Converts item into a format usable by PyTorch.
Given an (*item*) of the form:
{
'id': 123124,
'source': tensor([4, 6, 31373, 612, 7, ..., 5])
}
this will return:
{
'id': tensor([123124]),
'ntokens': 37,
'net_input': {
'src_tokens': tensor([5, 4, 6, 31373, 612, ..., 53])
},
'target': tensor([4, 6, 31373, 612, 7, ..., 5])
}
"""
tokenized_conversation = item["source"].long()
ntokens = len(tokenized_conversation)
if ntokens > 1024 or ntokens < 20:
logger.info("Skipped conversation with token length: {}".format(ntokens))
return None
source = data_utils.collate_tokens(
[tokenized_conversation],
dictionary.pad(),
eos_idx=dictionary.index(SpecialConversationSymbols.EOC),
move_eos_to_beginning=True,
)
target = data_utils.collate_tokens(
[tokenized_conversation],
dictionary.pad(),
)
torch_item = {
# Bound ID to 64 bit max to avoid overflow
"id": torch.LongTensor([item["id"] % (2 ** 63 - 1)]),
"ntokens": ntokens,
"net_input": {
"src_tokens": source,
"src_lengths": torch.LongTensor([ntokens]),
},
"target": target,
}
return torch_item
class ConversationDataset(FairseqDataset, FairseqIterableDataset):
"""
A dataset representing conversations between two or more people.
Given a dataset with items of the form:
(
'123:124', <- thread key
[[
1558359573 <- timestamp
1, <- user ID
'hello there', <- message body
], ...],
3, <- message count
'2019-06-30' <- partition
)
this will return items like:
{
'id': tensor([123124]),
'ntokens': 37,
'net_input': {
'src_tokens': tensor([5, 4, 6, 31373, 612, ..., 53])
},
'target': tensor([4, 6, 31373, 612, 7, ..., 5])
}
Args:
dataset (torch.utils.data.Dataset): dataset to reformat
dictionary (fairseq.data.Dictionary): pre-made dictionary for the task
split_range (tuple(float, float)): range between 0 and 1 from which
to sample (e.g. (0.0, 0.8) will sample 80% of the data)
"""
def __init__(
self,
dataset: HiveDataset,
dictionary,
split_range: Tuple[float, float] = (0.0, 1.0),
):
super().__init__()
self.dataset = dataset
self.dictionary = dictionary
self.split_range = split_range
from fairseq.data.encoders.gpt2_bpe import GPT2BPEConfig
bpe_cfg = GPT2BPEConfig(
gpt2_encoder_json="/mnt/vol/gfsai-flash3-east/ai-group/users/myleott/gpt2_bpe/encoder.json",
gpt2_vocab_bpe="/mnt/vol/gfsai-flash3-east/ai-group/users/myleott/gpt2_bpe/vocab.bpe",
)
bpe_cfg._name = "gpt2"
self.bpe = encoders.build_bpe(bpe_cfg)
def __getitem__(self, index):
if isinstance(index, (int, np.integer)):
return self._transform_item(self.dataset[index])
elif isinstance(index, slice):
return ConversationDataset(self.dataset[index], self.dictionary)
else:
raise TypeError(
"Index must be int or slice, not {}".format(type(index).__name__)
)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
item = self[index]
if item is None:
return 0
return item["ntokens"]
def __len__(self):
# We'll only look at a subset of the dataset as determined by the split
# range, and we should reflect that in the length.
ratio_of_data = self.split_range[1] - self.split_range[0]
return int(len(self.dataset) * ratio_of_data)
def __iter__(self):
for x in self.dataset:
if not _should_include(x[0], self.split_range):
continue
item = self._transform_item(x)
if item is not None:
yield item
def _transform_item(self, item):
return _torchify(
_tokenize_and_reformat_conversations(
item,
self.dictionary,
self.bpe,
),
self.dictionary,
)
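# Illustrative sketch (not part of the original file): how this dataset might be
# split into train/valid via the hash-based split_range. `hive_dataset` and
# `dictionary` stand for pre-built objects and are hypothetical here; the GPT-2
# BPE paths hard-coded above are FB-internal, so this is only a sketch of the
# intended wiring.
def _demo_conversation_split(hive_dataset, dictionary):
    train = ConversationDataset(hive_dataset, dictionary, split_range=(0.0, 0.8))
    valid = ConversationDataset(hive_dataset, dictionary, split_range=(0.8, 1.0))
    # Iterating yields torchified items with 'id', 'ntokens', 'net_input', 'target'.
    first = next(iter(train))
    return train, valid, first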
|
bart_ls-main
|
fairseq-py/fairseq/data/fb_conversations/fb_conversation_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A standalone module for aggregating metrics.
Metrics can be logged from anywhere using the `log_*` functions defined
in this module. The logged values will be aggregated dynamically based
on the aggregation context in which the logging occurs. See the
:func:`aggregate` context manager for more details.
"""
import contextlib
import uuid
from collections import defaultdict
from typing import Callable, List, Optional
from .meters import *
# Aggregation contexts are considered "active" when inside the scope
# created by the :func:`aggregate` context manager.
_aggregators = OrderedDict()
_active_aggregators = OrderedDict()
_active_aggregators_cnt = defaultdict(lambda: 0)
def reset() -> None:
"""Reset all metrics aggregators."""
_aggregators.clear()
_active_aggregators.clear()
_active_aggregators_cnt.clear()
# The "default" aggregator observes all logged values.
_aggregators["default"] = MetersDict()
_active_aggregators["default"] = _aggregators["default"]
_active_aggregators_cnt["default"] = 1
reset()
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
"""Context manager to aggregate metrics under a given name.
Aggregations can be nested. If *new_root* is ``False``, then logged
metrics will be recorded along the entire stack of nested
aggregators, including a global "default" aggregator. If *new_root*
is ``True``, then this aggregator will be the root of a new
aggregation stack, thus bypassing any parent aggregators.
Note that aggregation contexts are uniquely identified by their
*name* (e.g., train, valid). Creating a context with an existing
name will reuse the corresponding :class:`MetersDict` instance.
If no name is given, then a temporary aggregator will be created.
Usage::
with metrics.aggregate("train"):
for step, batch in enumerate(epoch):
with metrics.aggregate("train_inner") as agg:
metrics.log_scalar("loss", get_loss(batch))
if step % log_interval == 0:
print(agg.get_smoothed_value("loss"))
agg.reset()
print(metrics.get_smoothed_values("train")["loss"])
Args:
name (str): name of the aggregation. Defaults to a
random/temporary name if not given explicitly.
new_root (bool): make this aggregation the root of a new
aggregation stack.
"""
if name is None:
# generate a temporary name
name = str(uuid.uuid4())
assert name not in _aggregators
agg = MetersDict()
else:
assert name != "default"
agg = _aggregators.setdefault(name, MetersDict())
if new_root:
backup_aggregators = _active_aggregators.copy()
_active_aggregators.clear()
backup_aggregators_cnt = _active_aggregators_cnt.copy()
_active_aggregators_cnt.clear()
_active_aggregators[name] = agg
_active_aggregators_cnt[name] += 1
yield agg
_active_aggregators_cnt[name] -= 1
if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
del _active_aggregators[name]
if new_root:
_active_aggregators.clear()
_active_aggregators.update(backup_aggregators)
_active_aggregators_cnt.clear()
_active_aggregators_cnt.update(backup_aggregators_cnt)
def get_active_aggregators() -> List[MetersDict]:
return list(_active_aggregators.values())
def log_scalar(
key: str,
value: float,
weight: float = 1,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value.
Args:
key (str): name of the field to log
value (float): value to log
weight (float): weight that this value contributes to the average.
A weight of 0 will always log the latest value.
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, AverageMeter(round=round), priority)
agg[key].update(value, weight)
def log_scalar_sum(
key: str,
value: float,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value that is summed for reporting.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, SumMeter(round=round), priority)
agg[key].update(value)
def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20):
"""Log a scalar value derived from other meters.
Args:
key (str): name of the field to log
fn (Callable[[MetersDict], float]): function that takes a single
argument *meters* and returns the derived value
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
def log_speed(
key: str,
value: float,
priority: int = 30,
round: Optional[int] = None,
):
"""Log the rate of some quantity per second.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, TimeMeter(round=round), priority)
agg[key].reset() # reset meter on the first call
else:
agg[key].update(value)
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
"""Log the duration of some event in seconds.
The duration will be computed once :func:`log_stop_time` is called.
Args:
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, StopwatchMeter(round=round), priority)
agg[key].start()
def log_stop_time(key: str, weight: float = 0.0, prehook=None):
"""Log the duration of some event in seconds.
The duration will be computed since :func:`log_start_time` was called.
Set weight > 0 to report the average time instead of the sum.
Args:
key (str): name of the field to log
weight (float): weight that this time contributes to the average
prehook (function, no arguments): will be called before the timer
is stopped. For example, use prehook=torch.cuda.synchronize to
make sure all gpu operations are done before timer is stopped.
"""
for agg in get_active_aggregators():
if key in agg:
agg[key].stop(weight, prehook)
def log_custom(
new_meter_fn: Callable[[], Meter],
key: str,
*args,
priority: int = 50,
**kwargs,
):
"""Log using a custom Meter.
Any extra *args* or *kwargs* will be passed through to the Meter's
*update* method.
Args:
new_meter_fn (Callable[[], Meter]): function that returns a new
Meter instance
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, new_meter_fn(), priority)
agg[key].update(*args, **kwargs)
def reset_meter(name: str, key: str) -> None:
"""Reset Meter instance aggregated under a given *name* and *key*."""
meter = get_meter(name, key)
if meter is not None:
meter.reset()
def reset_meters(name: str) -> None:
"""Reset Meter instances aggregated under a given *name*."""
meters = get_meters(name)
if meters is not None:
meters.reset()
def get_meter(name: str, key: str) -> Meter:
"""Get a single Meter instance aggregated under *name* and *key*.
Returns:
Meter or None if no metrics have been logged under *name* and *key*.
"""
if name not in _aggregators:
return None
return _aggregators[name].get(key, None)
def get_meters(name: str) -> MetersDict:
"""Get Meter instances aggregated under a given *name*.
Returns:
MetersDict or None if no metrics have been logged under *name*.
"""
return _aggregators.get(name, None)
def get_smoothed_value(name: str, key: str) -> float:
"""Get a single smoothed value.
Raises:
KeyError: if no metrics have been logged under *name* and *key*.
"""
return _aggregators[name].get_smoothed_value(key)
def get_smoothed_values(name: str) -> Dict[str, float]:
"""Get smoothed values aggregated under a given *name*.
Raises:
KeyError: if no metrics have been logged under *name*.
"""
return _aggregators[name].get_smoothed_values()
def state_dict():
return OrderedDict([(name, agg.state_dict()) for name, agg in _aggregators.items()])
def load_state_dict(state_dict):
for name, agg_state in state_dict.items():
_aggregators[name] = MetersDict()
_aggregators[name].load_state_dict(agg_state)
def xla_metrics_report():
try:
import torch_xla.debug.metrics as met
print(met.metrics_report())
except ImportError:
return
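# Illustrative sketch (not part of the original file): logging a weighted scalar
# and a derived value under a named aggregation, then reading the smoothed
# results back out of the module-level registry.
def _demo_metrics():
    with aggregate("demo"):
        log_scalar("loss", 2.0, weight=1, round=3)
        log_scalar("loss", 4.0, weight=3, round=3)
        log_derived("loss_x2", lambda meters: meters["loss"].avg * 2)
    values = get_smoothed_values("demo")
    assert values["loss"] == 3.5  # weighted average: (2.0 * 1 + 4.0 * 3) / 4
    return values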
|
bart_ls-main
|
fairseq-py/fairseq/logging/metrics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import time
from collections import OrderedDict
from typing import Dict, Optional
try:
import torch
def type_as(a, b):
if torch.is_tensor(a) and torch.is_tensor(b):
return a.to(b)
else:
return a
except ImportError:
torch = None
def type_as(a, b):
return a
try:
import numpy as np
except ImportError:
np = None
class Meter(object):
"""Base class for Meters."""
def __init__(self):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
def reset(self):
raise NotImplementedError
@property
def smoothed_value(self) -> float:
"""Smoothed value used for logging."""
raise NotImplementedError
def safe_round(number, ndigits):
if hasattr(number, "__round__"):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
return safe_round(number.item(), ndigits)
else:
return number
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.reset()
def reset(self):
self.val = None # most recent update
self.sum = 0 # sum from all updates
self.count = 0 # total n from all updates
def update(self, val, n=1):
if val is not None:
self.val = val
if n > 0:
self.sum = type_as(self.sum, val) + (val * n)
self.count = type_as(self.count, n) + n
def state_dict(self):
return {
"val": self.val,
"sum": self.sum,
"count": self.count,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.val = state_dict["val"]
self.sum = state_dict["sum"]
self.count = state_dict["count"]
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.sum / self.count if self.count > 0 else self.val
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class SumMeter(Meter):
"""Computes and stores the sum"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.reset()
def reset(self):
self.sum = 0 # sum from all updates
def update(self, val):
if val is not None:
self.sum = type_as(self.sum, val) + val
def state_dict(self):
return {
"sum": self.sum,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict["sum"]
self.round = state_dict.get("round", None)
@property
def smoothed_value(self) -> float:
val = self.sum
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class TimeMeter(Meter):
"""Computes the average occurrence of some event per second"""
def __init__(
self,
init: int = 0,
n: int = 0,
round: Optional[int] = None,
):
self.round = round
self.reset(init, n)
def reset(self, init=0, n=0):
self.init = init
self.start = time.perf_counter()
self.n = n
self.i = 0
def update(self, val=1):
self.n = type_as(self.n, val) + val
self.i += 1
def state_dict(self):
return {
"init": self.elapsed_time,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
if "start" in state_dict:
# backwards compatibility for old state_dicts
self.reset(init=state_dict["init"])
else:
self.reset(init=state_dict["init"], n=state_dict["n"])
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.perf_counter() - self.start)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.sum = 0
self.n = 0
self.start_time = None
def start(self):
self.start_time = time.perf_counter()
def stop(self, n=1, prehook=None):
if self.start_time is not None:
if prehook is not None:
prehook()
delta = time.perf_counter() - self.start_time
self.sum = self.sum + delta
self.n = type_as(self.n, n) + n
def reset(self):
self.sum = 0 # cumulative time during which stopwatch was active
self.n = 0 # total n across all start/stop
self.start()
def state_dict(self):
return {
"sum": self.sum,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict["sum"]
self.n = state_dict["n"]
self.start_time = None
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.sum / self.n if self.n > 0 else self.sum
@property
def elapsed_time(self):
if self.start_time is None:
return 0.0
return time.perf_counter() - self.start_time
@property
def smoothed_value(self) -> float:
val = self.avg if self.sum > 0 else self.elapsed_time
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class MetersDict(OrderedDict):
"""A sorted dictionary of :class:`Meters`.
Meters are sorted according to a priority that is given when the
meter is first added to the dictionary.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.priorities = []
def __setitem__(self, key, value):
assert key not in self, "MetersDict doesn't support reassignment"
priority, value = value
bisect.insort(self.priorities, (priority, len(self.priorities), key))
super().__setitem__(key, value)
for _, _, key in self.priorities: # reorder dict to match priorities
self.move_to_end(key)
def add_meter(self, key, meter, priority):
self.__setitem__(key, (priority, meter))
def state_dict(self):
return [
(pri, key, self[key].__class__.__name__, self[key].state_dict())
for pri, _, key in self.priorities
# can't serialize DerivedMeter instances
if not isinstance(self[key], MetersDict._DerivedMeter)
]
def load_state_dict(self, state_dict):
self.clear()
self.priorities.clear()
for pri, key, meter_cls, meter_state in state_dict:
meter = globals()[meter_cls]()
meter.load_state_dict(meter_state)
self.add_meter(key, meter, pri)
def get_smoothed_value(self, key: str) -> float:
"""Get a single smoothed value."""
meter = self[key]
if isinstance(meter, MetersDict._DerivedMeter):
return meter.fn(self)
else:
return meter.smoothed_value
def get_smoothed_values(self) -> Dict[str, float]:
"""Get all smoothed values."""
return OrderedDict(
[
(key, self.get_smoothed_value(key))
for key in self.keys()
if not key.startswith("_")
]
)
def reset(self):
"""Reset Meter instances."""
for meter in self.values():
if isinstance(meter, MetersDict._DerivedMeter):
continue
meter.reset()
class _DerivedMeter(Meter):
"""A Meter whose values are derived from other Meters."""
def __init__(self, fn):
self.fn = fn
def reset(self):
pass
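# Illustrative sketch (not part of the original file): meters in a MetersDict are
# ordered by the priority passed to add_meter (smaller first), and _DerivedMeter
# values are computed lazily from the other meters.
def _demo_meters_dict():
    md = MetersDict()
    md.add_meter("wps", TimeMeter(), priority=30)
    md.add_meter("loss", AverageMeter(round=3), priority=10)
    md["loss"].update(1.25)
    md.add_meter("loss_x10", MetersDict._DerivedMeter(lambda m: m["loss"].avg * 10), 20)
    assert list(md.keys()) == ["loss", "loss_x10", "wps"]
    assert md.get_smoothed_value("loss_x10") == 12.5
    return md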
|
bart_ls-main
|
fairseq-py/fairseq/logging/meters.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around the tbwriter API for writing to manifold-tensorboard.
FB Internal (not to be open-sourced)
"""
import datetime
import os
from numbers import Number
from .meters import AverageMeter
from .progress_bar import BaseProgressBar
try:
from palaas import tbwriter, register_manifold
except ImportError:
pass
class LogCounter:
def __init__(self, interval):
self.log_interval = interval
self.log_counter = 1
def advance(self):
self.log_counter += 1
return self.log_counter % self.log_interval == 0
class FbTbmfWrapper(BaseProgressBar):
"""Log to tensorboard."""
# manifold sub-folder to be used by all instances.
manifold_job_path = ""
def _get_job_path(self):
# get slurm job name
job_id = os.environ.get("SLURM_JOB_NAME")
if job_id is None:
# TODO
# try to get fb learner job name
job_id = ""
if job_id is not None and job_id != "":
return job_id
else:
# get date-time str
time = datetime.datetime.now()
time_str = "{}-{}-{}-{}:{}".format(
time.year, time.month, time.day, time.hour, time.minute
)
return time_str
def __init__(self, wrapped_bar, log_interval):
self.wrapped_bar = wrapped_bar
if FbTbmfWrapper.manifold_job_path == "":
FbTbmfWrapper.manifold_job_path = self._get_job_path()
self.log_interval = log_interval
# We need a log counter for every variable.
self.counters = {}
self.counter_disabled_list = []
self.log_counter = 1
self._tbwriter = None
try:
self._tbwriter = tbwriter.get_tbwriter(FbTbmfWrapper.manifold_job_path)
register_manifold()
except Exception:
pass
self.disable_buffering("valid")
self._writers = {}
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag="", step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag="", step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def __exit__(self, *exc):
if self._tbwriter is not None:
self._tbwriter.close()
return False
def disable_buffering(self, tag):
if tag is not None:
self.counter_disabled_list.append(tag)
def _log_to_tensorboard(self, stats, tag="", step=None):
writer = self._tbwriter
if writer is None:
return
# Get LogCounter for this variable
if tag not in self.counter_disabled_list:
if tag not in self.counters:
self.counters[tag] = LogCounter(self.log_interval)
if not self.counters[tag].advance():
return
if step is None:
step = stats["num_updates"]
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(tag, key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(tag, key, stats[key], step)
writer.flush()
|
bart_ls-main
|
fairseq-py/fairseq/logging/fb_tbmf_wrapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bart_ls-main
|
fairseq-py/fairseq/logging/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
import atexit
import json
import logging
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from numbers import Number
from typing import Optional
import torch
from .meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
log_file: Optional[str] = None,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = "tqdm",
wandb_project: Optional[str] = None,
wandb_run_name: Optional[str] = None,
azureml_logging: Optional[bool] = False,
):
if log_format is None:
log_format = default_log_format
if log_file is not None:
handler = logging.FileHandler(filename=log_file)
logger.addHandler(handler)
if log_format == "tqdm" and not sys.stderr.isatty():
log_format = "simple"
if log_format == "json":
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "none":
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == "simple":
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "tqdm":
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError("Unknown log format: {}".format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
if wandb_project:
bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
if azureml_logging:
bar = AzureMLProgressBarWrapper(bar)
return bar
def build_progress_bar(
args,
iterator,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default: str = "tqdm",
no_progress_bar: str = "none",
):
"""Legacy wrapper that takes an argparse.Namespace."""
if getattr(args, "no_progress_bar", False):
default = no_progress_bar
if getattr(args, "distributed_rank", 0) == 0:
tensorboard_logdir = getattr(args, "tensorboard_logdir", None)
else:
tensorboard_logdir = None
return progress_bar(
iterator,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=tensorboard_logdir,
default_log_format=default,
)
def format_stat(stat):
if isinstance(stat, Number):
stat = "{:g}".format(stat)
elif isinstance(stat, AverageMeter):
stat = "{:.3f}".format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = "{:g}".format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = "{:g}".format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class BaseProgressBar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.n = getattr(iterable, "n", 0)
self.epoch = epoch
self.prefix = ""
if epoch is not None:
self.prefix += "epoch {:03d}".format(epoch)
if prefix is not None:
self.prefix += (" | " if self.prefix != "" else "") + prefix
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def update_config(self, config):
"""Log latest configuration."""
pass
def _str_commas(self, stats):
return ", ".join(key + "=" + stats[key].strip() for key in stats.keys())
def _str_pipes(self, stats):
return " | ".join(key + " " + stats[key].strip() for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
class JsonProgressBar(BaseProgressBar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if step > 0 and self.log_interval is not None and step % self.log_interval == 0:
update = (
self.epoch - 1 + (self.i + 1) / float(self.size)
if self.epoch is not None
else None
)
stats = self._format_stats(stats, epoch=self.epoch, update=update)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag is not None:
self.stats = OrderedDict(
[(tag + "_" + k, v) for k, v in self.stats.items()]
)
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix["epoch"] = epoch
if update is not None:
postfix["update"] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class NoopProgressBar(BaseProgressBar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
pass
class SimpleProgressBar(BaseProgressBar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if step > 0 and self.log_interval is not None and step % self.log_interval == 0:
stats = self._format_stats(stats)
postfix = self._str_commas(stats)
with rename_logger(logger, tag):
logger.info(
"{}: {:5d} / {:d} {}".format(
self.prefix, self.i + 1, self.size, postfix
)
)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info("{} | {}".format(self.prefix, postfix))
class TqdmProgressBar(BaseProgressBar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(
iterable,
self.prefix,
leave=False,
disable=(logger.getEffectiveLevel() > logging.INFO),
)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info("{} | {}".format(self.prefix, postfix))
try:
_tensorboard_writers = {}
from torch.utils.tensorboard import SummaryWriter
except ImportError:
try:
from tensorboardX import SummaryWriter
except ImportError:
SummaryWriter = None
def _close_writers():
for w in _tensorboard_writers.values():
w.close()
atexit.register(_close_writers)
class TensorboardProgressBarWrapper(BaseProgressBar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
if SummaryWriter is None:
logger.warning(
"tensorboard not found, please install with: pip install tensorboard"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text("sys.argv", " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
# TODO add hparams to Tensorboard
self.wrapped_bar.update_config(config)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or "")
if writer is None:
return
if step is None:
step = stats["num_updates"]
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
elif torch.is_tensor(stats[key]) and stats[key].numel() == 1:
writer.add_scalar(key, stats[key].item(), step)
writer.flush()
try:
import wandb
except ImportError:
wandb = None
class WandBProgressBarWrapper(BaseProgressBar):
"""Log to Weights & Biases."""
def __init__(self, wrapped_bar, wandb_project, run_name=None):
self.wrapped_bar = wrapped_bar
if wandb is None:
logger.warning("wandb not found, pip install wandb")
return
# reinit=False ensures that if wandb.init() is called multiple times
# within one process it still references the same run
wandb.init(project=wandb_project, reinit=False, name=run_name)
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_wandb(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_wandb(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
if wandb is not None:
wandb.config.update(config)
self.wrapped_bar.update_config(config)
def _log_to_wandb(self, stats, tag=None, step=None):
if wandb is None:
return
if step is None:
step = stats["num_updates"]
prefix = "" if tag is None else tag + "/"
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
wandb.log({prefix + key: stats[key].val}, step=step)
elif isinstance(stats[key], Number):
wandb.log({prefix + key: stats[key]}, step=step)
try:
from azureml.core import Run
except ImportError:
Run = None
class AzureMLProgressBarWrapper(BaseProgressBar):
"""Log to Azure ML"""
def __init__(self, wrapped_bar):
self.wrapped_bar = wrapped_bar
if Run is None:
logger.warning("azureml.core not found, pip install azureml-core")
return
self.run = Run.get_context()
def __exit__(self, *exc):
if Run is not None:
self.run.complete()
return False
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to AzureML"""
self._log_to_azureml(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats"""
self._log_to_azureml(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
self.wrapped_bar.update_config(config)
def _log_to_azureml(self, stats, tag=None, step=None):
if Run is None:
return
if step is None:
step = stats["num_updates"]
prefix = "" if tag is None else tag + "/"
for key in stats.keys() - {"num_updates"}:
name = prefix + key
if isinstance(stats[key], AverageMeter):
self.run.log_row(name=name, **{"step": step, key: stats[key].val})
elif isinstance(stats[key], Number):
self.run.log_row(name=name, **{"step": step, key: stats[key]})
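# Illustrative sketch (not part of the original file): wrapping a plain iterator
# with the "simple" logger and reporting a stats dict. A 'num_updates' entry is
# what the tensorboard/wandb/azureml wrappers fall back to when step is omitted.
def _demo_progress_bar():
    data = range(10)
    bar = progress_bar(data, log_format="simple", log_interval=5, epoch=1)
    for i, _batch in enumerate(bar):
        bar.log({"loss": 0.5, "num_updates": i}, tag="train", step=i)
    bar.print({"loss": 0.5, "num_updates": len(data)}, tag="train")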
|
bart_ls-main
|
fairseq-py/fairseq/logging/progress_bar.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import Any, Dict, List
from fairseq import metrics, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
from torch.nn.modules.loss import _Loss
class FairseqCriterion(_Loss):
def __init__(self, task):
super().__init__()
self.task = task
if hasattr(task, "target_dictionary"):
tgt_dict = task.target_dictionary
self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
@classmethod
def add_args(cls, parser):
"""Add criterion-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@classmethod
def build_criterion(cls, cfg: FairseqDataclass, task):
"""Construct a criterion from command-line args."""
# Infer the arguments to pass to cls.__init__ from the task and config.
init_args = {}
for p in inspect.signature(cls).parameters.values():
if (
p.kind == p.POSITIONAL_ONLY
or p.kind == p.VAR_POSITIONAL
or p.kind == p.VAR_KEYWORD
):
# we haven't implemented inference for these argument types,
# but PRs welcome :)
raise NotImplementedError("{} not supported".format(p.kind))
assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
if p.name == "task":
init_args["task"] = task
elif p.name == "cfg":
init_args["cfg"] = cfg
elif hasattr(cfg, p.name):
init_args[p.name] = getattr(cfg, p.name)
elif p.default != p.empty:
pass # we'll use the default value
else:
raise NotImplementedError(
"Unable to infer Criterion arguments, please implement "
"{}.build_criterion".format(cls.__name__)
)
return cls(**init_args)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
raise NotImplementedError
@staticmethod
def aggregate_logging_outputs(
logging_outputs: List[Dict[str, Any]]
) -> Dict[str, Any]:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
raise NotImplementedError
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"Criterions should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {"nsentences", "ntokens", "sample_size"}:
continue
metrics.log_scalar(k, v)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return False
class LegacyFairseqCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(task=task)
self.args = args
utils.deprecation_warning(
"Criterions should take explicit arguments instead of an "
"argparse.Namespace object, please update your criterion by "
"extending FairseqCriterion instead of LegacyFairseqCriterion."
)
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
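# Illustrative sketch (not part of the original file): a minimal criterion whose
# constructor arguments are filled in by build_criterion() above: `task` receives
# the task, and `my_weight` is read off the config because the parameter name
# matches a config field. `my_weight` is a hypothetical field, and registration
# with fairseq.criterions.register_criterion is omitted to keep the sketch small.
from dataclasses import dataclass as _dataclass, field as _field


@_dataclass
class _DemoCriterionConfig(FairseqDataclass):
    my_weight: float = _field(default=1.0, metadata={"help": "hypothetical loss scale"})


class _DemoCriterion(FairseqCriterion):
    def __init__(self, task, my_weight):
        super().__init__(task)
        self.my_weight = my_weight

    def forward(self, model, sample, reduce=True):
        raise NotImplementedError  # sketch only; see CrossEntropyCriterion below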
|
bart_ls-main
|
fairseq-py/fairseq/criterions/fairseq_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class CrossEntropyCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
@register_criterion("cross_entropy", dataclass=CrossEntropyCriterionConfig)
class CrossEntropyCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1)
loss = F.nll_loss(
lprobs,
target,
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
return loss, loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
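# Illustrative arithmetic (not part of the original file): reduce_metrics reports
# the loss in bits per token, so perplexity is recovered as 2 ** loss. For
# example, a summed natural-log loss of 693.15 over 100 tokens gives
# 693.15 / 100 / math.log(2) ~= 10.0 bits/token, hence ppl = 2 ** 10.0 = 1024.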
|
bart_ls-main
|
fairseq-py/fairseq/criterions/cross_entropy.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from typing import List, Dict, Any
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
from fairseq.models.fairseq_model import FairseqEncoderModel
@dataclass
class FastSpeech2CriterionConfig(FairseqDataclass):
ctc_weight: float = field(
default=0.0, metadata={"help": "weight for CTC loss"}
)
@register_criterion("fastspeech2", dataclass=FastSpeech2CriterionConfig)
class FastSpeech2Loss(FairseqCriterion):
def __init__(self, task, ctc_weight):
super().__init__(task)
self.ctc_weight = ctc_weight
def forward(self, model: FairseqEncoderModel, sample, reduction="mean"):
src_tokens = sample["net_input"]["src_tokens"]
src_lens = sample["net_input"]["src_lengths"]
tgt_lens = sample["target_lengths"]
_feat_out, _, log_dur_out, pitch_out, energy_out = model(
src_tokens=src_tokens,
src_lengths=src_lens,
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"],
durations=sample["durations"],
pitches=sample["pitches"],
energies=sample["energies"]
)
src_mask = lengths_to_mask(sample["net_input"]["src_lengths"])
tgt_mask = lengths_to_mask(sample["target_lengths"])
pitches, energies = sample["pitches"], sample["energies"]
pitch_out, pitches = pitch_out[src_mask], pitches[src_mask]
energy_out, energies = energy_out[src_mask], energies[src_mask]
feat_out, feat = _feat_out[tgt_mask], sample["target"][tgt_mask]
l1_loss = F.l1_loss(feat_out, feat, reduction=reduction)
pitch_loss = F.mse_loss(pitch_out, pitches, reduction=reduction)
energy_loss = F.mse_loss(energy_out, energies, reduction=reduction)
log_dur_out = log_dur_out[src_mask]
dur = sample["durations"].float()
dur = dur.half() if log_dur_out.type().endswith(".HalfTensor") else dur
log_dur = torch.log(dur + 1)[src_mask]
dur_loss = F.mse_loss(log_dur_out, log_dur, reduction=reduction)
ctc_loss = torch.tensor(0.).type_as(l1_loss)
if self.ctc_weight > 0.:
lprobs = model.get_normalized_probs((_feat_out,), log_probs=True)
lprobs = lprobs.transpose(0, 1) # T x B x C
src_mask = lengths_to_mask(src_lens)
src_tokens_flat = src_tokens.masked_select(src_mask)
ctc_loss = F.ctc_loss(
lprobs, src_tokens_flat, tgt_lens, src_lens,
reduction=reduction, zero_infinity=True
) * self.ctc_weight
loss = l1_loss + dur_loss + pitch_loss + energy_loss + ctc_loss
sample_size = sample["nsentences"]
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"l1_loss": utils.item(l1_loss.data),
"dur_loss": utils.item(dur_loss.data),
"pitch_loss": utils.item(pitch_loss.data),
"energy_loss": utils.item(energy_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
}
return loss, sample_size, logging_output
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
ns = [log.get("sample_size", 0) for log in logging_outputs]
ntot = sum(ns)
ws = [n / (ntot + 1e-8) for n in ns]
for key in [
"loss", "l1_loss", "dur_loss", "pitch_loss", "energy_loss",
"ctc_loss"
]:
vals = [log.get(key, 0) for log in logging_outputs]
val = sum(val * w for val, w in zip(vals, ws))
metrics.log_scalar(key, val, ntot, round=3)
metrics.log_scalar("sample_size", ntot, len(logging_outputs))
# inference metrics
if "targ_frames" not in logging_outputs[0]:
return
n = sum(log.get("targ_frames", 0) for log in logging_outputs)
for key, new_key in [
("mcd_loss", "mcd_loss"),
("pred_frames", "pred_ratio"),
("nins", "ins_rate"),
("ndel", "del_rate"),
]:
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(new_key, val / n, n, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return False
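# Illustrative arithmetic (not part of the original file): reduce_metrics forms a
# sample-size-weighted average, e.g. per-worker losses [1.0, 3.0] with sample
# sizes [2, 8] give weights [0.2, 0.8] and a reported loss of
# 1.0 * 0.2 + 3.0 * 0.8 = 2.6.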
|
bart_ls-main
|
fairseq-py/fairseq/criterions/fastspeech2_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
@dataclass
class CtcCriterionConfig(FairseqDataclass):
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: str = field(
default="letter",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
wer_kenlm_model: Optional[str] = field(
default=None,
metadata={
"help": "if this is provided, use kenlm to compute wer (along with other wer_* args)"
},
)
wer_lexicon: Optional[str] = field(
default=None,
metadata={"help": "lexicon to use with wer_kenlm_model"},
)
wer_lm_weight: float = field(
default=2.0,
metadata={"help": "lm weight to use with wer_kenlm_model"},
)
wer_word_score: float = field(
default=-1.0,
metadata={"help": "lm word score to use with wer_kenlm_model"},
)
wer_args: Optional[str] = field(
default=None,
metadata={
"help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)"
},
)
@register_criterion("ctc", dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
super().__init__(task)
self.blank_idx = (
task.target_dictionary.index(task.blank_symbol)
if hasattr(task, "blank_symbol")
else 0
)
self.pad_idx = task.target_dictionary.pad()
self.eos_idx = task.target_dictionary.eos()
self.post_process = cfg.post_process
if cfg.wer_args is not None:
(
cfg.wer_kenlm_model,
cfg.wer_lexicon,
cfg.wer_lm_weight,
cfg.wer_word_score,
) = eval(cfg.wer_args)
if cfg.wer_kenlm_model is not None:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
dec_args = Namespace()
dec_args.nbest = 1
dec_args.criterion = "ctc"
dec_args.kenlm_model = cfg.wer_kenlm_model
dec_args.lexicon = cfg.wer_lexicon
dec_args.beam = 50
dec_args.beam_size_token = min(50, len(task.target_dictionary))
dec_args.beam_threshold = min(50, len(task.target_dictionary))
dec_args.lm_weight = cfg.wer_lm_weight
dec_args.word_score = cfg.wer_word_score
dec_args.unk_weight = -math.inf
dec_args.sil_weight = 0
self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
else:
self.w2l_decoder = None
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
lprobs = model.get_normalized_probs(
net_output, log_probs=True
).contiguous() # (T, B, C) from the encoder
if "src_lengths" in sample["net_input"]:
input_lengths = sample["net_input"]["src_lengths"]
else:
if net_output["padding_mask"] is not None:
non_padding_mask = ~net_output["padding_mask"]
input_lengths = non_padding_mask.long().sum(-1)
else:
input_lengths = lprobs.new_full(
(lprobs.size(1),), lprobs.size(0), dtype=torch.long
)
pad_mask = (sample["target"] != self.pad_idx) & (
sample["target"] != self.eos_idx
)
targets_flat = sample["target"].masked_select(pad_mask)
if "target_lengths" in sample:
target_lengths = sample["target_lengths"]
else:
target_lengths = pad_mask.sum(-1)
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction="sum",
zero_infinity=self.zero_infinity,
)
ntokens = (
sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
)
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": ntokens,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
if not model.training:
import editdistance
with torch.no_grad():
lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
for lp, t, inp_l in zip(
lprobs_t,
sample["target_label"]
if "target_label" in sample
else sample["target"],
input_lengths,
):
lp = lp[:inp_l].unsqueeze(0)
decoded = None
if self.w2l_decoder is not None:
decoded = self.w2l_decoder.decode(lp)
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
p = (t != self.task.target_dictionary.pad()) & (
t != self.task.target_dictionary.eos()
)
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
toks = lp.argmax(dim=-1).unique_consecutive()
pred_units_arr = toks[toks != self.blank_idx].tolist()
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
pred_units = self.task.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units, self.post_process).split()
if decoded is not None and "words" in decoded:
pred_words = decoded["words"]
w_errs += editdistance.eval(pred_words, targ_words)
wv_errs += editdistance.eval(pred_words_raw, targ_words)
else:
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output["wv_errors"] = wv_errs
logging_output["w_errors"] = w_errs
logging_output["w_total"] = w_len
logging_output["c_errors"] = c_err
logging_output["c_total"] = c_len
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
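# --- Illustrative sketch (not part of the original fairseq module) ---
# Minimal, self-contained example of the tensor layout that CtcCriterion.forward
# assembles before calling F.ctc_loss: (T, B, C) log-probabilities, per-example
# input lengths, and a flat 1-D tensor of concatenated non-blank targets.
# All shapes and values below are made up purely for demonstration.
def _ctc_loss_shape_example():
    import torch
    import torch.nn.functional as F
    T, B, C = 50, 2, 10  # time steps, batch size, vocab size (blank at index 0)
    lprobs = torch.randn(T, B, C).log_softmax(dim=-1)
    input_lengths = torch.full((B,), T, dtype=torch.long)
    target_lengths = torch.tensor([7, 5], dtype=torch.long)
    targets_flat = torch.randint(1, C, (int(target_lengths.sum()),))
    return F.ctc_loss(
        lprobs,
        targets_flat,
        input_lengths,
        target_lengths,
        blank=0,
        reduction="sum",
        zero_infinity=True,
    )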
|
bart_ls-main
|
fairseq-py/fairseq/criterions/ctc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.constants import DDP_BACKEND_CHOICES
from omegaconf import II
@dataclass
class AdaptiveLossConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
ddp_backend: DDP_BACKEND_CHOICES = II("distributed_training.ddp_backend")
@register_criterion("adaptive_loss", dataclass=AdaptiveLossConfig)
class AdaptiveLoss(FairseqCriterion):
"""This is an implementation of the loss function accompanying the adaptive softmax approximation for
    graphics processing units (GPUs), described in the paper "Efficient softmax approximation for GPUs"
(http://arxiv.org/abs/1609.04309)."""
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
@classmethod
def build_criterion(cls, cfg: AdaptiveLossConfig, task):
if cfg.ddp_backend in {"c10d", "pytorch_ddp"}:
raise Exception(
"AdaptiveLoss is not compatible with the PyTorch "
"version of DistributedDataParallel. Please use "
"`--ddp-backend=legacy_ddp` instead."
)
return cls(task, cfg.sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model.decoder, "adaptive_softmax")
and model.decoder.adaptive_softmax is not None
)
adaptive_softmax = model.decoder.adaptive_softmax
net_output = model(**sample["net_input"])
orig_target = model.get_targets(sample, net_output)
nsentences = orig_target.size(0)
orig_target = orig_target.view(-1)
bsz = orig_target.size(0)
logits, target = adaptive_softmax(net_output[0], orig_target)
assert len(target) == len(logits)
loss = net_output[0].new(1 if reduce else bsz).zero_()
for i in range(len(target)):
if target[i] is not None:
assert target[i].min() >= 0 and target[i].max() <= logits[i].size(1)
loss += F.cross_entropy(
logits[i],
target[i],
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
orig = utils.strip_pad(orig_target, self.padding_idx)
ntokens = orig.numel()
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
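# --- Illustrative sketch (not part of the original fairseq module) ---
# The cluster-wise cross-entropy summed above is the same idea implemented by
# PyTorch's built-in torch.nn.AdaptiveLogSoftmaxWithLoss: frequent classes live
# in the head, rare classes in smaller tail clusters. This is only an analogy to
# show the technique, not the adaptive_softmax module the criterion consumes;
# the sizes and cutoffs below are arbitrary.
def _adaptive_softmax_analogy_example():
    import torch
    import torch.nn as nn
    hidden_dim, vocab_size = 64, 1000
    adaptive = nn.AdaptiveLogSoftmaxWithLoss(
        in_features=hidden_dim,
        n_classes=vocab_size,
        cutoffs=[100, 500],  # head covers the 100 most frequent classes
    )
    hidden = torch.randn(8, hidden_dim)          # decoder states for 8 tokens
    target = torch.randint(0, vocab_size, (8,))  # gold token ids
    output = adaptive(hidden, target)            # namedtuple (output, loss)
    return output.loss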
|
bart_ls-main
|
fairseq-py/fairseq/criterions/adaptive_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
logger = logging.getLogger(__name__)
@dataclass
class ModelCriterionConfig(FairseqDataclass):
loss_weights: Dict[str, float] = field(
default_factory=dict,
metadata={"help": "weights for the loss terms"},
)
log_keys: List[str] = field(
default_factory=list,
metadata={"help": "additional output keys to log"},
)
@register_criterion("model", dataclass=ModelCriterionConfig)
class ModelCriterion(FairseqCriterion):
"""
This criterion relies on the model to supply losses.
The losses should be a dictionary of name -> scalar returned by
the model either by including it in the net_output dict or by
implementing a get_losses(net_output, sample) method. The final loss is
a scaled sum of all losses according to weights in loss_weights.
If no weights are provided, then all losses are scaled by 1.0.
The losses will be automatically logged. Additional keys from
net_output dict can be logged via the log_keys parameter.
"""
def __init__(self, task, loss_weights=None, log_keys=None):
super().__init__(task)
self.loss_weights = loss_weights
self.log_keys = log_keys
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
sample_size = net_output["sample_size"]
scaled_losses = {}
if hasattr(model, "get_losses"):
losses = model.get_losses(net_output, sample)
elif isinstance(net_output, dict) and "losses" in net_output:
losses = net_output["losses"]
else:
raise Exception("Could not retrieve losses")
for lk, p in losses.items():
try:
coef = 1.0 if len(self.loss_weights) == 0 else self.loss_weights[lk]
except KeyError:
logger.error(
f"weight for loss {lk} is not in loss_weights ({self.loss_weights})"
)
raise
if coef != 0 and p is not None:
scaled_losses[lk] = coef * p.float()
loss = sum(scaled_losses.values())
if reduce and loss.numel() > 1:
loss = loss.sum()
logging_output = {
"loss": loss.data,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
"_world_size": 1,
}
for lk in self.log_keys:
if lk in net_output and net_output[lk] is not None:
logging_output[lk] = float(net_output[lk])
if len(scaled_losses) > 1:
for lk, l in scaled_losses.items():
logging_output[f"loss_{lk}"] = l.item()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar("loss", loss_sum / sample_size, sample_size, round=3)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"_world_size",
}
world_size = utils.item(
sum(log.get("_world_size", 0) for log in logging_outputs)
)
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss_"):
metrics.log_scalar(k, val / sample_size, sample_size, round=3)
else:
metrics.log_scalar(k, val / world_size, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
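# --- Illustrative sketch (not part of the original fairseq module) ---
# Toy example of the contract described in the class docstring: the model's
# forward returns a net_output dict with a "losses" mapping and a "sample_size",
# and the criterion scales and sums the entries. ToyModel below is hypothetical
# and only meant to show what ModelCriterion expects from net_output.
def _model_criterion_contract_example():
    import torch
    import torch.nn as nn
    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(4, 4)
        def forward(self, src_tokens):
            out = self.proj(src_tokens)
            return {
                "losses": {
                    "recon": (out - src_tokens).pow(2).sum(),
                    "l2": out.pow(2).sum(),
                },
                "sample_size": src_tokens.size(0),
            }
    net_output = ToyModel()(torch.randn(3, 4))
    # ModelCriterion would scale each entry by loss_weights (default 1.0) and sum.
    return sum(net_output["losses"].values())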
|
bart_ls-main
|
fairseq-py/fairseq/criterions/model_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.logging.meters import safe_round
from fairseq.utils import is_xla_tensor
@dataclass
class Wav2VecCriterionConfig(FairseqDataclass):
infonce: bool = field(
default=False,
metadata={
"help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)"
},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig)
class Wav2vecCriterion(FairseqCriterion):
def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
super().__init__(task)
self.infonce = infonce
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
self.xla = is_xla_tensor(logits)
# XXX: handle weights on xla.
weights = None
if hasattr(model, "get_target_weights") and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
reduction = "none" if ((not reduce) or self.xla) else "sum"
if self.infonce:
loss = F.cross_entropy(logits, target, reduction=reduction)
else:
loss = F.binary_cross_entropy_with_logits(
logits, target.float(), weights, reduction=reduction
)
if self.xla:
# tpu-comment: since dynamic shapes lead to recompilations on xla,
# we don't shrink tensors using mask_indices.
# Instead, we use mask indices to adjust loss.
mi = (
sample['net_input']['mask_indices']
.transpose(0, 1) # logits are transposed in `model.get_logits`
.reshape(logits.size(0))
)
loss = (loss * mi).sum() if reduce else (loss * mi)
if 'sample_size' in sample:
sample_size = sample['sample_size']
elif 'mask_indices' in sample['net_input']:
sample_size = sample['net_input']['mask_indices'].sum()
else:
sample_size = target.numel() if self.infonce else target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
self.loss_weights
), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
logging_output = {
"loss": loss.item() if (reduce and not self.xla) else loss.detach(),
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
for lk in self.log_keys:
# Only store "logits" and "target" for computing MAP and MAUC
# during validation
if lk == "logits":
if not self.training:
logging_output["logits"] = logits.cpu().numpy()
elif lk == "target":
if not self.training:
# If the targets have been mixed with the predictions of
# teacher models, find the original targets
if hasattr(model, "get_original_targets"):
original_target = model.get_original_targets(sample, net_output)
else:
original_target = target
logging_output["target"] = original_target.cpu().numpy()
elif lk in net_output:
value = net_output[lk]
if not is_xla_tensor(value):
value = float(value)
logging_output[lk] = value
if len(losses) > 1:
for i, l in enumerate(losses):
logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == 0
min = logits.argmin(-1) == 0
if is_xla_tensor(logits):
max, min = max * mi, min * mi
both = max & min
corr = max.long().sum() - both.long().sum()
count = mi.sum()
else:
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = float(max.numel())
logging_output["correct"] = corr
logging_output["count"] = count
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
correct = sum(log.get("correct", 0) for log in logging_outputs)
metrics.log_scalar("_correct", correct)
total = sum(log.get("count", 0) for log in logging_outputs)
metrics.log_scalar("_total", total)
if total > 0:
metrics.log_derived(
"accuracy",
lambda meters: safe_round(
meters["_correct"].sum / meters["_total"].sum, 5
)
if meters["_total"].sum > 0
else float("nan"),
)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"correct",
"count",
}
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss"):
metrics.log_scalar(
k, val / (sample_size or 1) / math.log(2), sample_size, round=3
)
else:
metrics.log_scalar(k, val / len(logging_outputs), round=3)
# FIXME: revert when gather based xla reduction is implemented
#@staticmethod
#def logging_outputs_can_be_summed() -> bool:
def logging_outputs_can_be_summed(self) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
# XXX: Gather based reduction not implemented for xla yet.
# So we fall to sum based reduction for xla.
return self.xla
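# --- Illustrative sketch (not part of the original fairseq module) ---
# With --infonce the contrastive loss above reduces to a cross entropy in which
# the positive candidate occupies index 0 of each row of logits, so the target
# is a vector of zeros and "correct" counts rows whose argmax is 0. The shapes
# below are made up; this is a standalone sketch of that setup, not the model's API.
def _infonce_example():
    import torch
    import torch.nn.functional as F
    num_masked, num_candidates = 16, 1 + 100   # 1 positive + 100 distractors
    logits = torch.randn(num_masked, num_candidates)
    target = logits.new_zeros(num_masked, dtype=torch.long)  # positive is index 0
    loss = F.cross_entropy(logits, target, reduction="sum")
    accuracy = (logits.argmax(dim=-1) == 0).float().mean()
    return loss, accuracy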
|
bart_ls-main
|
fairseq-py/fairseq/criterions/wav2vec_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
def compute_cross_entropy_loss(logits, targets, ignore_index=-100):
"""
Function to compute the cross entropy loss. The default value of
ignore_index is the same as the default value for F.cross_entropy in
pytorch.
"""
assert logits.size(0) == targets.size(
-1
), "Logits and Targets tensor shapes don't match up"
loss = F.nll_loss(
F.log_softmax(logits, -1, dtype=torch.float32),
targets,
reduction="sum",
ignore_index=ignore_index,
)
return loss
@register_criterion("legacy_masked_lm_loss")
class LegacyMaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
This optionally also computes the next sentence prediction (NSP) loss and
adds it to the overall loss based on the specified args. There are three
cases to consider:
1) Generic MLM training without NSP loss. In this case sentence_targets
and sentence_logits are both None.
2) BERT training without NSP loss. In this case sentence_targets is
not None but sentence_logits is None and we should not be computing
a sentence level loss.
3) BERT training with NSP loss. In this case both sentence_targets and
sentence_logits are not None and we should be computing a sentence
level loss. The weight of the sentence level loss is specified as
an argument.
"""
def __init__(self, task, masked_lm_only, nsp_loss_weight):
super().__init__(task)
self.masked_lm_only = masked_lm_only
self.nsp_loss_weight = nsp_loss_weight
@staticmethod
def add_args(parser):
"""Args for MaskedLM Loss"""
# Default for masked_lm_only is False so as to not break BERT training
parser.add_argument(
"--masked-lm-only",
default=False,
action="store_true",
help="compute MLM loss only",
)
parser.add_argument(
"--nsp-loss-weight",
default=1.0,
type=float,
help="weight for next sentence prediction" " loss (default 1)",
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
lm_logits, output_metadata = model(**sample["net_input"])
# reshape lm_logits from (N,T,C) to (N*T,C)
lm_logits = lm_logits.view(-1, lm_logits.size(-1))
lm_targets = sample["lm_target"].view(-1)
lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
# compute the number of tokens for which loss is computed. This is used
# to normalize the loss
ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
loss = lm_loss / ntokens
nsentences = sample["nsentences"]
# nsentences = 0
# Compute sentence loss if masked_lm_only is False
sentence_loss = None
if not self.masked_lm_only:
sentence_logits = output_metadata["sentence_logits"]
sentence_targets = sample["sentence_target"].view(-1)
# This needs to be recomputed due to some differences between
# TokenBlock and BlockPair dataset. This can be resolved with a
# refactor of BERTModel which we will do in the future.
# TODO: Remove this after refactor of BERTModel
nsentences = sentence_targets.size(0)
# Check for logits being none which can happen when remove_heads
# is set to true in the BERT model. Ideally we should set
# masked_lm_only to true in this case, but that requires some
# refactor in the BERT model.
if sentence_logits is not None:
sentence_loss = compute_cross_entropy_loss(
sentence_logits, sentence_targets
)
loss += self.nsp_loss_weight * (sentence_loss / nsentences)
# NOTE: as we are summing up per token mlm loss and per sentence nsp loss
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"lm_loss": utils.item(lm_loss.data) if reduce else lm_loss.data,
# sentence loss is not always computed
"sentence_loss": (
(utils.item(sentence_loss.data) if reduce else sentence_loss.data)
if sentence_loss is not None
else 0.0
),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
lm_loss_sum = sum(log.get("lm_loss", 0) for log in logging_outputs)
sentence_loss_sum = sum(log.get("sentence_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_loss = sum(log.get("loss", 0) for log in logging_outputs)
metrics.log_scalar(
"loss",
agg_loss / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
metrics.log_scalar(
"lm_loss",
lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0,
ntokens,
round=3,
)
metrics.log_scalar(
"sentence_loss",
sentence_loss_sum / nsentences / math.log(2) if nsentences > 0 else 0.0,
nsentences,
round=3,
)
metrics.log_scalar(
"nll_loss",
lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0,
ntokens,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
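# --- Illustrative sketch (not part of the original fairseq module) ---
# Standalone sketch of how the MLM and NSP terms above are combined: the MLM
# loss is normalized per masked token and the sentence-level loss per sentence,
# then summed with the NSP weight. All tensors below are random stand-ins.
def _mlm_plus_nsp_example(nsp_loss_weight=1.0, padding_idx=1):
    import torch
    lm_logits = torch.randn(8, 100)                     # (tokens, vocab)
    lm_targets = torch.randint(2, 100, (8,))
    lm_targets[::2] = padding_idx                       # unmasked positions are padded out
    lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, padding_idx)
    ntokens = (lm_targets != padding_idx).sum()
    sentence_logits = torch.randn(2, 2)                 # (sentences, 2 classes)
    sentence_targets = torch.randint(0, 2, (2,))
    sentence_loss = compute_cross_entropy_loss(sentence_logits, sentence_targets)
    return lm_loss / ntokens + nsp_loss_weight * (sentence_loss / sentence_targets.size(0))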
|
bart_ls-main
|
fairseq-py/fairseq/criterions/legacy_masked_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from torch import Tensor
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedDualImitationCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
@register_criterion("nat_loss", dataclass=LabelSmoothedDualImitationCriterionConfig)
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
def __init__(self, task, label_smoothing):
super().__init__(task)
self.label_smoothing = label_smoothing
def _compute_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
def _custom_loss(self, loss, name="loss", factor=1.0):
return {"name": name, "loss": loss, "factor": factor}
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
nsentences, ntokens = sample["nsentences"], sample["ntokens"]
# B x T
src_tokens, src_lengths = (
sample["net_input"]["src_tokens"],
sample["net_input"]["src_lengths"],
)
tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]
outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
losses, nll_loss = [], []
for obj in outputs:
if outputs[obj].get("loss", None) is None:
_losses = self._compute_loss(
outputs[obj].get("out"),
outputs[obj].get("tgt"),
outputs[obj].get("mask", None),
outputs[obj].get("ls", 0.0),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
else:
_losses = self._custom_loss(
outputs[obj].get("loss"),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
losses += [_losses]
if outputs[obj].get("nll_loss", False):
nll_loss += [_losses.get("nll_loss", 0.0)]
loss = sum(l["loss"] for l in losses)
nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)
# NOTE:
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
for l in losses:
logging_output[l["name"]] = (
utils.item(l["loss"].data / l["factor"])
if reduce
                else l["loss"].data / l["factor"]
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))
metrics.log_scalar(
"loss", loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
for key in logging_outputs[0]:
if key[-5:] == "-loss":
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(
key[:-5],
val / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
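# --- Illustrative sketch (not part of the original fairseq module) ---
# Standalone sketch of the label smoothing used in _compute_loss above: the
# smoothed loss is nll * (1 - ls) - mean(log_softmax) * ls, i.e. a mixture of
# the NLL term and a uniform term over the vocabulary. Shapes are arbitrary.
def _nat_label_smoothing_example(label_smoothing=0.1):
    import torch
    import torch.nn.functional as F
    outputs = torch.randn(6, 50)            # (selected positions, vocab)
    targets = torch.randint(0, 50, (6,))
    logits = F.log_softmax(outputs, dim=-1)
    nll_loss = F.nll_loss(logits, targets, reduction="none").float().mean()
    loss = nll_loss * (1 - label_smoothing) - logits.float().mean() * label_smoothing
    return loss, nll_loss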
|
bart_ls-main
|
fairseq-py/fairseq/criterions/nat_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from fairseq import registry
from fairseq.criterions.fairseq_criterion import ( # noqa
FairseqCriterion,
LegacyFairseqCriterion,
)
from omegaconf import DictConfig
(
build_criterion_,
register_criterion,
CRITERION_REGISTRY,
CRITERION_DATACLASS_REGISTRY,
) = registry.setup_registry(
"--criterion", base_class=FairseqCriterion, default="cross_entropy"
)
def build_criterion(cfg: DictConfig, task):
return build_criterion_(cfg, task)
# automatically import any Python files in the criterions/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.criterions." + file_name)
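# Example (illustrative only, not part of the original file): a new criterion is
# picked up automatically by the import loop above once a file in this directory
# defines and registers it, e.g. in a hypothetical fairseq/criterions/my_criterion.py:
#
#     from dataclasses import dataclass
#     from fairseq.criterions import FairseqCriterion, register_criterion
#     from fairseq.dataclass import FairseqDataclass
#
#     @dataclass
#     class MyCriterionConfig(FairseqDataclass):
#         pass
#
#     @register_criterion("my_criterion", dataclass=MyCriterionConfig)
#     class MyCriterion(FairseqCriterion):
#         def forward(self, model, sample, reduce=True):
#             ...  # must return (loss, sample_size, logging_output)
#
# It can then be selected with --criterion my_criterion.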
|
bart_ls-main
|
fairseq-py/fairseq/criterions/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig
)
try:
from simuleval.metrics.latency import (
AverageLagging,
AverageProportion,
DifferentiableAverageLagging
)
LATENCY_METRICS = {
"average_lagging": AverageLagging,
"average_proportion": AverageProportion,
"differentiable_average_lagging": DifferentiableAverageLagging,
}
except ImportError:
LATENCY_METRICS = None
@dataclass
class LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
latency_avg_weight: float = field(
default=0.0,
metadata={"help": "weight fot average latency loss."},
)
latency_var_weight: float = field(
default=0.0,
metadata={"help": "weight fot variance latency loss."},
)
latency_avg_type: str = field(
default="differentiable_average_lagging",
metadata={"help": "latency type for average loss"},
)
latency_var_type: str = field(
default="variance_delay",
metadata={"help": "latency typ for variance loss"},
)
latency_gather_method: str = field(
default="weighted_average",
metadata={"help": "method to gather latency loss for all heads"},
)
latency_update_after: int = field(
default=0,
metadata={"help": "Add latency loss after certain steps"},
)
@register_criterion(
"latency_augmented_label_smoothed_cross_entropy",
dataclass=LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig
)
class LatencyAugmentedLabelSmoothedCrossEntropyCriterion(
LabelSmoothedCrossEntropyCriterion
):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size,
report_accuracy,
latency_avg_weight,
latency_var_weight,
latency_avg_type,
latency_var_type,
latency_gather_method,
latency_update_after,
):
super().__init__(
task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy
)
assert LATENCY_METRICS is not None, "Please make sure SimulEval is installed."
self.latency_avg_weight = latency_avg_weight
self.latency_var_weight = latency_var_weight
self.latency_avg_type = latency_avg_type
self.latency_var_type = latency_var_type
self.latency_gather_method = latency_gather_method
self.latency_update_after = latency_update_after
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
# 1. Compute cross entropy loss
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        # 2. Compute latency loss
latency_loss, expected_latency, expected_delays_var = self.compute_latency_loss(
model, sample, net_output
)
if self.latency_update_after > 0:
num_updates = getattr(model.decoder, "num_updates", None)
assert num_updates is not None, (
"model.decoder doesn't have attribute 'num_updates'"
)
if num_updates <= self.latency_update_after:
latency_loss = 0
loss += latency_loss
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"latency": expected_latency,
"delays_var": expected_delays_var,
"latency_loss": latency_loss,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def compute_latency_loss(self, model, sample, net_output):
assert (
net_output[-1].encoder_padding_mask is None
or not net_output[-1].encoder_padding_mask[:, 0].any()
), (
"Only right padding on source is supported."
)
# 1. Obtain the expected alignment
alpha_list = [item["alpha"] for item in net_output[1].attn_list]
num_layers = len(alpha_list)
bsz, num_heads, tgt_len, src_len = alpha_list[0].size()
# bsz * num_layers * num_heads, tgt_len, src_len
alpha_all = torch.cat(alpha_list, dim=1).view(-1, tgt_len, src_len)
        # 2. compute expected delays
# bsz * num_heads * num_layers, tgt_len, src_len for MMA
steps = (
torch.arange(1, 1 + src_len)
.unsqueeze(0)
.unsqueeze(1)
.expand_as(alpha_all)
.type_as(alpha_all)
)
expected_delays = torch.sum(steps * alpha_all, dim=-1)
target_padding_mask = (
model.get_targets(sample, net_output)
.eq(self.padding_idx)
.unsqueeze(1)
.expand(bsz, num_layers * num_heads, tgt_len)
.contiguous()
.view(-1, tgt_len)
)
src_lengths = (
sample["net_input"]["src_lengths"]
.unsqueeze(1)
.expand(bsz, num_layers * num_heads)
.contiguous()
.view(-1)
)
expected_latency = LATENCY_METRICS[self.latency_avg_type](
expected_delays, src_lengths, None,
target_padding_mask=target_padding_mask
)
# 2.1 average expected latency of heads
# bsz, num_layers * num_heads
expected_latency = expected_latency.view(bsz, -1)
if self.latency_gather_method == "average":
# bsz * tgt_len
expected_latency = expected_delays.mean(dim=1)
elif self.latency_gather_method == "weighted_average":
weights = torch.nn.functional.softmax(expected_latency, dim=1)
expected_latency = torch.sum(expected_latency * weights, dim=1)
elif self.latency_gather_method == "max":
expected_latency = expected_latency.max(dim=1)[0]
else:
raise NotImplementedError
expected_latency = expected_latency.sum()
avg_loss = self.latency_avg_weight * expected_latency
# 2.2 variance of expected delays
expected_delays_var = (
expected_delays.view(bsz, -1, tgt_len).var(dim=1).mean(dim=1)
)
expected_delays_var = expected_delays_var.sum()
        var_loss = self.latency_var_weight * expected_delays_var
# 3. Final loss
latency_loss = avg_loss + var_loss
return latency_loss, expected_latency, expected_delays_var
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
super().reduce_metrics(logging_outputs)
latency = sum(
log.get("latency", 0) for log in logging_outputs
)
delays_var = sum(
log.get("delays_var", 0) for log in logging_outputs
)
latency_loss = sum(
log.get("latency_loss", 0) for log in logging_outputs
)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar(
"latency", latency.float() / nsentences, nsentences, round=3
)
metrics.log_scalar(
"delays_var", delays_var / nsentences,
nsentences, round=3
)
metrics.log_scalar(
"latency_loss", latency_loss / nsentences,
nsentences, round=3
)
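# --- Illustrative sketch (not part of the original fairseq module) ---
# Standalone sketch of the expected-delay computation in compute_latency_loss:
# given an alignment matrix alpha (tgt_len x src_len, rows summing to 1), the
# expected delay of target step j is sum_i i * alpha[j, i]. Values are made up.
def _expected_delay_example():
    import torch
    tgt_len, src_len = 4, 6
    alpha = torch.softmax(torch.randn(tgt_len, src_len), dim=-1)
    steps = torch.arange(1, 1 + src_len, dtype=alpha.dtype)     # 1-based source positions
    expected_delays = (steps * alpha).sum(dim=-1)               # one delay per target step
    return expected_delays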
|
bart_ls-main
|
fairseq-py/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
report_accuracy: bool = field(
default=False,
metadata={"help": "report accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
sentence_avg: bool = II("optimization.sentence_avg")
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
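# --- Illustrative sketch (not part of the original fairseq module) ---
# Minimal usage of label_smoothed_nll_loss above: lprobs is (num_tokens, vocab)
# log-probabilities and target is the matching 1-D tensor of gold indices. With
# epsilon=0 the smoothed loss collapses to the plain NLL term. Shapes are arbitrary.
def _label_smoothed_nll_loss_example(epsilon=0.1, padding_idx=1):
    import torch
    import torch.nn.functional as F
    lprobs = F.log_softmax(torch.randn(5, 20), dim=-1)
    target = torch.randint(0, 20, (5,))
    loss, nll_loss = label_smoothed_nll_loss(
        lprobs, target, epsilon, ignore_index=padding_idx, reduce=True
    )
    return loss, nll_loss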
@register_criterion(
"label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionConfig
)
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size=0,
report_accuracy=False,
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
bart_ls-main
|
fairseq-py/fairseq/criterions/label_smoothed_cross_entropy.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from typing import Any, Dict, List
from functools import lru_cache
from dataclasses import dataclass, field
import torch
from omegaconf import II
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
import torch.nn.functional as F
logger = logging.getLogger(__name__)
@dataclass
class Tacotron2CriterionConfig(FairseqDataclass):
bce_pos_weight: float = field(
default=1.0,
metadata={"help": "weight of positive examples for BCE loss"},
)
n_frames_per_step: int = field(
default=0,
metadata={"help": "Number of frames per decoding step"},
)
use_guided_attention_loss: bool = field(
default=False,
metadata={"help": "use guided attention loss"},
)
guided_attention_loss_sigma: float = field(
default=0.4,
metadata={"help": "weight of positive examples for BCE loss"},
)
ctc_weight: float = field(
default=0.0, metadata={"help": "weight for CTC loss"}
)
sentence_avg: bool = II("optimization.sentence_avg")
class GuidedAttentionLoss(torch.nn.Module):
"""
Efficiently Trainable Text-to-Speech System Based on Deep Convolutional
Networks with Guided Attention (https://arxiv.org/abs/1710.08969)
"""
def __init__(self, sigma):
super().__init__()
self.sigma = sigma
@staticmethod
@lru_cache(maxsize=8)
def _get_weight(s_len, t_len, sigma):
grid_x, grid_y = torch.meshgrid(torch.arange(t_len), torch.arange(s_len))
grid_x = grid_x.to(s_len.device)
grid_y = grid_y.to(s_len.device)
w = (grid_y.float() / s_len - grid_x.float() / t_len) ** 2
return 1.0 - torch.exp(-w / (2 * (sigma ** 2)))
def _get_weights(self, src_lens, tgt_lens):
bsz, max_s_len, max_t_len = len(src_lens), max(src_lens), max(tgt_lens)
weights = torch.zeros((bsz, max_t_len, max_s_len))
for i, (s_len, t_len) in enumerate(zip(src_lens, tgt_lens)):
weights[i, :t_len, :s_len] = self._get_weight(s_len, t_len,
self.sigma)
return weights
@staticmethod
def _get_masks(src_lens, tgt_lens):
in_masks = lengths_to_mask(src_lens)
out_masks = lengths_to_mask(tgt_lens)
return out_masks.unsqueeze(2) & in_masks.unsqueeze(1)
def forward(self, attn, src_lens, tgt_lens, reduction="mean"):
weights = self._get_weights(src_lens, tgt_lens).to(attn.device)
masks = self._get_masks(src_lens, tgt_lens).to(attn.device)
loss = (weights * attn.transpose(1, 2)).masked_select(masks)
loss = torch.sum(loss) if reduction == "sum" else torch.mean(loss)
return loss
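# --- Illustrative sketch (not part of the original fairseq module) ---
# Standalone sketch of the guided-attention weight used above: for a source of
# length S and target of length T, W[t, s] = 1 - exp(-((s/S - t/T)^2) / (2*sigma^2)),
# which is small near the diagonal and penalizes off-diagonal attention mass.
# The lengths below are arbitrary.
def _guided_attention_weight_example(sigma=0.4):
    import torch
    s_len, t_len = 5, 7
    grid_s = torch.arange(s_len).float().view(1, -1) / s_len   # source positions, normalized
    grid_t = torch.arange(t_len).float().view(-1, 1) / t_len   # target positions, normalized
    w = (grid_s - grid_t) ** 2                                  # (t_len, s_len)
    return 1.0 - torch.exp(-w / (2 * sigma ** 2))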
@register_criterion("tacotron2", dataclass=Tacotron2CriterionConfig)
class Tacotron2Criterion(FairseqCriterion):
def __init__(self, task, sentence_avg, n_frames_per_step,
use_guided_attention_loss, guided_attention_loss_sigma,
bce_pos_weight, ctc_weight):
super().__init__(task)
self.sentence_avg = sentence_avg
self.n_frames_per_step = n_frames_per_step
self.bce_pos_weight = bce_pos_weight
self.guided_attn = None
if use_guided_attention_loss:
self.guided_attn = GuidedAttentionLoss(guided_attention_loss_sigma)
self.ctc_weight = ctc_weight
def forward(self, model, sample, reduction="mean"):
bsz, max_len, _ = sample["target"].size()
feat_tgt = sample["target"]
feat_len = sample["target_lengths"].view(bsz, 1).expand(-1, max_len)
eos_tgt = torch.arange(max_len).to(sample["target"].device)
eos_tgt = eos_tgt.view(1, max_len).expand(bsz, -1)
eos_tgt = (eos_tgt == (feat_len - 1)).float()
src_tokens = sample["net_input"]["src_tokens"]
src_lens = sample["net_input"]["src_lengths"]
tgt_lens = sample["target_lengths"]
feat_out, eos_out, extra = model(
src_tokens=src_tokens,
src_lengths=src_lens,
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"]
)
l1_loss, mse_loss, eos_loss = self.compute_loss(
extra["feature_out"], feat_out, eos_out, feat_tgt, eos_tgt,
tgt_lens, reduction,
)
attn_loss = torch.tensor(0.).type_as(l1_loss)
if self.guided_attn is not None:
attn_loss = self.guided_attn(extra['attn'], src_lens, tgt_lens, reduction)
ctc_loss = torch.tensor(0.).type_as(l1_loss)
if self.ctc_weight > 0.:
net_output = (feat_out, eos_out, extra)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.transpose(0, 1) # T x B x C
src_mask = lengths_to_mask(src_lens)
src_tokens_flat = src_tokens.masked_select(src_mask)
ctc_loss = F.ctc_loss(
lprobs, src_tokens_flat, tgt_lens, src_lens,
reduction=reduction, zero_infinity=True
) * self.ctc_weight
loss = l1_loss + mse_loss + eos_loss + attn_loss + ctc_loss
sample_size = sample["nsentences"] if self.sentence_avg \
else sample["ntokens"]
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"l1_loss": utils.item(l1_loss.data),
"mse_loss": utils.item(mse_loss.data),
"eos_loss": utils.item(eos_loss.data),
"attn_loss": utils.item(attn_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
}
return loss, sample_size, logging_output
def compute_loss(self, feat_out, feat_out_post, eos_out, feat_tgt,
eos_tgt, tgt_lens, reduction="mean"):
mask = lengths_to_mask(tgt_lens)
_eos_out = eos_out[mask].squeeze()
_eos_tgt = eos_tgt[mask]
_feat_tgt = feat_tgt[mask]
_feat_out = feat_out[mask]
_feat_out_post = feat_out_post[mask]
l1_loss = (
F.l1_loss(_feat_out, _feat_tgt, reduction=reduction) +
F.l1_loss(_feat_out_post, _feat_tgt, reduction=reduction)
)
mse_loss = (
F.mse_loss(_feat_out, _feat_tgt, reduction=reduction) +
F.mse_loss(_feat_out_post, _feat_tgt, reduction=reduction)
)
eos_loss = F.binary_cross_entropy_with_logits(
_eos_out, _eos_tgt, pos_weight=torch.tensor(self.bce_pos_weight),
reduction=reduction
)
return l1_loss, mse_loss, eos_loss
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
ns = [log.get("sample_size", 0) for log in logging_outputs]
ntot = sum(ns)
ws = [n / (ntot + 1e-8) for n in ns]
for key in ["loss", "l1_loss", "mse_loss", "eos_loss", "attn_loss", "ctc_loss"]:
vals = [log.get(key, 0) for log in logging_outputs]
val = sum(val * w for val, w in zip(vals, ws))
metrics.log_scalar(key, val, ntot, round=3)
metrics.log_scalar("sample_size", ntot, len(logging_outputs))
# inference metrics
if "targ_frames" not in logging_outputs[0]:
return
n = sum(log.get("targ_frames", 0) for log in logging_outputs)
for key, new_key in [
("mcd_loss", "mcd_loss"),
("pred_frames", "pred_ratio"),
("nins", "ins_rate"),
("ndel", "del_rate"),
]:
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(new_key, val / n, n, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return False
|
bart_ls-main
|
fairseq-py/fairseq/criterions/tacotron2_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from torch import nn
@register_criterion("composite_loss")
class CompositeLoss(LegacyFairseqCriterion):
"""This is a composite loss that, given a list of model outputs and a list of targets,
computes an average of losses for each output-target pair"""
def __init__(self, args, task):
super().__init__(args, task)
self.underlying_criterion = args.underlying_criterion
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True,
help='underlying criterion to use for the composite loss')
# fmt: on
@staticmethod
def build_underlying_criterion(args, task):
saved_criterion = args.criterion
args.criterion = args.underlying_criterion
assert saved_criterion != args.underlying_criterion
underlying_criterion = task.build_criterion(args)
args.criterion = saved_criterion
return underlying_criterion
@classmethod
def build_criterion(cls, args, task):
underlying_criterion = CompositeLoss.build_underlying_criterion(args, task)
class FakeModel(nn.Module):
def __init__(self, model, net_out, target):
super().__init__()
self.model = model
self.net_out = net_out
self.target = target
def forward(self, **unused):
return self.net_out
def get_normalized_probs(self, net_output, log_probs, sample=None):
return self.model.get_normalized_probs(
net_output, log_probs, sample=sample
)
def get_targets(self, *unused):
return self.target
@property
def decoder(self):
return self.model.decoder
class _CompositeLoss(LegacyFairseqCriterion):
def __init__(self, args, task, underlying_criterion):
super().__init__(args, task)
self.underlying_criterion = underlying_criterion
def forward(self, model, sample, reduce=True):
net_outputs = model(**sample["net_input"])
targets = sample["target"]
bsz = targets[0].size(0)
loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_()
sample_size = 0
logging_output = {}
for o, t in zip(net_outputs[0], targets):
m = FakeModel(model, (o, net_outputs[1]), t)
sample["target"] = t
l, ss, logging_output = self.underlying_criterion(m, sample, reduce)
loss += l
sample_size += ss
loss.div_(len(targets))
sample_size /= len(targets)
logging_output["loss"] = utils.item(loss.data) if reduce else loss.data
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
return underlying_criterion.__class__.aggregate_logging_outputs(
logging_outputs
)
@staticmethod
def reduce_metrics(logging_outputs) -> None:
underlying_criterion.__class__.reduce_metrics(logging_outputs)
return _CompositeLoss(args, task, underlying_criterion)
|
bart_ls-main
|
fairseq-py/fairseq/criterions/composite_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import re
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class HubertCriterionConfig(FairseqDataclass):
pred_masked_weight: float = field(
default=1.0,
metadata={"help": "weight for predictive loss for masked frames"},
)
pred_nomask_weight: float = field(
default=0.0,
metadata={"help": "weight for predictive loss for unmasked frames"},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
@register_criterion("hubert", dataclass=HubertCriterionConfig)
class HubertCriterion(FairseqCriterion):
def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None):
super().__init__(task)
self.pred_masked_weight = pred_masked_weight
self.pred_nomask_weight = pred_nomask_weight
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True, log_pred=False):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(target_list=sample["target_list"], **sample["net_input"])
loss = 0.
sample_size = 0
logging_output = {}
reduction = "sum" if reduce else "none"
loss_m_list = []
logp_m_list = model.get_logits(net_output, True)
targ_m_list = model.get_targets(net_output, True)
assert self.pred_masked_weight == 0 or len(logp_m_list) > 0
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction)
loss_m_list.append(loss_m)
logging_output[f"loss_m_{i}"] = loss_m.detach().item()
if self.pred_masked_weight > 0:
loss += self.pred_masked_weight * sum(loss_m_list)
sample_size += targ_m_list[0].numel()
loss_u_list = []
logp_u_list = model.get_logits(net_output, False)
targ_u_list = model.get_targets(net_output, False)
assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0
for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction)
loss_u_list.append(loss_u)
logging_output[f"loss_u_{i}"] = loss_u.detach().item()
if self.pred_nomask_weight > 0:
loss += self.pred_nomask_weight * sum(loss_u_list)
sample_size += targ_u_list[0].numel()
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses, names = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = [names]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, n, coef in zip(extra_losses, names, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
logging_output[f"loss_{n}"] = p.item()
logging_output = {
"loss": loss.item() if reduce else loss,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
**logging_output,
}
for lk in self.log_keys:
if lk in net_output:
                logging_output[lk] = float(net_output[lk])
def compute_correct(logits):
if logits.numel() == 0:
return 0, 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == 0
min = logits.argmin(-1) == 0
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = max.numel()
return corr, count
with torch.no_grad():
for i, logp_m in enumerate(logp_m_list):
corr_m, count_m = compute_correct(logp_m)
logging_output[f"correct_m_{i}"] = corr_m
logging_output[f"count_m_{i}"] = count_m
for i, logp_u in enumerate(logp_u_list):
corr_u, count_u = compute_correct(logp_u)
logging_output[f"correct_u_{i}"] = corr_u
logging_output[f"count_u_{i}"] = count_u
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training (copied from normal cross entropy)."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg))
else:
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["loss"].avg))
counts = {}
for lk in logging_outputs[0].keys():
if lk.startswith("count_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val)
counts[lk] = val
for lk in logging_outputs[0].keys():
if lk.startswith("loss_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / sample_size / math.log(2), round=3)
elif lk.startswith("correct_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)])
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
raise NotImplementedError()
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
|
bart_ls-main
|
fairseq-py/fairseq/criterions/hubert_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class ModelDenoisingCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
mlm_loss_weight: float = field(
default=1.0,
metadata={"help": "weight for mlm"}
)
@register_criterion("model_based_denoising", dataclass=ModelDenoisingCriterionConfig)
class ModelBasedDenoisingCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg, mlm_loss_weight):
super().__init__(task)
self.sentence_avg = sentence_avg
self.mlm_loss_weight = mlm_loss_weight
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
seq2seq_loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
mlm_loss = None
if len(net_output) == 4:
mlm_loss = net_output[2]
ntokens = (net_output[1] != self.padding_idx).sum()
sample_size = ntokens
logging_output = {
"loss": seq2seq_loss,
            "mlm_loss": mlm_loss.data if mlm_loss is not None else 0,
"ntokens": ntokens,
"nsentences": net_output[1].size(0),
"sample_size": sample_size,
}
if mlm_loss is not None:
loss = seq2seq_loss + mlm_loss * sample_size
else:
loss = seq2seq_loss
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = net_output[1].contiguous().view(-1)
# target = model.get_targets(sample, net_output).view(-1)
loss = F.nll_loss(
lprobs,
target,
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
return loss, loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
mlm_loss_sum = sum(log.get("mlm_loss", 0) for log in logging_outputs)
metrics.log_scalar(
"mlm_loss", mlm_loss_sum / len(logging_outputs), 1, round=3
)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
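
# --- Illustrative note (editor's addition, not part of the original module) ---
# A minimal, hedged sketch of how the combined loss above is normalized.
# `forward` returns `seq2seq_loss + mlm_loss * sample_size`; fairseq later
# divides both the gradient and the logged loss by `sample_size`, so the MLM
# term contributes roughly `mlm_loss` per update while the seq2seq term is a
# per-token average. The numbers below are invented purely for illustration.
if __name__ == "__main__":
    import math

    seq2seq_loss, mlm_loss, sample_size = 800.0, 2.0, 400  # hypothetical values
    combined = seq2seq_loss + mlm_loss * sample_size
    per_token_bits = combined / sample_size / math.log(2)
    print(round(per_token_bits, 3))  # (800/400 + 2) / ln(2) ≈ 5.771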
|
bart_ls-main
|
fairseq-py/fairseq/criterions/model_denoising.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class SentencePredictionConfig(FairseqDataclass):
classification_head_name: str = field(
default="sentence_classification_head",
metadata={"help": "name of the classification head to use"},
)
regression_target: bool = field(
default=False,
)
@register_criterion("sentence_prediction", dataclass=SentencePredictionConfig)
class SentencePredictionCriterion(FairseqCriterion):
def __init__(self, cfg: SentencePredictionConfig, task):
super().__init__(task)
self.classification_head_name = cfg.classification_head_name
self.regression_target = cfg.regression_target
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
)
targets = model.get_targets(sample, [logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
else:
logits = logits.view(-1).float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = logits.argmax(dim=1)
logging_output["ncorrect"] = (preds == targets).sum()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar(
"accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
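
# --- Illustrative example (editor's addition, not part of the original module) ---
# A minimal, hedged sketch of the two loss branches used in `forward` above:
# summed NLL over log-softmaxed logits for classification, and summed MSE when
# `regression_target` is set. All tensors below are toy values.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    # classification branch
    logits = torch.tensor([[2.0, 0.5], [0.1, 1.5]])
    targets = torch.tensor([0, 1])
    lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    cls_loss = F.nll_loss(lprobs, targets, reduction="sum")

    # regression branch (the head emits a single score per sentence)
    scores = torch.tensor([0.9, 1.6])
    reg_targets = torch.tensor([1.0, 1.5])
    reg_loss = F.mse_loss(scores, reg_targets, reduction="sum")
    print(round(cls_loss.item(), 4), round(reg_loss.item(), 4))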
|
bart_ls-main
|
fairseq-py/fairseq/criterions/sentence_prediction.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from .label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
)
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
alignment_lambda: float = field(
default=0.05, metadata={"help": "weight for the alignment loss"}
)
@register_criterion(
"label_smoothed_cross_entropy_with_alignment",
dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig,
)
class LabelSmoothedCrossEntropyCriterionWithAlignment(
LabelSmoothedCrossEntropyCriterion
):
def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda):
super().__init__(task, sentence_avg, label_smoothing)
self.alignment_lambda = alignment_lambda
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
alignment_loss = None
        # Compute alignment loss only for the training set and non-dummy batches.
if "alignments" in sample and sample["alignments"] is not None:
alignment_loss = self.compute_alignment_loss(sample, net_output)
if alignment_loss is not None:
logging_output["alignment_loss"] = utils.item(alignment_loss.data)
loss += self.alignment_lambda * alignment_loss
return loss, sample_size, logging_output
def compute_alignment_loss(self, sample, net_output):
attn_prob = net_output[1]["attn"][0]
bsz, tgt_sz, src_sz = attn_prob.shape
attn = attn_prob.view(bsz * tgt_sz, src_sz)
align = sample["alignments"]
align_weights = sample["align_weights"].float()
if len(align) > 0:
# Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to
# the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing.
loss = -(
(attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
* align_weights[:, None]
).sum()
else:
return None
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss_sum = utils.item(
sum(log.get("nll_loss", 0) for log in logging_outputs)
)
alignment_loss_sum = utils.item(
sum(log.get("alignment_loss", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_scalar(
"alignment_loss",
alignment_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
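
# --- Illustrative example (editor's addition, not part of the original module) ---
# A minimal worked example of the indexing in `compute_alignment_loss` above.
# `align` holds (src_idx, tgt_idx) pairs, so rows of the flattened attention are
# selected by the target index and columns by the source index. Values are toy.
if __name__ == "__main__":
    import torch

    # one sentence, 2 target steps, 3 source positions; rows sum to 1
    attn = torch.tensor([[0.7, 0.2, 0.1],
                         [0.1, 0.1, 0.8]])
    align = torch.tensor([[0, 0], [2, 1]])   # tgt 0 <- src 0, tgt 1 <- src 2
    align_weights = torch.tensor([1.0, 1.0])
    picked = attn[align[:, 1][:, None], align[:, 0][:, None]]  # [[0.7], [0.8]]
    loss = -(picked.log() * align_weights[:, None]).sum()
    print(round(loss.item(), 4))  # -(ln 0.7 + ln 0.8) ≈ 0.5798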
|
bart_ls-main
|
fairseq-py/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import math
from omegaconf import II
import torch
from fairseq import metrics, modules, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class MaskedLmConfig(FairseqDataclass):
tpu: bool = II("common.tpu")
@register_criterion("masked_lm", dataclass=MaskedLmConfig)
class MaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
"""
def __init__(self, cfg: MaskedLmConfig, task):
super().__init__(task)
self.tpu = cfg.tpu
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
masked_tokens = sample["target"].ne(self.padding_idx)
sample_size = masked_tokens.int().sum()
        # Rare: when no tokens are masked, project all tokens.
# We use torch.where to avoid device-to-host transfers,
# except on CPU where torch.where is not well supported
# (see github.com/pytorch/pytorch/issues/26247).
if self.tpu:
masked_tokens = None # always project all tokens on TPU
elif masked_tokens.device == torch.device("cpu"):
if not masked_tokens.any():
masked_tokens = None
else:
masked_tokens = torch.where(
masked_tokens.any(),
masked_tokens,
masked_tokens.new([True]),
)
logits = model(**sample["net_input"], masked_tokens=masked_tokens)[0]
targets = model.get_targets(sample, [logits])
if masked_tokens is not None:
targets = targets[masked_tokens]
loss = modules.cross_entropy(
logits.view(-1, logits.size(-1)),
targets.view(-1),
reduction="sum",
ignore_index=self.padding_idx,
)
logging_output = {
"loss": loss if self.tpu else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
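
# --- Illustrative example (editor's addition, not part of the original module) ---
# A minimal, hedged sketch of the masking convention used in `forward` above:
# targets hold the padding index at unmasked positions, so `ne(padding_idx)`
# selects exactly the masked positions and the model only projects those.
# `F.cross_entropy` stands in for fairseq's `modules.cross_entropy` here, and
# all values are toy.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    pad_idx, vocab = 1, 10                      # hypothetical sizes
    targets = torch.tensor([[1, 5, 1, 7]])      # pad everywhere except masked slots
    masked_tokens = targets.ne(pad_idx)         # [[False, True, False, True]]
    n_masked = int(masked_tokens.int().sum())
    logits = torch.randn(n_masked, vocab)       # projections for masked slots only
    loss = F.cross_entropy(
        logits.view(-1, vocab),
        targets[masked_tokens].view(-1),
        reduction="sum",
        ignore_index=pad_idx,
    )
    print(n_masked, loss.item() >= 0)           # 2 True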
|
bart_ls-main
|
fairseq-py/fairseq/criterions/masked_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_ranking")
class SentenceRankingCriterion(FairseqCriterion):
def __init__(self, task, ranking_head_name, save_predictions, num_classes):
super().__init__(task)
self.ranking_head_name = ranking_head_name
if save_predictions is not None:
self.prediction_h = open(save_predictions, "w")
else:
self.prediction_h = None
self.num_classes = num_classes
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
parser.add_argument('--ranking-head-name',
default='sentence_classification_head',
help='name of the ranking head to use')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute ranking loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.ranking_head_name in model.classification_heads
), "model must provide sentence ranking head for --criterion=sentence_ranking"
scores = []
for idx in range(self.num_classes):
score, _ = model(
**sample["net_input{idx}".format(idx=idx + 1)],
classification_head_name=self.ranking_head_name,
)
scores.append(score)
logits = torch.cat(scores, dim=1)
sample_size = logits.size(0)
if "target" in sample:
targets = model.get_targets(sample, [logits]).view(-1)
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
else:
targets = None
loss = torch.tensor(0.0, requires_grad=True)
if self.prediction_h is not None:
preds = logits.argmax(dim=1)
for i, (id, pred) in enumerate(zip(sample["id"].tolist(), preds.tolist())):
if targets is not None:
label = targets[i].item()
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
else:
print("{}\t{}".format(id, pred), file=self.prediction_h)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if targets is not None:
logging_output["ncorrect"] = (logits.argmax(dim=1) == targets).sum()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar(
"accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
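
# --- Illustrative note (editor's addition, not part of the original module) ---
# A minimal, hedged sketch of how `forward` above assembles the ranking logits:
# the sample carries one encoder input per candidate (`net_input1` ...
# `net_input{num_classes}`), the head produces one score column per candidate,
# and the columns are concatenated before the softmax. Values are toy.
if __name__ == "__main__":
    import torch

    score_a = torch.tensor([[0.2], [1.3]])         # scores for candidate 1
    score_b = torch.tensor([[0.9], [0.4]])         # scores for candidate 2
    logits = torch.cat([score_a, score_b], dim=1)  # shape (bsz, num_classes)
    print(logits.argmax(dim=1).tolist())           # predicted candidate per row: [1, 0]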
|
bart_ls-main
|
fairseq-py/fairseq/criterions/sentence_ranking.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq_cli.train import main as pre_main
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import omegaconf_no_object_check
from fairseq.utils import reset_logging
import hydra
from hydra.core.hydra_config import HydraConfig
import torch
from omegaconf import OmegaConf, open_dict
logger = logging.getLogger("fairseq_cli.hydra_train")
@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config")
def hydra_main(cfg: FairseqConfig) -> float:
    return _hydra_main(cfg)
def _hydra_main(cfg: FairseqConfig, **kwargs) -> float:
add_defaults(cfg)
if cfg.common.reset_logging:
reset_logging() # Hydra hijacks logging, fix that
else:
# check if directly called or called through hydra_main
if HydraConfig.initialized():
with open_dict(cfg):
                # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(HydraConfig.get().job_logging, resolve=True)
with omegaconf_no_object_check():
cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True))
OmegaConf.set_struct(cfg, True)
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, pre_main, **kwargs)
else:
distributed_utils.call_main(cfg, pre_main, **kwargs)
except BaseException as e:
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! " + str(e))
# get best val and return - useful for sweepers
try:
best_val = metrics.get_smoothed_value(
"valid", cfg.checkpoint.best_checkpoint_metric
)
    except Exception:
best_val = None
if best_val is None:
best_val = float("inf")
return best_val
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
    except Exception:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
hydra_init(cfg_name)
hydra_main()
if __name__ == "__main__":
cli_main()
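
# --- Illustrative usage (editor's addition, not part of the original module) ---
# A hedged sketch of a hydra-style launch. `--config-name` is read in
# `cli_main` above; the dotted overrides are placeholders and depend on the
# config groups actually defined under fairseq/config.
#
#   python fairseq_cli/hydra_train.py --config-name config \
#       task.data=/path/to/data-bin common.fp16=true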
|
bart_ls-main
|
fairseq-py/fairseq_cli/hydra_train.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
from fairseq.file_chunker_utils import find_offsets
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
assert args.dataset_impl != "huffman", "preprocessing.py doesn't support Huffman yet, use HuffmanCodeBuilder directly."
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
target = not args.only_source
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]},
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
if args.dict_only:
return
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = find_offsets(input_file, num_workers)
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
start_offset,
end_offset,
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
merge_result(
Binarizer.binarize(
input_file,
vocab,
lambda t: ds.add_item(t),
offset=first_chunk[0],
end=first_chunk[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
logger.info(
"[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result["nseq"]
input_file = input_prefix
offsets = find_offsets(input_file, num_workers)
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
start_offset,
end_offset,
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl
)
merge_result(
Binarizer.binarize_alignments(
input_file,
utils.parse_alignment,
lambda t: ds.add_item(t),
offset=first_chunk[0],
end=first_chunk[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(
vocab, validpref, outprefix, lang, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(
filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl,
vocab_size=None,
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize_alignments(
filename, parse_alignment, consumer, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = "{}/{}".format(args.destdir, output_prefix)
if lang is not None:
lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang)
elif args.only_source:
lang_part = ""
else:
lang_part = ".{}-{}".format(args.source_lang, args.target_lang)
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return "{}.{}".format(base, extension)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
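
# --- Illustrative usage (editor's addition, not part of the original module) ---
# A hedged example invocation; the flag names mirror the `args` fields read
# above (source_lang, target_lang, trainpref, validpref, testpref, destdir,
# workers, joined_dictionary), while the language pair and paths are
# placeholders.
#
#   python fairseq_cli/preprocess.py \
#       --source-lang de --target-lang en \
#       --trainpref data/train --validpref data/valid --testpref data/test \
#       --destdir data-bin/example --workers 8 --joined-dictionary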
|
bart_ls-main
|
fairseq-py/fairseq_cli/preprocess.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from omegaconf import DictConfig
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(
cfg.common_eval.results_path,
"generate-{}.txt".format(cfg.dataset.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
else:
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
has_target = sample["target"] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
sample_id
)
target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(
target_tokens,
cfg.common_eval.post_process,
escape_unk=True,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(
generator
),
)
src_str = decode_fn(src_str)
if has_target:
target_str = decode_fn(target_str)
if not cfg.common_eval.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str), file=output_file)
if has_target:
print("T-{}\t{}".format(sample_id, target_str), file=output_file)
# Process top predictions
for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
if not cfg.common_eval.quiet:
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print(
"H-{}\t{}\t{}".format(sample_id, score, hypo_str),
file=output_file,
)
# detokenized hypothesis
print(
"D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
file=output_file,
)
print(
"P-{}\t{}".format(
sample_id,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"]
.div_(math.log(2))
.tolist(),
)
),
),
file=output_file,
)
if cfg.generation.print_alignment == "hard":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_alignment == "soft":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
",".join(src_probs)
for src_probs in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_step:
print(
"I-{}\t{}".format(sample_id, hypo["steps"]),
file=output_file,
)
if cfg.generation.retain_iter_history:
for step, h in enumerate(hypo["history"]):
_, h_str, _ = utils.post_process_prediction(
hypo_tokens=h["tokens"].int().cpu(),
src_str=src_str,
alignment=None,
align_dict=None,
tgt_dict=tgt_dict,
remove_bpe=None,
)
print(
"E-{}_{}\t{}".format(sample_id, step, h_str),
file=output_file,
)
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or cfg.common_eval.post_process is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(
target_str, add_if_not_exist=True
)
hypo_tokens = tgt_dict.encode_line(
detok_hypo_str, add_if_not_exist=True
)
if hasattr(scorer, "add_string"):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
if has_target:
if cfg.bpe and not cfg.generation.sacrebleu:
if cfg.common_eval.post_process:
logger.warning(
"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
)
else:
logger.warning(
"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
)
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
"Generate {} with beam={}: {}".format(
cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
),
file=output_file,
)
return scorer
def cli_main():
parser = options.get_generation_parser()
# TODO: replace this workaround with refactoring of `AudioPretraining`
parser.add_argument(
'--arch', '-a', metavar='ARCH', default="wav2vec2",
help='Model architecture. For constructing tasks that rely on '
'model args (e.g. `AudioPretraining`)'
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
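
# --- Illustrative note (editor's addition, not part of the original module) ---
# Summary of the per-sample output prefixes printed by `_main` above, collected
# from the print calls in this file:
#   S-<id>  source sentence (after detokenization)
#   T-<id>  reference target, when available
#   H-<id>  scored hypothesis, still tokenized / BPE-encoded
#   D-<id>  scored hypothesis after detokenization
#   P-<id>  per-token positional scores, converted to base 2
#   A-<id>  hard or soft alignments, when --print-alignment is set
#   I-<id>, E-<id>_<step>  decoding steps and iterative-refinement history,
#                          when the corresponding generation flags are enabled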
|
bart_ls-main
|
fairseq-py/fairseq_cli/generate.py
|
bart_ls-main
|
fairseq-py/fairseq_cli/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import sys
import tempfile
from pathlib import Path
from typing import Callable, Optional
import torch.fb.rendezvous.zeus # noqa: F401
from fairseq import distributed_utils, options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.file_io import PathManager
from fairseq_cli.train import main as fairseq_train_main
from iopath.fb.manifold import ManifoldPathHandler
logger = logging.getLogger(__file__)
def get_fb_training_parser():
parser = options.get_training_parser()
parser.add_argument(
"--tensorboard-manifold",
action="store_true",
help="[FB only] send tensorboard plots to manifold",
)
parser.add_argument(
"--log-dir",
metavar="LOG",
default=None,
help="[FB only] Dir to store log in addition to stdout. If this "
"is not set, it will be set to args.save_dir",
)
    # For the latte_training use case, we have a separate NMTManifoldPathHandler registered in
    # https://fburl.com/wurd7t70. If parameters need to be updated, the right place
# is ~/fbsource/fbcode/fblearner/flow/projects/fairseq/latte_training/manifold_file_io.py
# for manifold
parser.add_argument(
"--manifold-max-parallel",
default=8,
type=int,
help="set ManifoldPathHandler max_parallel download number",
)
parser.add_argument(
"--manifold-timeout-sec",
default=1800,
type=int,
help="set ManifoldPathHandler timeout seconds",
)
parser.add_argument(
"--manifold-has-user-data",
default=True,
type=lambda x: x.lower() not in ("no", "false", "f", "n", "0")
if x is not None
else None,
help="set ManifoldPathHandler has_user_data option",
)
parser.add_argument(
"--manifold-num-retries",
default=15,
type=int,
help="set ManifoldPathHandler num_retries option",
)
parser.add_argument(
"--manifold-ttl",
default=None,
type=int,
help="A manifold resource's time-to-live, applied to all manifold written resources. By default, there is no TTL.",
)
# for manifold
return parser
def init_manifold(args):
# support Manifold for checkpoints
# For latte_training use case, we use a separate NMTManifoldPathHandler
# registered in https://fburl.com/diffusion/djgz9bwx.
try:
PathManager.register_handler(
ManifoldPathHandler(
max_parallel=args.manifold_max_parallel,
timeout_sec=args.manifold_timeout_sec,
has_user_data=args.manifold_has_user_data,
num_retries=args.manifold_num_retries,
ttl=args.manifold_ttl,
)
)
logger.info(
f"ManifoldPathHandler is set: max_parallel={args.manifold_max_parallel}, "
f"timeout_sec={args.manifold_timeout_sec}; has_user_data={args.manifold_has_user_data}"
)
except KeyError:
logging.warning("ManifoldPathHandler already registered.")
def fb_main(
device_id,
args,
start_rank,
log_path=None,
manifold_log_uri=None,
after_distributed_init_fn: Optional[
Callable[[argparse.Namespace], argparse.Namespace]
] = None,
):
"""[FB] entry point for each worker process."""
args.distributed_rank = start_rank + device_id
def add_handler(handler):
for root in ["fairseq", "fairseq_cli"]:
logger = logging.getLogger(root)
logger.propagate = False # don't propagate to parent loggers
handler.setLevel(logging.INFO)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
logger.addHandler(handler)
# write fairseq logs to stdout
add_handler(logging.StreamHandler(sys.stdout))
init_manifold(args)
def train_main():
cfg = convert_namespace_to_omegaconf(args)
distributed_utils.distributed_main(
device_id,
fairseq_train_main,
cfg,
kwargs={
"after_distributed_init_fn": after_distributed_init_fn,
},
)
if manifold_log_uri is not None and log_path is None:
log_path = tempfile.mktemp()
if log_path is not None and args.distributed_rank == 0:
# write logs from worker 0 to train.log
PathManager.mkdirs(args.save_dir)
os.makedirs(os.path.dirname(log_path), exist_ok=True)
Path(log_path).touch(0o777, exist_ok=True)
add_handler(logging.FileHandler(log_path))
train_main()
if manifold_log_uri is not None:
PathManager.copy_from_local(
local_path=log_path, dst_path=manifold_log_uri, overwrite=True
)
else:
train_main()
if __name__ == "__main__":
parser = get_fb_training_parser()
args = options.parse_args_and_arch(parser)
log_dir = args.log_dir if args.log_dir is not None else args.save_dir
log_path = os.path.join(log_dir, "train.log")
distributed_utils.infer_init_method(
convert_namespace_to_omegaconf(args), force_distributed=True
)
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
torch.multiprocessing.spawn(
fn=fb_main,
args=(args, start_rank, log_path),
nprocs=min(
torch.cuda.device_count(),
args.distributed_world_size,
),
)
|
bart_ls-main
|
fairseq-py/fairseq_cli/fb_train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from argparse import Namespace
from itertools import chain
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from fairseq.utils import reset_logging
from omegaconf import DictConfig
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.validate")
def main(cfg: DictConfig, override_args=None):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
reset_logging()
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, "model_overrides", "{}")))
else:
overrides = None
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=overrides,
suffix=cfg.checkpoint.checkpoint_suffix,
)
model = models[0]
# Move models to GPU
for model in models:
model.eval()
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(saved_cfg)
# Build criterion
criterion = task.build_criterion(saved_cfg.criterion)
criterion.eval()
for subset in cfg.dataset.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
if data_parallel_world_size > 1:
log_outputs = distributed_utils.all_gather_list(
log_outputs,
max_size=cfg.common.all_gather_list_size,
group=distributed_utils.get_data_parallel_group(),
)
log_outputs = list(chain.from_iterable(log_outputs))
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(
override_parser, suppress_defaults=True
)
distributed_utils.call_main(
convert_namespace_to_omegaconf(args), main, override_args=override_args
)
if __name__ == "__main__":
cli_main()
|
bart_ls-main
|
fairseq-py/fairseq_cli/validate.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
import ast
import fileinput
import logging
import math
import os
import sys
import time
from argparse import Namespace
from collections import namedtuple
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from fairseq_cli.generate import get_symbols_to_strip_from_output
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.interactive")
Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints")
Translation = namedtuple("Translation", "src_str hypos pos_scores alignments")
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, cfg, task, max_positions, encode_fn):
def encode_fn_target(x):
return encode_fn(x)
if cfg.generation.constraints:
        # Strip (tab-delimited) constraints, if present, from input lines,
# store them in batch_constraints
batch_constraints = [list() for _ in lines]
for i, line in enumerate(lines):
if "\t" in line:
lines[i], *batch_constraints[i] = line.split("\t")
# Convert each List[str] to List[Tensor]
for i, constraint_list in enumerate(batch_constraints):
batch_constraints[i] = [
task.target_dictionary.encode_line(
encode_fn_target(constraint),
append_eos=False,
add_if_not_exist=False,
)
for constraint in constraint_list
]
if cfg.generation.constraints:
constraints_tensor = pack_constraints(batch_constraints)
else:
constraints_tensor = None
tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn)
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(
tokens, lengths, constraints=constraints_tensor
),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
ids = batch["id"]
src_tokens = batch["net_input"]["src_tokens"]
src_lengths = batch["net_input"]["src_lengths"]
constraints = batch.get("constraints", None)
yield Batch(
ids=ids,
src_tokens=src_tokens,
src_lengths=src_lengths,
constraints=constraints,
)
def main(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
start_time = time.time()
total_translate_time = 0
utils.import_user_module(cfg.common)
if cfg.interactive.buffer_size < 1:
cfg.interactive.buffer_size = 1
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.batch_size = 1
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
not cfg.dataset.batch_size
or cfg.dataset.batch_size <= cfg.interactive.buffer_size
), "--batch-size cannot be larger than --buffer-size"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Setup task, e.g., translation
task = tasks.setup_task(cfg.task)
# Load ensemble
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Initialize generator
generator = task.build_generator(models, cfg.generation)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
if cfg.generation.constraints:
logger.warning(
"NOTE: Constrained decoding currently assumes a shared subword vocabulary."
)
if cfg.interactive.buffer_size > 1:
logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Type the input sentence and press return:")
start_id = 0
for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size):
results = []
for batch in make_batches(inputs, cfg, task, max_positions, encode_fn):
bsz = batch.src_tokens.size(0)
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
constraints = batch.constraints
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
if constraints is not None:
constraints = constraints.cuda()
sample = {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
}
translate_start_time = time.time()
translations = task.inference_step(
generator, models, sample, constraints=constraints
)
translate_time = time.time() - translate_start_time
total_translate_time += translate_time
list_constraints = [[] for _ in range(bsz)]
if cfg.generation.constraints:
list_constraints = [unpack_constraints(c) for c in constraints]
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
constraints = list_constraints[i]
results.append(
(
start_id + id,
src_tokens_i,
hypos,
{
"constraints": constraints,
"time": translate_time / len(translations),
},
)
)
# sort output to match input order
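        # line prefixes emitted below: S- source, W- per-sentence translation time,
        # C- constraint, H- hypothesis (tokenized) with base-2 score, D- detokenized
        # hypothesis, P- per-token scores in base 2, A- source-target alignment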
for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
src_str = ''
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
print("S-{}\t{}".format(id_, src_str))
print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
for constraint in info["constraints"]:
print(
"C-{}\t{}".format(
id_, tgt_dict.string(constraint, cfg.common_eval.post_process)
)
)
# Process top predictions
for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print("H-{}\t{}\t{}".format(id_, score, hypo_str))
# detokenized hypothesis
print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str))
print(
"P-{}\t{}".format(
id_,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"].div_(math.log(2)).tolist(),
)
),
)
)
if cfg.generation.print_alignment:
alignment_str = " ".join(
["{}-{}".format(src, tgt) for src, tgt in alignment]
)
print("A-{}\t{}".format(id_, alignment_str))
# update running id_ counter
start_id += len(inputs)
logger.info(
"Total time: {:.3f} seconds; translation time: {:.3f}".format(
time.time() - start_time, total_translate_time
)
)
def cli_main():
parser = options.get_interactive_generation_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
|
bart_ls-main
|
fairseq-py/fairseq_cli/interactive.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
# We need to setup root logger before importing any fairseq libraries.
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators, data_utils
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
from omegaconf import DictConfig, OmegaConf
def main(cfg: FairseqConfig) -> None:
if isinstance(cfg, argparse.Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
if distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg:
        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
if cfg.common.log_file is not None:
handler = logging.FileHandler(filename=cfg.common.log_file)
logger.addHandler(handler)
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
if distributed_utils.is_master(cfg.distributed_training):
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
# Print args
logger.info(cfg)
if cfg.checkpoint.write_checkpoints_asynchronously:
try:
import iopath # noqa: F401
except ImportError:
logging.exception(
"Asynchronous checkpoint writing is specified but iopath is "
"not installed: `pip install iopath`"
)
return
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
assert cfg.criterion, "Please specify criterion to train a model"
# Build model and criterion
if cfg.distributed_training.ddp_backend == "fully_sharded":
with fsdp_enable_wrap(cfg.distributed_training):
model = fsdp_wrap(task.build_model(cfg.model))
else:
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
logger.info("model: {}".format(model.__class__.__name__))
logger.info("criterion: {}".format(criterion.__class__.__name__))
logger.info(
"num. shared model params: {:,} (num. trained: {:,})".format(
sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False)),
sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad)
)
)
logger.info(
"num. expert model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)),
sum(p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad),
)
)
# Load valid dataset (we load training data below, based on the latest checkpoint)
# We load the valid dataset AFTER building the model
data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg)
if cfg.dataset.combine_valid_subsets:
task.load_dataset("valid", combine=True, epoch=1)
else:
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# (optionally) Configure quantization
if cfg.common.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=cfg.common.quantization_config_path,
max_epoch=cfg.optimization.max_epoch,
max_update=cfg.optimization.max_update,
)
else:
quantizer = None
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per device = {} and max sentences per device = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
if cfg.common.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("load_checkpoint") # wait for all workers
max_epoch = cfg.optimization.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while epoch_itr.next_epoch_idx <= max_epoch:
if lr <= cfg.optimization.stop_min_lr:
logger.info(
f"stopping training because current learning rate ({lr}) is smaller "
"than or equal to minimum learning rate "
f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
)
break
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
# ioPath implementation to wait for all asynchronous file writes to complete.
if cfg.checkpoint.write_checkpoints_asynchronously:
logger.info(
"ioPath PathManager waiting for all asynchronous checkpoint "
"writes to finish."
)
PathManager.async_close()
logger.info("ioPath PathManager finished waiting.")
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
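    # the best metric value and the patience counter are cached as attributes on
    # this function object (see the assignments below), so the state persists
    # across calls without any global variables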
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
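    # --update-freq may be given per epoch; GroupedIterator below yields groups of
    # `update_freq` batches so gradients are accumulated over that many batches
    # before each optimizer step (e.g. update_freq=4 roughly emulates a 4x larger batch)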
itr = iterators.GroupedIterator(itr, update_freq)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_file=cfg.common.log_file,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
azureml_logging=(
cfg.common.azureml_logging
if distributed_utils.is_master(cfg.distributed_training)
else False
),
)
progress.update_config(_flatten_config(cfg))
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
logger.info("Start iterating over samples")
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def _flatten_config(cfg: DictConfig):
config = OmegaConf.to_container(cfg)
# remove any legacy Namespaces and replace with a single "args"
namespace = None
for k, v in list(config.items()):
if isinstance(v, argparse.Namespace):
namespace = v
del config[k]
if namespace is not None:
config["args"] = vars(namespace)
return config
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
# Stopping conditions (and an additional one based on validation loss later
# on)
should_stop = False
if num_updates >= max_update:
should_stop = True
logger.info(
f"Stopping training due to "
f"num_updates: {num_updates} >= max_update: {max_update}"
)
training_time_hours = trainer.cumulative_training_time() / (60 * 60)
if (
cfg.optimization.stop_time_hours > 0
and training_time_hours > cfg.optimization.stop_time_hours
):
should_stop = True
logger.info(
f"Stopping training due to "
f"cumulative_training_time: {training_time_hours} > "
f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
)
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or should_stop
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or should_stop
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
should_stop |= should_stop_early(cfg, valid_losses[0])
# Save checkpoint
if do_save or should_stop:
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
)
return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(
shuffle=False, set_dataset_epoch=False # use a fixed valid set
)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for i, sample in enumerate(progress):
if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps:
break
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
if hasattr(task, "post_validate"):
task.post_validate(trainer.get_model(), stats, agg)
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
return valid_losses
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
cfg = convert_namespace_to_omegaconf(args)
if cfg.common.use_plasma_view:
server = PlasmaStore(path=cfg.common.plasma_path)
logger.info(f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}")
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
# if cfg.common.use_plasma_view:
# server.server.kill()
if __name__ == "__main__":
cli_main()
|
bart_ls-main
|
fairseq-py/fairseq_cli/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import sys
from argparse import Namespace
from typing import Iterable, List, Optional
import torch
import fairseq
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter
from fairseq.sequence_scorer import SequenceScorer
from omegaconf import DictConfig
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.eval_lm")
def eval_lm(
models: List[fairseq.models.FairseqModel],
source_dictionary: fairseq.data.Dictionary,
batch_iterator: Iterable,
post_process: Optional[str] = None,
output_word_probs: bool = False,
output_word_stats: bool = False,
target_dictionary: Optional[fairseq.data.Dictionary] = None,
softmax_batch: int = 0,
remove_bos_token: bool = False,
device: Optional[torch.device] = None,
):
"""
Args:
models (List[~fairseq.models.FairseqModel]): list of models to
evaluate. Models are essentially `nn.Module` instances, but
must be compatible with fairseq's `SequenceScorer`.
source_dictionary (~fairseq.data.Dictionary): dictionary for
            applying any relevant post-processing or outputting word
probs/stats.
batch_iterator (Iterable): yield batches of data
post_process (Optional[str]): post-process text by removing BPE,
letter segmentation, etc. Valid options can be found in
fairseq.data.utils.post_process, although not all options
are implemented here.
output_word_probs (Optional[bool]): output words and their
predicted log probabilities
output_word_stats (Optional[bool]): output word statistics such
as word count and average probability
target_dictionary (Optional[~fairseq.data.Dictionary]): output
dictionary (defaults to *source_dictionary*)
        softmax_batch (Optional[int]): if BxT is more than this, will
batch the softmax over vocab to this amount of tokens, in
order to fit into GPU memory
remove_bos_token (Optional[bool]): if True, confirm that the
first token is the beginning-of-sentence symbol (according
to the relevant dictionary) and remove it from the output
device (Optional[torch.device]): device to use for evaluation
(defaults to device of first model parameter)
"""
if target_dictionary is None:
target_dictionary = source_dictionary
if device is None:
device = next(models[0].parameters()).device
gen_timer = StopwatchMeter()
scorer = SequenceScorer(target_dictionary, softmax_batch)
score_sum = 0.0
count = 0
if post_process is not None:
if post_process in {"subword_nmt", "@@ "}:
bpe_cont = post_process.rstrip()
bpe_toks = {
i
for i in range(len(source_dictionary))
if source_dictionary[i].endswith(bpe_cont)
}
else:
raise NotImplementedError(
"--post-process={post_process} is not implemented"
)
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
for sample in batch_iterator:
if "net_input" not in sample:
continue
sample = utils.move_to_cuda(sample, device=device)
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample["ntokens"])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample["id"][i]
tokens = hypo["tokens"]
tgt_len = tokens.numel()
pos_scores = hypo["positional_scores"].float()
if remove_bos_token:
assert hypo["tokens"][0].item() == target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf"))
if inf_scores.any():
                logger.info(
                    "skipping tokens with inf scores: %s",
                    target_dictionary.string(tokens[inf_scores.nonzero()]),
                )
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if output_word_probs or output_word_stats:
w = ""
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(
pos_scores[i].item(), next_prob
)
is_bpe = False
w = ""
if output_word_probs:
logger.info(
str(int(sample_id))
+ " "
+ (
"\t".join(
"{} [{:2f}]".format(x[0], x[1]) for x in word_prob
)
)
)
avg_nll_loss = (
-score_sum / count / math.log(2) if count > 0 else 0
) # convert to base 2
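    # i.e. avg_nll_loss = -(1 / count) * sum_i log2 p(w_i); reporting perplexity as
    # 2 ** avg_nll_loss below keeps the result independent of the original log base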
logger.info(
"Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format(
gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0
)
)
if output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
return {
"loss": avg_nll_loss,
"perplexity": 2 ** avg_nll_loss,
}
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
"""increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen"""
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return "{}\t{}\t{}\t{}\t{}\t{}".format(
self.word,
self.count,
self.log_prob,
self.is_bpe,
self.next_word_prob,
self.count - self.missing_next_words,
)
def main(cfg: DictConfig, **unused_kwargs):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
logger.info(cfg)
if cfg.eval_lm.context_window > 0:
# reduce tokens per sample by the required context window size
cfg.task.tokens_per_sample -= cfg.eval_lm.context_window
# Initialize the task using the current *cfg*
task = tasks.setup_task(cfg.task)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=eval(cfg.common_eval.model_overrides),
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
task=task,
)
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
# Optimize ensemble for generation and set the source and dest dicts on the model
# (required by scorer)
for model in models:
if use_fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
assert len(models) > 0
logger.info(
"num. model params: {:,}".format(sum(p.numel() for p in models[0].parameters()))
)
# Load dataset splits
task.load_dataset(cfg.dataset.gen_subset)
dataset = task.dataset(cfg.dataset.gen_subset)
logger.info(
"{} {} {:,} examples".format(
cfg.task.data, cfg.dataset.gen_subset, len(dataset)
)
)
itr = task.eval_lm_dataloader(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens or 36000,
batch_size=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
*[model.max_positions() for model in models]
),
num_shards=max(
cfg.dataset.num_shards,
cfg.distributed_training.distributed_world_size,
),
shard_id=max(
cfg.dataset.shard_id,
cfg.distributed_training.distributed_rank,
),
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
context_window=cfg.eval_lm.context_window,
)
itr = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
results = eval_lm(
models=models,
source_dictionary=task.source_dictionary,
batch_iterator=itr,
post_process=cfg.common_eval.post_process,
output_word_probs=cfg.eval_lm.output_word_probs,
output_word_stats=cfg.eval_lm.output_word_stats,
target_dictionary=task.target_dictionary,
softmax_batch=cfg.eval_lm.softmax_batch,
remove_bos_token=getattr(cfg.task, "add_bos_token", False),
)
logger.info(
"Loss (base 2): {:.4f}, Perplexity: {:.2f}".format(
results["loss"], results["perplexity"]
)
)
return results
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
|
bart_ls-main
|
fairseq-py/fairseq_cli/eval_lm.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BLEU scoring of generated translations against reference translations.
"""
import argparse
import os
import sys
from fairseq.data import dictionary
from fairseq.scoring import bleu
def get_parser():
parser = argparse.ArgumentParser(
description="Command-line script for BLEU scoring."
)
# fmt: off
parser.add_argument('-s', '--sys', default='-', help='system output')
parser.add_argument('-r', '--ref', required=True, help='references')
parser.add_argument('-o', '--order', default=4, metavar='N',
type=int, help='consider ngrams up to this order')
parser.add_argument('--ignore-case', action='store_true',
help='case-insensitive scoring')
parser.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
parser.add_argument('--sentence-bleu', action='store_true',
help='report sentence-level BLEUs (i.e., with +1 smoothing)')
# fmt: on
return parser
def cli_main():
parser = get_parser()
args = parser.parse_args()
print(args)
assert args.sys == "-" or os.path.exists(
args.sys
), "System output file {} does not exist".format(args.sys)
assert os.path.exists(args.ref), "Reference file {} does not exist".format(args.ref)
dict = dictionary.Dictionary()
def readlines(fd):
for line in fd.readlines():
if args.ignore_case:
yield line.lower()
else:
yield line
if args.sacrebleu:
import sacrebleu
def score(fdsys):
with open(args.ref) as fdref:
print(sacrebleu.corpus_bleu(fdsys, [fdref]).format())
elif args.sentence_bleu:
def score(fdsys):
with open(args.ref) as fdref:
                scorer = bleu.Scorer(bleu.BleuConfig(pad=dict.pad(), eos=dict.eos(), unk=dict.unk()))
for i, (sys_tok, ref_tok) in enumerate(
zip(readlines(fdsys), readlines(fdref))
):
scorer.reset(one_init=True)
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(i, scorer.result_string(args.order))
else:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(
bleu.BleuConfig(
pad=dict.pad(),
eos=dict.eos(),
unk=dict.unk(),
)
)
for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(scorer.result_string(args.order))
if args.sys == "-":
score(sys.stdin)
else:
with open(args.sys, "r") as f:
score(f)
if __name__ == "__main__":
cli_main()
|
bart_ls-main
|
fairseq-py/fairseq_cli/score.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""
import argparse
import random
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("sample_output", help="train output file")
parser.add_argument("remainder_output", help="valid output file")
parser.add_argument("-k", type=int, help="remainder size")
parser.add_argument(
"--lines", action="store_true", help="split lines instead of docs"
)
args = parser.parse_args()
assert args.k is not None
sample = []
remainder = []
num_docs = [0]
def update_sample(doc):
if len(sample) < args.k:
sample.append(doc.copy())
else:
i = num_docs[0]
j = random.randrange(i + 1)
if j < args.k:
remainder.append(sample[j])
sample[j] = doc.copy()
else:
remainder.append(doc.copy())
num_docs[0] += 1
doc.clear()
with open(args.input, "r", encoding="utf-8") as h:
doc = []
for i, line in enumerate(h):
if line.strip() == "": # empty line indicates new document
update_sample(doc)
else:
doc.append(line)
if args.lines:
update_sample(doc)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
if len(doc) > 0:
update_sample(doc)
print(file=sys.stderr, flush=True)
assert len(sample) == args.k
with open(args.sample_output, "w", encoding="utf-8") as out:
first = True
for doc in sample:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
with open(args.remainder_output, "w", encoding="utf-8") as out:
first = True
for doc in remainder:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/split_train_valid_docs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
parser = argparse.ArgumentParser(description="symmetric alignment builer")
# fmt: off
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
# fmt: on
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, "fast_align")
symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal")
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl"
)
# create joined file
joined_file = os.path.join(args.output_dir, "text.joined")
with open(args.source_file, "r", encoding="utf-8") as src, open(
args.target_file, "r", encoding="utf-8"
) as tgt:
with open(joined_file, "w", encoding="utf-8") as joined:
for s, t in zip_longest(src, tgt):
print("{} ||| {}".format(s.strip(), t.strip()), file=joined)
bwd_align_file = os.path.join(args.output_dir, "align.backward")
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, "align.forward")
fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file
)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, "align.backward")
bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file
)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, "aligned")
sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin,
)
assert os.system(sym_cmd) == 0
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/build_sym_alignment.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.average_checkpoints import main
from fairseq.file_io import PathManager
# support fb specific path mananger
try:
from iopath.fb.manifold import ManifoldPathHandler
PathManager.register_handler(ManifoldPathHandler(max_parallel=16, timeout_sec=1800))
except Exception:
pass
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/fb_average_checkpoints.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for decoding"
)
parser.add_argument("--input", required=True, help="input file to decode")
parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.input_format == "piece":
def decode(l):
return "".join(sp.DecodePieces(l))
elif args.input_format == "id":
def decode(l):
return "".join(sp.DecodeIds(l))
else:
raise NotImplementedError
def tok2int(tok):
# remap reference-side <unk> (represented as <<unk>>) to 0
return int(tok) if tok != "<<unk>>" else 0
with open(args.input, "r", encoding="utf-8") as h:
for line in h:
if args.input_format == "id":
print(decode(list(map(tok2int, line.rstrip().split()))))
elif args.input_format == "piece":
print(decode(line.rstrip().split()))
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/spm_decode.py
|
bart_ls-main
|
fairseq-py/scripts/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import shutil
import sys
pt_regexp = re.compile(r"checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt")
pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt")
pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt")
def parse_checkpoints(files):
entries = []
for f in files:
m = pt_regexp_epoch_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
else:
m = pt_regexp_update_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
return entries
def last_n_checkpoints(files, n):
entries = parse_checkpoints(files)
return [x[1] for x in sorted(entries, reverse=True)[:n]]
def every_n_checkpoints(files, n):
entries = parse_checkpoints(files)
return [x[1] for x in sorted(sorted(entries)[::-n])]
def main():
parser = argparse.ArgumentParser(
description=(
"Recursively delete checkpoint files from `root_dir`, "
"but preserve checkpoint_best.pt and checkpoint_last.pt"
)
)
parser.add_argument("root_dirs", nargs="*")
parser.add_argument(
"--save-last", type=int, default=0, help="number of last checkpoints to save"
)
parser.add_argument(
"--save-every", type=int, default=0, help="interval of checkpoints to save"
)
parser.add_argument(
"--preserve-test",
action="store_true",
help="preserve checkpoints in dirs that start with test_ prefix (default: delete them)",
)
parser.add_argument(
"--delete-best", action="store_true", help="delete checkpoint_best.pt"
)
parser.add_argument(
"--delete-last", action="store_true", help="delete checkpoint_last.pt"
)
parser.add_argument(
"--no-dereference", action="store_true", help="don't dereference symlinks"
)
args = parser.parse_args()
files_to_desymlink = []
files_to_preserve = []
files_to_delete = []
for root_dir in args.root_dirs:
for root, _subdirs, files in os.walk(root_dir):
if args.save_last > 0:
to_save = last_n_checkpoints(files, args.save_last)
else:
to_save = []
if args.save_every > 0:
to_save += every_n_checkpoints(files, args.save_every)
for file in files:
if not pt_regexp.fullmatch(file):
continue
full_path = os.path.join(root, file)
if (
not os.path.basename(root).startswith("test_") or args.preserve_test
) and (
(file == "checkpoint_last.pt" and not args.delete_last)
or (file == "checkpoint_best.pt" and not args.delete_best)
or file in to_save
):
if os.path.islink(full_path) and not args.no_dereference:
files_to_desymlink.append(full_path)
else:
files_to_preserve.append(full_path)
else:
files_to_delete.append(full_path)
if len(files_to_desymlink) == 0 and len(files_to_delete) == 0:
print("Nothing to do.")
sys.exit(0)
files_to_desymlink = sorted(files_to_desymlink)
files_to_preserve = sorted(files_to_preserve)
files_to_delete = sorted(files_to_delete)
print("Operations to perform (in order):")
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
print(" - preserve (and dereference symlink): " + file)
if len(files_to_preserve) > 0:
for file in files_to_preserve:
print(" - preserve: " + file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print(" - delete: " + file)
while True:
resp = input("Continue? (Y/N): ")
if resp.strip().lower() == "y":
break
elif resp.strip().lower() == "n":
sys.exit(0)
print("Executing...")
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
realpath = os.path.realpath(file)
print("rm " + file)
os.remove(file)
print("cp {} {}".format(realpath, file))
shutil.copyfile(realpath, file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print("rm " + file)
os.remove(file)
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/rm_pt.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("--gzip", action="store_true")
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, "r")
else:
return open(args.input, "r", encoding="utf-8")
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/count_docs.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for encoding"
)
parser.add_argument(
"--inputs", nargs="+", default=["-"], help="input files to filter/encode"
)
parser.add_argument(
"--outputs", nargs="+", default=["-"], help="path to save encoded outputs"
)
parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
parser.add_argument(
"--min-len",
type=int,
metavar="N",
help="filter sentence pairs with fewer than N tokens",
)
parser.add_argument(
"--max-len",
type=int,
metavar="N",
help="filter sentence pairs with more than N tokens",
)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.output_format == "piece":
def encode(l):
return sp.EncodeAsPieces(l)
elif args.output_format == "id":
def encode(l):
return list(map(str, sp.EncodeAsIds(l)))
else:
raise NotImplementedError
if args.min_len is not None or args.max_len is not None:
def valid(line):
return (args.min_len is None or len(line) >= args.min_len) and (
args.max_len is None or len(line) <= args.max_len
)
else:
        def valid(line):
return True
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
stats = {
"num_empty": 0,
"num_filtered": 0,
}
def encode_line(line):
line = line.strip()
if len(line) > 0:
line = encode(line)
if valid(line):
return line
else:
stats["num_filtered"] += 1
else:
stats["num_empty"] += 1
return None
for i, lines in enumerate(zip(*inputs), start=1):
enc_lines = list(map(encode_line, lines))
if not any(enc_line is None for enc_line in enc_lines):
for enc_line, output_h in zip(enc_lines, outputs):
print(" ".join(enc_line), file=output_h)
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/spm_encode.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
|
bart_ls-main
|
fairseq-py/scripts/spm_train.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Extract text from a JSON Reddit dataset.
"""
import argparse
import gzip
import json
import os
import sys
REPLACE_MAP = {
    "&amp;": "&",
    "&lt;": "<",
    "&gt;": ">",
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_dir")
args = parser.parse_args()
def extract_text(json):
text = ""
def try_add(key):
if (
key in json
and json[key] != ""
and json[key] != "[deleted]"
and json[key] != "[removed]"
):
return json[key] + "\n"
else:
return ""
text += try_add("title")
text += try_add("selftext")
text += try_add("body")
if "children" in json:
for c in json["children"]:
text += extract_text(c)
return text
for filename in os.listdir(args.source_dir):
if not filename.endswith(".jsonl.gz"):
print(f"skipping{filename}", file=sys.stderr)
continue
p = os.path.join(args.source_dir, filename)
print("processing " + p, file=sys.stderr)
with gzip.GzipFile(p, "r") as fin:
json_bytes = fin.read()
json_strs = filter(None, json_bytes.decode("utf-8").split("\n"))
for js in json_strs:
data = json.loads(js)
text = extract_text(data)
for k, v in REPLACE_MAP.items():
text = text.replace(k, v)
if len(text) > 0:
print(text)
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/fb_reddit_text.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import os
import re
import torch
from fairseq.file_io import PathManager
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for fpath in inputs:
with PathManager.open(fpath, "rb") as f:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, "cpu")
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state["model"]
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
"For checkpoint {}, expected list of params: {}, "
"but found: {}".format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
                # NOTE: clone() is needed in case p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state["model"] = averaged_params
return new_state
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt")
else:
pt_regexp = re.compile(r"checkpoint(\d+)\.pt")
files = PathManager.ls(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception(
            "Found {} checkpoint files but need at least {}".format(len(entries), n)
        )
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description="Tool to average the params of input checkpoints to "
"produce a new checkpoint",
)
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
'and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
'and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int,
help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, '
                        'when using --num-update-checkpoints, this will set an upper bound on which update to use. '
                        'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged. '
'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500'
)
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.checkpoint_upper_bound is None or (
args.num_epoch_checkpoints is not None
or args.num_update_checkpoints is not None
), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints"
assert (
args.num_epoch_checkpoints is None or args.num_update_checkpoints is None
), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints"
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs,
num,
is_update_based,
upper_bound=args.checkpoint_upper_bound,
)
print("averaging checkpoints: ", args.inputs)
new_state = average_checkpoints(args.inputs)
with PathManager.open(args.output, "wb") as f:
torch.save(new_state, f)
print("Finished writing averaged checkpoint to {}".format(args.output))
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/average_checkpoints.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
"""Reads in a fairseq output file, and verifies that the constraints
(C- lines) are present in the output (the first H- line). Assumes that
constraints are listed prior to the first hypothesis.
"""
constraints = []
found = 0
total = 0
for line in sys.stdin:
if line.startswith("C-"):
constraints.append(line.rstrip().split("\t")[1])
elif line.startswith("H-"):
text = line.split("\t")[2]
for constraint in constraints:
total += 1
if constraint in text:
found += 1
else:
print(f"No {constraint} in {text}", file=sys.stderr)
constraints = []
print(f"Found {found} / {total} = {100 * found / total:.1f}%")
|
bart_ls-main
|
fairseq-py/scripts/constraints/validate.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
main(args)
|
bart_ls-main
|
fairseq-py/scripts/constraints/extract.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tqdm import tqdm
import json
import os
from nltk import word_tokenize
from pathlib import Path
# import stanza
# nlp = stanza.Pipeline(lang='en', processors={'tokenize': 'spacy'})
# tokenize a sentence
def tokenize(sent):
tokens = ' '.join(word_tokenize(sent.lower()))
return tokens
# doc = nlp(sent)
# return ' '.join(token.text for sentence in doc.sentences for token in sentence.tokens)
def clean_data(text):
text = text.replace('{ vocalsound } ', '')
text = text.replace('{ disfmarker } ', '')
text = text.replace('a_m_i_', 'ami')
text = text.replace('l_c_d_', 'lcd')
text = text.replace('p_m_s', 'pms')
text = text.replace('t_v_', 'tv')
text = text.replace('{ pause } ', '')
text = text.replace('{ nonvocalsound } ', '')
text = text.replace('{ gap } ', '')
return text
def simplify(data_dir, save_dir, oracle=False):
if oracle:
save_dir = Path(save_dir + '-gold')
save_dir.mkdir(parents=True, exist_ok=True)
else:
save_dir = Path(save_dir)
save_dir.mkdir(parents=True, exist_ok=True)
for split in ['train', 'val', 'test']:
data_path = os.path.join(data_dir, f'{split}.jsonl')
data = []
with open(data_path) as f:
for line in f:
data.append(json.loads(line))
queries, sources, targets = [], [], []
targets_raw = []
for i in range(len(data)):
src = []
for k in range(len(data[i]['meeting_transcripts'])):
cur_turn = data[i]['meeting_transcripts'][k]['speaker'].lower() + ': '
cur_turn = cur_turn + tokenize(data[i]['meeting_transcripts'][k]['content'])
src.append(cur_turn)
src = ' '.join(src)
for j in range(len(data[i]['general_query_list'])):
query = tokenize(data[i]['general_query_list'][j]['query'])
queries.append(clean_data(query))
sources.append(clean_data(src))
targets_raw.append(data[i]['general_query_list'][j]['answer'])
targets.append(tokenize(data[i]['general_query_list'][j]['answer']))
for j in range(len(data[i]['specific_query_list'])):
query = tokenize(data[i]['specific_query_list'][j]['query'])
target = tokenize(data[i]['specific_query_list'][j]['answer'])
if oracle:
relevant = []
for span in data[i]['specific_query_list'][j]['relevant_text_span']:
assert len(span) == 2
st, ed = int(span[0]), int(span[1])
for k in range(st, ed + 1):
cur_turn = data[i]['meeting_transcripts'][k]['speaker'].lower() + ': '
cur_turn = cur_turn + tokenize(data[i]['meeting_transcripts'][k]['content'])
relevant.append(cur_turn)
sources.append(clean_data(" ".join(relevant)))
else:
sources.append(clean_data(src))
queries.append(clean_data(query))
targets_raw.append(data[i]['specific_query_list'][j]['answer'])
targets.append(target)
with open(save_dir / f'{split}.source', 'w') as g1, \
open(save_dir / f'{split}.target', 'w') as g2, \
open(save_dir / f'{split}.query', 'w') as g3, \
open(save_dir / f'{split}.target_raw', 'w') as g4:
for q, s, t, t_ in zip(queries, sources, targets, targets_raw):
g1.write(s.strip() + '\n')
g2.write(t.strip() + '\n')
g3.write(q.strip() + '\n')
g4.write(t_.strip() + '\n')
if __name__ == '__main__':
data_dir = '/fsx/xwhan/data/QMSum/data/ALL/jsonl'
save_dir = '/fsx/xwhan/data/QMSum/data/raw'
simplify(data_dir, save_dir, oracle=False)
|
bart_ls-main
|
fairseq-py/scripts/summarization/qmsum_preprocess.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import argparse
from tqdm import tqdm
from fairseq import checkpoint_utils
from fairseq.tasks.summarization import load_langpair_dataset, load_query_based_dataset
from fairseq.models.bart.hub_interface import BARTHubInterface
from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig
from nltk import word_tokenize
# tokenize a sentence
def tokenize(sent):
tokens = ' '.join(word_tokenize(sent.lower()))
return tokens
def main():
"""
Usage::
python scripts/summarization/qmsum_generate.py \
--model-dir /checkpoints/xwhan/qmsum/qmsum_best_r3f.noiseuniform.r3f0.01.bart_large.mep150.sl16384.ls0.1.mq45.pad_q0.mixed.pool4.block_noglobal.mt2.uf2.dr0.1.atdr0.1.actdr0.0.wd0.01.s3.adam.beta9999.eps1e-08.clip0.1.lr5e-05.mu8000.warm200.fp16.cmetricrouge_avg.ngpu8/checkpoint_best.pt \
--data-dir /fsx/xwhan/data/QMSum/data/raw-bin \
--save-dir /fsx/xwhan/data/QMSum/data/raw/valid.hypo \
--split valid \
--bsz 4
SCROLLS submission:
python scripts/summarization/qmsum_generate.py \
--model-dir /checkpoints/xwhan/qmsum/qmsum_best_r3f.noiseuniform.r3f0.01.bart_large.mep150.sl16384.ls0.1.mq45.pad_q0.mixed.pool4.block_noglobal.mt2.uf2.dr0.1.atdr0.1.actdr0.0.wd0.01.s5.adam.beta9999.eps1e-08.clip0.1.lr5e-05.mu8000.warm100.fp16.cmetricrouge_avg.ngpu8/checkpoint_best.pt \
--data-dir /fsx/xwhan/data/scrolls/qmsum/bin \
--save-dir /fsx/xwhan/data/scrolls/qmsum/test.best \
--split test \
--bsz 4 \
--skip-eval
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--data-dir", default="binarized data path", help="text to summarize", type=str
)
parser.add_argument("--save-dir", default=None, type=str)
parser.add_argument("--bsz", default=4, help="batch size", type=int)
parser.add_argument("--max-len", default=256, help="max_len_b", type=int)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument("--split", default='test', type=str)
parser.add_argument("--skip-eval", action='store_true', default=False)
args = parser.parse_args()
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([args.model_dir])
model = models[0]
# task.cfg.required_seq_len_multiple = 1024
# task.cfg.left_pad_source = False
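# task.cfg.query_based decides how the evaluation data is loaded: plain
# source/target pairs, or (for query-based summarization such as QMSum)
# source/target plus a {split}.query file, which is presumably combined with the
# document according to task.cfg.input_pattern.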
if not task.cfg.query_based:
dataset_for_inference = load_langpair_dataset(args.data_dir,
split=args.split,
src='source',
src_dict=task.src_dict,
tgt='target',
tgt_dict=task.tgt_dict,
combine=False,
dataset_impl=task.cfg.dataset_impl,
upsample_primary=task.cfg.upsample_primary,
left_pad_source=task.cfg.left_pad_source,
left_pad_target=task.cfg.left_pad_target,
max_source_positions=task.cfg.max_source_positions,
max_target_positions=task.cfg.max_target_positions,
truncate_source=True,
shuffle=False,
)
else:
dataset_for_inference = load_query_based_dataset(args.data_dir,
split=args.split,
src='source',
src_dict=task.src_dict,
tgt='target',
tgt_dict=task.tgt_dict,
combine=False,
dataset_impl=task.cfg.dataset_impl,
upsample_primary=task.cfg.upsample_primary,
left_pad_source=task.cfg.left_pad_source,
left_pad_target=task.cfg.left_pad_target,
max_source_positions=task.cfg.max_source_positions,
max_target_positions=task.cfg.max_target_positions,
truncate_source=True,
shuffle=False,
max_query_positions=task.cfg.max_query_positions,
input_pattern=task.cfg.input_pattern,
blocksize=task.cfg.block_size,
qry=task.cfg.query_lang,
pad_q_len=task.cfg.pad_query
)
hub_interface = BARTHubInterface(cfg, task, model)
hub_interface.bpe = GPT2BPE(GPT2BPEConfig)
# HACK: override the hub interface's dataset batch size for inference
hub_interface.cfg.dataset.batch_size = args.bsz
all_sents = [item['source'] for item in dataset_for_inference]
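# Beam-search settings used here for QMSum decoding; max_len_b, lenpen and min_len
# look tuned for this dataset, and patience_factor appears to be a repo-specific
# generation option rather than a stock fairseq argument.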
GEN_KWARGS = dict(beam=4, max_len_b=args.max_len, lenpen=4.0, no_repeat_ngram_size=2, min_len=40, patience_factor=1.0)
print(GEN_KWARGS)
all_results = []
with torch.no_grad():
hub_interface = hub_interface.eval()
assert torch.cuda.is_available()
hub_interface = hub_interface.cuda().half()
for idx in tqdm(range(0, len(all_sents), args.bsz)):
batch_sents = all_sents[idx:idx+args.bsz]
batch_hypos = hub_interface.generate(
batch_sents,
skip_invalid_size_inputs=False,
**GEN_KWARGS
)
batch_outputs = [hub_interface.decode(hypos[0]["tokens"]) for hypos in batch_hypos]
all_results.extend(batch_outputs)
import datetime
# suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
# save_path = args.save_dir + '.' + suffix
save_path = args.save_dir
with open(save_path, 'w') as out:
for l in all_results:
out.write(l.strip() + '\n')
if args.skip_eval:
return
# sanity check
from rouge_score import rouge_scorer
import nltk
raw_data_path = args.data_dir[:-len("-bin")]
all_refs = [l.strip() for l in open(f'{raw_data_path}/{args.split}.tgt').readlines()]
scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rouge2', 'rougeL'], use_stemmer=False)
rouge1 = rouge2 = rougel = 0.0
for ref, pred in zip(all_refs, all_results):
ref = "\n".join(nltk.sent_tokenize(ref))
pred = "\n".join(nltk.sent_tokenize(pred))
score = scorer.score(ref, pred)
rouge1 += score['rouge1'].fmeasure
rouge2 += score['rouge2'].fmeasure
rougel += score['rougeL'].fmeasure
print(f'Rouge scorer results: R-1 {rouge1 / len(all_results)}, R-2 {rouge2 / len(all_results)}, R-L {rougel / len(all_results)} ')
import os
os.system(f'./scripts/summarization/eval_rouge.sh {save_path} {raw_data_path}/{args.split}.target')
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/summarization/qmsum_generate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Parallel summarization evaluation using submitit: the test split is sharded across
single-GPU jobs and the decoded outputs are merged before ROUGE scoring.
"""
from tqdm import tqdm
import submitit
from pathlib import Path
import math
import os
import torch
import argparse
from fairseq import checkpoint_utils
from fairseq.tasks.summarization import load_langpair_dataset
from fairseq.models.bart.hub_interface import BARTHubInterface
from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig
def get_shared_folder() -> Path:
return Path('/checkpoints/xwhan/eval_summ_jobs')
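# generate() runs on a single GPU inside a submitit job: it loads the full
# evaluation split, decodes only its contiguous shard (shard_id out of args.shards)
# and returns the hypotheses; main() submits one job per shard and concatenates the
# results in shard order before scoring.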
def generate(args, shard_id, generate_args):
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([args.model_dir])
model = models[0]
task.cfg.required_seq_len_multiple = 1024
task.cfg.left_pad_source = False
dataset_for_inference = load_langpair_dataset(args.data_dir,
split=args.split,
src='src' if 'tv' not in args.data_dir else 'source',
src_dict=task.src_dict,
tgt='tgt' if 'tv' not in args.data_dir else 'target',
tgt_dict=task.tgt_dict,
combine=False,
dataset_impl=task.cfg.dataset_impl,
upsample_primary=task.cfg.upsample_primary,
left_pad_source=task.cfg.left_pad_source,
left_pad_target=task.cfg.left_pad_target,
max_source_positions=task.cfg.max_source_positions,
max_target_positions=task.cfg.max_target_positions,
truncate_source=True,
shuffle=False,
)
hub_interface = BARTHubInterface(cfg, task, model)
bpe_cfg = GPT2BPEConfig()
hub_interface.bpe = GPT2BPE(bpe_cfg)
hub_interface.cfg.dataset.batch_size = args.bsz
all_sents = [item['source'] for item in dataset_for_inference]
shard_size = math.ceil(len(all_sents) / args.shards)
start_idx = shard_id * shard_size
shard_sents = all_sents[start_idx:start_idx + shard_size]
shard_results = []
with torch.no_grad():
hub_interface = hub_interface.eval()
assert torch.cuda.is_available()
hub_interface = hub_interface.cuda().half()
for idx in tqdm(range(0, len(shard_sents), args.bsz)):
batch_sents = shard_sents[idx:idx+args.bsz]
batch_hypos = hub_interface.generate(
batch_sents,
skip_invalid_size_inputs=False,
**generate_args
)
batch_outputs = [hub_interface.decode(hypos[0]["tokens"]) for hypos in batch_hypos]
shard_results.extend(batch_outputs)
return shard_results
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--data-dir", default="binarized data path", help="text to summarize", type=str
)
parser.add_argument("--save-dir", default=None, type=str)
parser.add_argument("--bsz", default=4, help="batch size", type=int)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument("--split", default='test', type=str)
parser.add_argument("--shards", default=8, type=int)
parser.add_argument("--skip-eval", action='store_true', default=False)
args = parser.parse_args()
data_name = args.data_dir.split('/')[-1][:-len("-bin")]
raw_data_path = args.data_dir[:-len("-bin")]
if 'gov_report' in args.data_dir:
generate_args = dict(beam=4, max_len_b=740, lenpen=4.0, no_repeat_ngram_size=0, min_len=50)
elif 'tv' in args.data_dir:
generate_args = dict(beam=4, max_len_b=640, lenpen=5.0, no_repeat_ngram_size=3, min_len=50)
elif 'summscreen' in args.data_dir:
generate_args = dict(beam=4, max_len_b=350, lenpen=4.0, no_repeat_ngram_size=4, min_len=50)
elif 'booksum' in args.data_dir:
generate_args = dict(beam=4, max_len_b=550, lenpen=4.0, no_repeat_ngram_size=3, min_len=20)
elif 'pubmed' in args.data_dir:
generate_args = dict(beam=4, max_len_b=400, lenpen=4.0, no_repeat_ngram_size=3, min_len=40)
elif 'arxiv' in args.data_dir:
generate_args = dict(beam=4, max_len_b=300, lenpen=5.0, no_repeat_ngram_size=4, min_len=50)
else:
generate_args = dict(beam=4, max_len_b=256, lenpen=2.0, no_repeat_ngram_size=3, min_len=50)
print(f'Generating parameters {generate_args}')
executor = submitit.AutoExecutor(folder=get_shared_folder() / "%j")
executor.update_parameters(
mem_gb=None,
gpus_per_node=1,
tasks_per_node=1,
cpus_per_task=10,
nodes=1,
slurm_time=120,
timeout_min=120,
slurm_partition="lowpri",
slurm_job_name=f"eval_summ_{data_name}",
slurm_exclude=os.environ.get("EXCLUDED_HOSTS", None)
)
jobs = []
for shard_id in range(args.shards):
job = executor.submit(generate, args, shard_id, generate_args)
jobs.append(job)
all_results = []
for job in jobs:
all_results.extend(job.task(0).result())
import datetime
# suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
# save_path = args.save_dir + '.' + suffix
save_path = args.save_dir
with open(save_path, 'w') as out:
for l in all_results:
out.write(l.strip() + '\n')
if args.skip_eval:
return
from rouge_score import rouge_scorer
import nltk
all_refs = [l.strip() for l in open(f'{raw_data_path}/{args.split}.tgt').readlines()]
scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rouge2', 'rougeL'], use_stemmer=False)
rouge1 = rouge2 = rougel = 0.0
for ref, pred in zip(all_refs, all_results):
ref = "\n".join(nltk.sent_tokenize(ref))
pred = "\n".join(nltk.sent_tokenize(pred))
score = scorer.score(ref, pred)
rouge1 += score['rouge1'].fmeasure
rouge2 += score['rouge2'].fmeasure
rougel += score['rougeL'].fmeasure
print(f'Rouge scorer results: R-1 {rouge1 / len(all_results)}, R-2 {rouge2 / len(all_results)}, R-L {rougel / len(all_results)} ')
os.system(f'./scripts/summarization/eval_rouge.sh {save_path} {raw_data_path}/{args.split}.tgt')
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/summarization/eval_generation_submitit.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import argparse
from tqdm import tqdm
from fairseq import checkpoint_utils
from fairseq.tasks.summarization import load_langpair_dataset
from fairseq.models.bart.hub_interface import BARTHubInterface
from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig
import nltk
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--data-dir", default="binarized data path", help="text to summarize", type=str
)
parser.add_argument("--save-dir", default=None, type=str)
parser.add_argument("--bsz", default=4, help="batch size", type=int)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument("--split", default='test', type=str)
parser.add_argument("--skip-eval", action='store_true', default=False)
args = parser.parse_args()
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([args.model_dir])
model = models[0]
print(f"Num of model parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
task.cfg.required_seq_len_multiple = 1024
task.cfg.left_pad_source = False
dataset_for_inference = load_langpair_dataset(args.data_dir,
split=args.split,
src='src',
src_dict=task.src_dict,
tgt='tgt',
tgt_dict=task.tgt_dict,
combine=False,
dataset_impl=task.cfg.dataset_impl,
upsample_primary=task.cfg.upsample_primary,
left_pad_source=task.cfg.left_pad_source,
left_pad_target=task.cfg.left_pad_target,
max_source_positions=task.cfg.max_source_positions,
max_target_positions=task.cfg.max_target_positions,
truncate_source=True,
shuffle=False,
pad_to_multiple=task.cfg.required_seq_len_multiple,
)
hub_interface = BARTHubInterface(cfg, task, model)
hub_interface.bpe = GPT2BPE(GPT2BPEConfig)
hub_interface.cfg.dataset.batch_size = args.bsz
all_sents = [item['source'] for item in dataset_for_inference]
if 'gov_report' in args.data_dir:
generate_args = dict(beam=4, max_len_b=1024, lenpen=3.0, no_repeat_ngram_size=3, min_len=60)
elif 'summscreen' in args.data_dir:
generate_args = dict(beam=4, max_len_b=350, lenpen=2.0, no_repeat_ngram_size=3, min_len=50)
elif 'booksum' in args.data_dir:
generate_args = dict(beam=4, max_len_b=320, lenpen=2.0, no_repeat_ngram_size=4, min_len=20)
else:
generate_args = dict(beam=4, max_len_b=256, lenpen=2.0, no_repeat_ngram_size=4, min_len=50)
print(f'Generating parameters {generate_args}')
all_results = []
with torch.no_grad():
hub_interface = hub_interface.eval()
assert torch.cuda.is_available()
hub_interface = hub_interface.cuda()
for idx in tqdm(range(0, len(all_sents), args.bsz)):
batch_sents = all_sents[idx:idx+args.bsz]
batch_hypos = hub_interface.generate(
batch_sents,
skip_invalid_size_inputs=False,
**generate_args
)
batch_outputs = [hub_interface.decode(hypos[0]["tokens"]) for hypos in batch_hypos]
all_results.extend(batch_outputs)
save_path = args.save_dir
with open(save_path, 'w') as out:
for l in all_results:
out.write(l.strip() + '\n')
if args.skip_eval:
return
raw_data_path = args.data_dir[:-len("-bin")]
from rouge_score import rouge_scorer
all_refs = [l.strip() for l in open(f'{raw_data_path}/{args.split}.tgt').readlines()]
scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rouge2', 'rougeL'], use_stemmer=False)
rouge1 = rouge2 = rougel = 0.0
for ref, pred in zip(all_refs, all_results):
ref = "\n".join(nltk.sent_tokenize(ref))
pred = "\n".join(nltk.sent_tokenize(pred))
score = scorer.score(ref, pred)
rouge1 += score['rouge1'].fmeasure
rouge2 += score['rouge2'].fmeasure
rougel += score['rougeL'].fmeasure
print(f'Rouge (by rouge_score) results: R-1 {rouge1 / len(all_results)}, R-2 {rouge2 / len(all_results)}, R-L {rougel / len(all_results)} ')
save_path = args.save_dir
with open(save_path, 'w') as out:
for l in all_results:
out.write(l.strip() + '\n')
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/summarization/long_generate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# from datasets import load_dataset
import torch
import argparse
from tqdm import tqdm
from fairseq import checkpoint_utils
from fairseq.tasks.summarization import load_query_based_dataset
from fairseq.models.bart.hub_interface import BARTHubInterface
from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig
from collections import defaultdict
from fairseq.tasks.qa import f1_score, exact_match_score
import numpy as np
import json
"""
Aggregate predictions for the same question and use the maximum F1/EM score over all gold answers.
"""
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
assert isinstance(prediction, str)
assert isinstance(ground_truths, list)
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--data-dir", default="binarized data path", help="text to summarize", type=str
)
parser.add_argument("--bsz", default=1, help="batch size", type=int)
parser.add_argument("--data-name", default='qasper', type=str)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument("--split", default='test', type=str)
parser.add_argument("--combine-instances", action='store_true', default=False)
parser.add_argument("--json-gold", action='store_true', default=False, help='load json format groundtruth')
parser.add_argument("--save-dir", default=None, type=str)
parser.add_argument("--skip-eval", action='store_true', default=False)
args = parser.parse_args()
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([args.model_dir])
model = models[0]
generate_args_narrative = dict(beam=4, max_len_b=20, lenpen=1.0, no_repeat_ngram_size=3)
generate_args_qasper = dict(beam=4, max_len_b=80, lenpen=1.0, no_repeat_ngram_size=3)
generate_args_quality = dict(beam=4, max_len_b=50, lenpen=3.0, no_repeat_ngram_size=3)
if 'narrative' in args.data_name:
generate_args = generate_args_narrative
elif 'quality' in args.data_name:
generate_args = generate_args_quality
elif 'contract' in args.data_name:
generate_args = dict(beam=4, max_len_b=6, lenpen=3.0, no_repeat_ngram_size=3)
else:
generate_args = generate_args_qasper
print(f'generating arguments {generate_args}')
task.cfg.required_seq_len_multiple = 1024
task.cfg.left_pad_source = False
dataset_for_inference = load_query_based_dataset(args.data_dir,
split=args.split,
src='source',
src_dict=task.src_dict,
tgt='target',
tgt_dict=task.tgt_dict,
combine=False,
dataset_impl=task.cfg.dataset_impl,
upsample_primary=task.cfg.upsample_primary,
left_pad_source=task.cfg.left_pad_source,
left_pad_target=task.cfg.left_pad_target,
max_source_positions=task.cfg.max_source_positions,
max_target_positions=task.cfg.max_target_positions,
truncate_source=True,
shuffle=False,
max_query_positions=task.cfg.max_query_positions,
input_pattern=task.cfg.input_pattern,
blocksize=task.cfg.block_size,
qry=task.cfg.query_lang,
pad_q_len=task.cfg.pad_query
)
hub_interface = BARTHubInterface(cfg, task, model)
hub_interface.bpe = GPT2BPE(GPT2BPEConfig)
# HACK: override the hub interface's dataset batch size for inference
hub_interface.cfg.dataset.batch_size = args.bsz
all_sents = [item['source'] for item in dataset_for_inference]
all_results = []
with torch.no_grad():
hub_interface = hub_interface.eval()
assert torch.cuda.is_available()
hub_interface = hub_interface.cuda().half()
for idx in tqdm(range(0, len(all_sents), args.bsz)):
batch_sents = all_sents[idx:idx+args.bsz]
batch_hypos = hub_interface.generate(
batch_sents,
skip_invalid_size_inputs=False,
**generate_args
)
batch_outputs = [hub_interface.decode(hypos[0]["tokens"]) for hypos in batch_hypos]
all_results.extend(batch_outputs)
if args.save_dir:
with open(args.save_dir, 'w') as out:
for l in all_results:
out.write(l.strip() + '\n')
if args.skip_eval:
return
# load ground-truth questions, answers and source documents
raw_data_path = args.data_dir[:-len("-bin")]
split = 'val' if args.split == 'valid' else 'test'
questions = [l.strip() for l in open(f'{raw_data_path}/{split}.query').readlines()]
answers = [l.strip() for l in open(f'{raw_data_path}/{split}.target').readlines()]
inputs = [l.strip() for l in open(f'{raw_data_path}/{split}.source').readlines()]
predictions = []
golds = []
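# With --combine-instances, consecutive rows sharing the same question and the same
# 1000-character document prefix are merged into one example: the first prediction
# of each group is kept, and all gold answers in the group become alternative
# references for metric_max_over_ground_truths.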
if args.combine_instances:
last_question = None
last_input = None
curr_gold = []
for q, pred, gold, doc in zip(questions, all_results, answers, inputs):
if q != last_question or (last_input is None or doc[:1000] != last_input[:1000]):
predictions.append(pred)
if len(curr_gold) > 0:
golds.append(curr_gold)
curr_gold = []
curr_gold.append(gold)
last_question = q
last_input = doc
if curr_gold:
golds.append(curr_gold)
else:
for pred, gold in zip(all_results, answers):
predictions.append(pred)
if args.json_gold:
golds.append(json.loads(gold))
else:
golds.append([gold])
ems, f1s = [], []
for pred, grounds in zip(predictions, golds):
ems.append(metric_max_over_ground_truths(exact_match_score, pred, grounds))
f1s.append(metric_max_over_ground_truths(f1_score, pred, grounds))
print(f'Mean EM: {np.mean(ems)}, f1: {np.mean(f1s)} of {len(ems)} examples on {split}')
if __name__ == "__main__":
main()
|
bart_ls-main
|
fairseq-py/scripts/qa/qa_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models.bart import BARTModel
from fairseq.tasks.long_denoising import LongDenoisingTask
from fairseq.tasks.denoising import DenoisingTask
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
import os
import torch
hub = BARTModel.from_pretrained('/data/home/xwhan/fairseq-py/checkpoints/bart.large', checkpoint_file='model.pt')
task = hub.task
bart = hub.model
model_args = hub.cfg.model
model_args.max_source_positions = 1024 * 16
model_args.max_target_positions = 1024
model_args.alibi = False
model_args.pooling_layers = 4
model_args.mean_noise_span_length = 3
model_args.noise_density = 0.0625
checkpoint_path = "/data/home/xwhan/fairseq-py/checkpoints/bart.large"
dictionary = DenoisingTask.load_dictionary(os.path.join(checkpoint_path, 'dict.txt'))
state = torch.load(os.path.join(checkpoint_path, 'model.pt'), map_location=torch.device('cpu'))
task = LongDenoisingTask(model_args, dictionary)
# task = DenoisingTask(model_args, dictionary)
long_cfg = convert_namespace_to_omegaconf(model_args)
long_model = task.build_model(long_cfg.model)
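# Weight transfer from BART into the long model: encoder/decoder layers and layer
# norms are copied as-is, the token-embedding matrix is grown to the new vocabulary
# with the extra sentinel rows initialized from BART's last embedding row, and
# (unless ALiBi is used) the learned 1k positional embeddings are tiled by
# repeatedly copying rows 2: of the original table to cover the longer context.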
##### encoder stuff #####
# 1. embed_tokens and layernorm_embedding
vocab_size, _ = bart.encoder.embed_tokens.weight.shape
new_vocab_size, embed_dim = long_model.encoder.embed_tokens.weight.shape
print('old embedding matrix size from BART', vocab_size)
print('new embedding matrix size', new_vocab_size)
new_embed_tokens = bart.encoder.embed_tokens.weight.new_empty(new_vocab_size, embed_dim)
new_embed_tokens[:vocab_size] = bart.encoder.embed_tokens.weight
for idx in range(vocab_size, new_vocab_size):
new_embed_tokens[idx] = bart.encoder.embed_tokens.weight[-1]
long_model.encoder.embed_tokens.weight.data = new_embed_tokens
# (alternative: copy the embedding table directly, without extending the vocabulary)
# long_model.encoder.embed_tokens.load_state_dict(bart.encoder.embed_tokens.state_dict())
long_model.encoder.layernorm_embedding.load_state_dict(bart.encoder.layernorm_embedding.state_dict())
# 2. attention layers
long_model.encoder.layers.load_state_dict(bart.encoder.layers.state_dict(), strict=False)
# 3. embed_positions, longer
if not model_args.alibi:
pos_limit, _ = bart.encoder.embed_positions.weight.shape
new_pos_limit, embed_dim = long_model.encoder.embed_positions.weight.shape
new_pos_embed = bart.encoder.embed_positions.weight.new_empty(new_pos_limit, embed_dim)
step = pos_limit - 2
for start in range(2, new_pos_limit, step):
new_pos_embed[start:start+step] = bart.encoder.embed_positions.weight[2:]
long_model.encoder.embed_positions.weight.data = new_pos_embed
##### decoder stuff #####
long_model.decoder.layernorm_embedding.load_state_dict(bart.decoder.layernorm_embedding.state_dict())
if not model_args.alibi:
# 2. embed_positions, longer
long_model.decoder.embed_positions.load_state_dict(bart.decoder.embed_positions.state_dict())
# decoder attention layers
long_model.decoder.layers.load_state_dict(bart.decoder.layers.state_dict())
save_path = '/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3'
dictionary.save(os.path.join(save_path, 'dict.txt'))
state['args'] = model_args
state['model'] = long_model.state_dict()
if 'criterion' in state:
del state['criterion']
state['extra_state'] = {"epoch": 0}
state['last_optimizer_state'] = None
torch.save(state, os.path.join(save_path, 'model.pt'))
|
bart_ls-main
|
fairseq-py/scripts/long_denoise/initialize_models_long_denoising.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.tasks.denoising import DenoisingTask
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
import os
import torch
from fairseq.tasks.masked_lm import MaskedLMTask
print('Loading RoBERTa weights...')
checkpoint_path = "/data/home/xwhan/fairseq-py/checkpoints/roberta.base"
dictionary = MaskedLMTask.load_dictionary(os.path.join(checkpoint_path, 'dict.txt'))
state = torch.load(os.path.join(checkpoint_path, 'model.pt'), map_location=torch.device('cpu'))
roberta_cfg = convert_namespace_to_omegaconf(state['args'])
task = MaskedLMTask(state['args'], dictionary)
roberta = task.build_model(roberta_cfg.model)
roberta.load_state_dict(state['model'], strict=True, model_cfg=roberta_cfg.model)
print('Loading BART weights...')
bart_path = "/data/home/xwhan/fairseq-py/checkpoints/bart.base"
dictionary = DenoisingTask.load_dictionary(os.path.join(bart_path, 'dict.txt'))
state = torch.load(os.path.join(bart_path, 'model.pt'), map_location=torch.device('cpu'))
bart_cfg = convert_namespace_to_omegaconf(state['args'])
task = DenoisingTask(state['args'], dictionary)
bart = task.build_model(bart_cfg.model)
bart.load_state_dict(state['model'], strict=True, model_cfg=bart_cfg.model)
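# This script assembles a model-based denoising checkpoint: a small generator
# (the first `generator_layers` RoBERTa layers plus its LM head) and a
# BART-initialized seq2seq model; the commented-out blocks below keep alternative
# 2k/4k/8k configurations, with the 16k / span-3 setting left active.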
from fairseq.tasks.model_based_denoising import ModelDenoisingTask
print(state['args'])
model_args = state['args']
model_args.arch = 'loco_base'
model_args.max_target_positions = 1024
# # 4k models, span 5
# model_args.xformer_config = '{"block_size": 1024, "max_seq_len": 4096}'
# model_args.max_source_positions = 1024 * 4
# model_args.mean_noise_span_length = 5
# model_args.noise_density = 0.2
# model_args.sample_ratio = 0.5
# model_args.tokens_per_sample = 1024 * 4
# 4k models, span 3
# model_args.xformer_config = '{"block_size": 1024, "max_seq_len": 4096}'
# model_args.max_source_positions = 1024 * 4
# model_args.mean_noise_span_length = 3
# model_args.noise_density = 0.1
# model_args.sample_ratio = 0.2
# model_args.tokens_per_sample = 1024 * 4
# # 8k models, span 3
# model_args.max_source_positions = 1024 * 8
# model_args.mean_noise_span_length = 3
# model_args.noise_density = 0.125
# model_args.sample_ratio = 0.3
# model_args.tokens_per_sample = 1024 * 8
# 16k models, span 3
model_args.max_source_positions = 1024 * 16
model_args.mean_noise_span_length = 3
model_args.noise_density = 1 / 16
model_args.sample_ratio = 0.2
model_args.tokens_per_sample = 1024 * 16
model_args.generator_layers = 6
# # 2k model
# model_args.max_source_positions = 1024 * 8
# model_args.mean_noise_span_length = 5
# model_args.noise_density = 0.1
# model_args.sample_ratio = 0.2
# model_args.tokens_per_sample = 1024 * 8
# pooling layers
model_args.pooling_layers = 4
task = ModelDenoisingTask(model_args, dictionary)
long_cfg = convert_namespace_to_omegaconf(model_args)
long_model = task.build_model(long_cfg.model)
print("initializing the generator from roberta")
roberta = roberta.encoder
# position embeddings
pos_limit, _ = roberta.sentence_encoder.embed_positions.weight.shape
new_pos_limit, embed_dim = long_model.generator.sentence_encoder.embed_positions.weight.shape
new_pos_embed = roberta.sentence_encoder.embed_positions.weight.new_empty(new_pos_limit, embed_dim)
step = pos_limit - 2
for start in range(2, new_pos_limit, step):
new_pos_embed[start:start+step] = roberta.sentence_encoder.embed_positions.weight[2:]
long_model.generator.sentence_encoder.embed_positions.weight.data = new_pos_embed
# vocab embedding matrix
vocab_size, _ = roberta.sentence_encoder.embed_tokens.weight.shape
new_vocab_size, embed_dim = long_model.generator.sentence_encoder.embed_tokens.weight.shape
print(f'roberta vocab size: {vocab_size}')
print(f'generator vocab size: {new_vocab_size}')
new_embed_tokens = roberta.sentence_encoder.embed_tokens.weight.new_empty(new_vocab_size, embed_dim)
new_embed_tokens[:vocab_size] = roberta.sentence_encoder.embed_tokens.weight
for idx in range(vocab_size, new_vocab_size):
new_embed_tokens[idx] = roberta.sentence_encoder.embed_tokens.weight[-1] # initialize with <mask>
long_model.generator.sentence_encoder.embed_tokens.weight.data = new_embed_tokens
# layers and lm head
long_model.generator.lm_head.dense.load_state_dict(roberta.lm_head.dense.state_dict())
long_model.generator.lm_head.layer_norm.load_state_dict(roberta.lm_head.layer_norm.state_dict())
long_model.generator.sentence_encoder.layernorm_embedding.load_state_dict(roberta.sentence_encoder.layernorm_embedding.state_dict())
long_model.generator.sentence_encoder.layers.load_state_dict(roberta.sentence_encoder.layers[:model_args.generator_layers].state_dict())
print("done")
print("initializing the seq2seq model from bart")
##### encoder stuff #####
## embed_tokens
vocab_size, _ = bart.encoder.embed_tokens.weight.shape
new_vocab_size, embed_dim = long_model.encoder.embed_tokens.weight.shape
print('old embedding matrix size from BART', vocab_size)
print('new embedding matrix size', new_vocab_size)
# NOTE: how best to initialize these sentinel embeddings is an open question; here they copy BART's last (<mask>) embedding row
new_embed_tokens = bart.encoder.embed_tokens.weight.new_empty(new_vocab_size, embed_dim)
new_embed_tokens[:vocab_size] = bart.encoder.embed_tokens.weight
for idx in range(vocab_size, new_vocab_size):
new_embed_tokens[idx] = bart.encoder.embed_tokens.weight[-1] # initialize with <mask>
long_model.encoder.embed_tokens.weight.data = new_embed_tokens
## layernorm_embedding
long_model.encoder.layernorm_embedding.load_state_dict(bart.encoder.layernorm_embedding.state_dict())
## encoder layers
long_model.encoder.layers.load_state_dict(bart.encoder.layers.state_dict(), strict=False)
## embed positions
pos_limit, _ = bart.encoder.embed_positions.weight.shape
new_pos_limit, embed_dim = long_model.encoder.embed_positions.weight.shape
new_pos_embed = bart.encoder.embed_positions.weight.new_empty(new_pos_limit, embed_dim)
step = pos_limit - 2
for start in range(2, new_pos_limit, step):
new_pos_embed[start:start+step] = bart.encoder.embed_positions.weight[2:]
long_model.encoder.embed_positions.weight.data = new_pos_embed
##### decoder stuff #####
long_model.decoder.layernorm_embedding.load_state_dict(bart.decoder.layernorm_embedding.state_dict())
# 2. embed_positions, longer
long_model.decoder.embed_positions.load_state_dict(bart.decoder.embed_positions.state_dict())
# decoder attention layers
long_model.decoder.layers.load_state_dict(bart.decoder.layers.state_dict())
save_path = '/data/home/xwhan/fairseq-py/checkpoints/md.base.16k.pool4.span3.r6'
dictionary.save(os.path.join(save_path, 'dict.txt'))
state['args'] = model_args
state['model'] = long_model.state_dict()
if 'criterion' in state:
del state['criterion']
state['extra_state'] = {"epoch": 0}
state['last_optimizer_state'] = None
torch.save(state, os.path.join(save_path, 'model.pt'))
|
bart_ls-main
|
fairseq-py/scripts/long_denoise/initialize_models_model_denoising.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import distutils.command.clean
import glob
import os
import re
import shutil
import sys
import setuptools
import torch
from torch.utils.cpp_extension import (
CUDA_HOME,
BuildExtension,
CppExtension,
CUDAExtension,
)
this_dir = os.path.dirname(os.path.abspath(__file__))
def fetch_requirements():
with open("requirements.txt") as f:
reqs = f.read().strip().split("\n")
return reqs
# https://packaging.python.org/guides/single-sourcing-package-version/
def find_version(version_file_path):
with open(version_file_path) as version_file:
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M
)
# The following is used to build release packages.
# Users should never use it.
suffix = os.getenv("XFORMERS_VERSION_SUFFIX", "")
if version_match:
return version_match.group(1) + suffix
raise RuntimeError("Unable to find version string.")
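# The CUDA extension (using the bundled sputnik headers) is built only when a CUDA
# toolchain is detected (torch.cuda.is_available() and CUDA_HOME set) or when
# FORCE_CUDA=1 is exported; extra nvcc flags can be passed via the NVCC_FLAGS
# environment variable.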
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(
this_dir, "xformers", "components", "attention", "csrc"
)
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) + glob.glob(
os.path.join(extensions_dir, "autograd", "*.cpp")
)
sources = main_file + source_cpu
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sputnik_dir = os.path.join(this_dir, "third_party", "sputnik")
extension = CppExtension
define_macros = []
extra_compile_args = {"cxx": ["-O3"]}
if sys.platform == "win32":
define_macros += [("xformers_EXPORTS", None)]
extra_compile_args["cxx"].append("/MP")
elif "OpenMP not found" not in torch.__config__.parallel_info():
extra_compile_args["cxx"].append("-fopenmp")
include_dirs = [extensions_dir]
if (torch.cuda.is_available() and ((CUDA_HOME is not None))) or os.getenv(
"FORCE_CUDA", "0"
) == "1":
extension = CUDAExtension
sources += source_cuda
include_dirs += [sputnik_dir]
nvcc_flags = os.getenv("NVCC_FLAGS", "")
if nvcc_flags == "":
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(" ")
extra_compile_args["nvcc"] = nvcc_flags
sources = [os.path.join(extensions_dir, s) for s in sources]
ext_modules = [
extension(
"xformers._C",
sorted(sources),
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
class clean(distutils.command.clean.clean): # type: ignore
def run(self):
with open(".gitignore", "r") as f:
ignores = f.read()
for wildcard in filter(None, ignores.split("\n")):
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
if __name__ == "__main__":
setuptools.setup(
name="xformers",
description="XFormers: A collection of composable Transformer building blocks.",
version=find_version(os.path.join(this_dir, "xformers", "__init__.py")),
setup_requires=[],
install_requires=fetch_requirements(),
packages=setuptools.find_packages(exclude=("tests", "tests.*")),
ext_modules=get_extensions(),
cmdclass={
"build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
"clean": clean,
},
python_requires=">=3.6",
author="Facebook AI Research",
author_email="lefaudeux@fb.com",
long_description="XFormers: A collection of composable Transformer building blocks."
+ "XFormers aims at being able to reproduce most architectures in the Transformer-family SOTA,"
+ "defined as compatible and combined building blocks as opposed to monolithic models",
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Operating System :: OS Independent",
],
zip_safe=False,
)
|
bart_ls-main
|
xformers/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple, TypeVar
import torch
import torch.nn as nn
from pyre_extensions import TypeVarTuple, Unpack
from torch import Tensor
from typing_extensions import Literal as L
Ts = TypeVarTuple("Ts")
N = TypeVar("N", bound=int)
# flake8: noqa
"""
Tensor shape signatures can get complicated and hard to debug. We are basically
writing code at the level of types.
It's helpful to have type-level unit tests for the stubs.
Take care to add both a positive and a negative test for your stub. That way,
even if someone changes the stub to return a bad type like `Any`, we will still
be warned by an unused-ignore error. Otherwise, `y: Tensor[int, L[2], L[3]] =
foo(x)` would silently pass because `Any` is compatible with any type.
Use `pyre --output=json | pyre-upgrade` to add the `pyre-fixme` comment for you.
"""
def test_sin() -> None:
x: Tensor[int, L[2], L[3]]
same_shape_as_x: Tensor[int, L[2], L[3]]
not_same_shape_as_x: Tensor[int, L[2], L[99]]
y: Tensor[int, L[2], L[3]] = torch.sin(x)
# pyre-fixme[9]: y2 has type `Tensor[int, typing_extensions.Literal[2],
# typing_extensions.Literal[4]]`; used as `Tensor[int,
# typing_extensions.Literal[2], typing_extensions.Literal[3]]`.
y2: Tensor[int, L[2], L[4]] = torch.sin(x)
y3: Tensor[int, L[2], L[3]] = torch.sin(x, out=same_shape_as_x)
# pyre-fixme[6]: Expected `Tensor[Variable[torch.DType], *torch.Ts]` for 2nd
# param but got `Tensor[int, int, int]`.
# pyre-fixme[9]: y4 has type `Tensor[int, typing_extensions.Literal[2],
# typing_extensions.Literal[4]]`; used as `Tensor[int,
# typing_extensions.Literal[2], typing_extensions.Literal[3]]`.
y4: Tensor[int, L[2], L[4]] = torch.sin(x, out=not_same_shape_as_x)
y5: Tensor[int, L[2], L[3]] = torch.sin(x, out=None)
def test_unsqueeze() -> None:
x: Tensor[int, L[2], L[3]]
y: Tensor[int, L[1], L[2], L[3]] = x.unsqueeze(0)
y_torch_function: Tensor[int, L[1], L[2], L[3]] = torch.unsqueeze(x, 0)
y2: Tensor[int, L[2], L[1], L[3]] = x.unsqueeze(1)
y3: Tensor[int, L[2], L[3], L[1]] = x.unsqueeze(-1)
# pyre-fixme[9]: y4 has type `Tensor[int, typing_extensions.Literal[99]]`; used
# as `Tensor[int, typing_extensions.Literal[1], typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y4: Tensor[int, L[99]] = x.unsqueeze(0)
empty: Tensor[int]
y5: Tensor[int, L[1]] = empty.unsqueeze(0)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 1st param but got
# `typing_extensions.Literal[1]`.
y6: Tensor[int, L[1]] = empty.unsqueeze(1)
y7: Tensor[int, L[2], L[3], L[1]] = x.unsqueeze(2)
def test_unsqueeze_() -> None:
x: Tensor[int, L[2], L[3]]
y: Tensor[int, L[1], L[2], L[3]] = x.unsqueeze_(0)
y_error: Tensor[int, L[1], L[2], L[3]] = x.unsqueeze_(0)
# pyre-ignore[9]: `unsqueeze_` is an in-place shape-transforming function. But Pyre cannot
# update a variable's shape type.
z: Tensor[int, L[1], L[2], L[3]] = x
def test_squeeze_() -> None:
x: Tensor[int, L[1], L[2], L[3]]
out: Tensor
y: Tensor[int, L[2], L[3]] = x.squeeze_(out=out)
# pyre-ignore[9]: Expected error.
y_error: Tensor[int, L[2], L[99]] = x.squeeze_()
y2: Tensor[int, L[2], L[3]] = x.squeeze_().squeeze_()
x2: Tensor[int, L[2], L[3], L[1], L[1]]
x3: Tensor[int, L[2], L[3], L[1]]
y3: Tensor[int, L[2], L[3]] = x2.squeeze_()
y4: Tensor[int, L[2], L[3]] = x3.squeeze_()
y5: Tensor[int, L[2], L[3]] = x.squeeze_(0)
y6: Tensor[int, L[2], L[3], L[1]] = x2.squeeze_(-1)
def test_squeeze() -> None:
x: Tensor[int, L[1], L[2], L[3]]
out: Tensor
y: Tensor[int, L[2], L[3]] = x.squeeze(out=out)
# pyre-ignore[9]: Expected error.
y_error: Tensor[int, L[2], L[99]] = x.squeeze()
y2: Tensor[int, L[2], L[3]] = x.squeeze().squeeze()
x2: Tensor[int, L[2], L[3], L[1], L[1]]
x3: Tensor[int, L[2], L[3], L[1]]
y3: Tensor[int, L[2], L[3]] = x2.squeeze()
y4: Tensor[int, L[2], L[3]] = x3.squeeze()
y5: Tensor[int, L[2], L[3]] = x.squeeze(0)
y6: Tensor[int, L[2], L[3], L[1]] = x2.squeeze(-1)
def test_repeat() -> None:
x: Tensor[int, L[2], L[3]]
y: Tensor[int, L[8], L[15]] = x.repeat(4, 5)
# pyre-fixme[9]
y2: Tensor[int, L[8], L[16]] = x.repeat(4, 5)
# TODO(T96315150): This is passing by coincidence right now.
y3: Tensor[int, L[4], L[10], L[18]] = x.repeat(4, 5, 6)
# pyre-ignore[9]: Doesn't error as expected because we have limited overloads.
y3_error: Tensor[int, L[4], L[10], L[99]] = x.repeat(4, 5, 6)
# pyre-ignore[9, 19]
not_yet_supported: Tensor[int, L[4], L[5], L[12], L[21]] = x.repeat(4, 5, 6, 7)
# Fewer dimensions than the Tensor. Should raise a different error.
x.repeat(2)
one_dimension: Tensor[int, L[2]]
y4: Tensor[int, L[8]] = x.repeat(4)
# pyre-ignore[9]
y4_error: Tensor[int, L[99]] = x.repeat(4)
def test_multiply() -> None:
x: Tensor[torch.int64, L[2], L[3]]
y: Tensor[torch.float32, L[2], L[3]] = x * 2
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: Tensor[torch.bool, L[2], L[99]] = x * 2
y2: Tensor[torch.float32, L[2], L[3]] = 2 * x
# pyre-fixme[9]: y2_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.bool, L[2], L[99]] = 2 * x
y3: Tensor[torch.float32, L[2], L[3]] = x * 2.0
# pyre-fixme[9]: y3_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[4]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y3_error: Tensor[torch.float32, L[2], L[4]] = x * 2.0
z: Tensor[torch.int64, L[4], L[1], L[1]]
z_bad: Tensor[torch.int64, L[4], L[2], L[99]]
y4: Tensor[torch.int64, L[4], L[2], L[3]] = x * z
# pyre-fixme[2001]: Broadcast error at expression `x.__mul__(z_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[4], typing_extensions.Literal[2],
# typing_extensions.Literal[99]]` cannot be broadcasted together.
x * z_bad
x4: Tensor[torch.float32, L[2], L[3]]
x5: Tensor[torch.float32, L[2], L[3]]
x5_bad: Tensor[torch.float32, L[2], L[99]]
x4 *= x5
x4 *= 4
y5: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x4.__imul__(x5_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x4 *= x5_bad
def test_floor_division() -> None:
x: Tensor[torch.int64, L[2], L[3]]
x2: Tensor[torch.int64, L[2], L[1]]
y: Tensor[torch.int64, L[2], L[3]] = x // 2
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: Tensor[torch.bool, L[2], L[99]] = x // 2
y2: Tensor[torch.int64, L[2], L[3]] = 2 // x
# pyre-fixme[9]: y2_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.bool, L[2], L[99]] = 2 // x
y3: Tensor[torch.int64, L[2], L[3]] = x // x2
x3: Tensor[torch.float32, L[2], L[3]]
x4: Tensor[torch.float32, L[2], L[3]]
x4_bad: Tensor[torch.float32, L[2], L[99]]
x3 //= x4
x3 //= 4
y5: Tensor[torch.float32, L[2], L[3]] = x3
# pyre-fixme[2001]: Broadcast error at expression `x3.__ifloordiv__(x4_bad)`;
# types `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x3 //= x4_bad
def test_division() -> None:
x: Tensor[torch.int64, L[2], L[3]]
x2: Tensor[torch.int64, L[2], L[1]]
y: Tensor[torch.float32, L[2], L[3]] = x / 2
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: Tensor[torch.bool, L[2], L[99]] = x / 2
y2: Tensor[torch.float32, L[2], L[3]] = 2 / x
# pyre-fixme[9]: y2_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.bool, L[2], L[99]] = 2 / x
x3: Tensor[torch.float32, L[2], L[3]]
y3: Tensor[torch.float32, L[2], L[3]] = x3 / 2
y4: Tensor[torch.float32, L[2], L[3]] = 2 / x3
y5: Tensor[torch.float32, L[2], L[3]] = x / x2
x5: Tensor[torch.float32, L[2], L[3]]
x6: Tensor[torch.float32, L[2], L[3]]
x6_bad: Tensor[torch.float32, L[2], L[99]]
x5 /= x6
x5 /= 4
y6: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x5.__itruediv__(x6_bad)`;
# types `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x5 /= x6_bad
def test_setitem() -> None:
x: Tensor[torch.int64, L[2], L[3]]
x[0, 0] = 1
def test_arange(n: N) -> None:
y: Tensor[torch.int64, L[5]] = torch.arange(5)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64,
# typing_extensions.Literal[5]]`.
y_error: Tensor[torch.int64, L[99]] = torch.arange(5)
y2: Tensor[torch.int64, L[4]] = torch.arange(1, 5)
y3: Tensor[torch.int64, L[2]] = torch.arange(1, 6, 2)
y_float: Tensor[torch.float32, L[5]] = torch.arange(5, dtype=torch.float32)
y_float2: Tensor[torch.float32, L[2]] = torch.arange(1, 6, 2, dtype=torch.float32)
device: torch.device
y_generic: Tensor[torch.float32, N] = torch.arange(
0, n, device=device, dtype=torch.float32
)
# pyre-fixme[9]: Expected error.
y_generic_error: Tensor[torch.float32, L[99]] = torch.arange(
0, n, device=device, dtype=torch.float32
)
def test_embedding() -> None:
embedding = nn.Embedding(10, 20)
y: Tensor[torch.float32, L[10], L[20]] = embedding.weight
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[10], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[10],
# typing_extensions.Literal[20]]`.
y_error: Tensor[torch.float32, L[10], L[99]] = embedding.weight
x: Tensor[torch.float32, L[2], L[3], L[4]]
y2: Tensor[torch.float32, L[2], L[3], L[4], L[20]] = embedding(x)
# pyre-fixme[9]: y2_error has type `Tensor[torch.float32, typing_extensions.Liter...
y2_error: Tensor[torch.float32, L[2], L[3], L[4], L[99]] = embedding(x)
weight: Tensor[torch.float32, L[3], L[4]]
embedding2: nn.Embedding[L[3], L[4]] = nn.Embedding.from_pretrained(weight)
# pyre-fixme[9]: embedding2_error has type
# `Embedding[typing_extensions.Literal[3], typing_extensions.Literal[99]]`; used
# as `Embedding[typing_extensions.Literal[3], typing_extensions.Literal[4]]`.
embedding2_error: nn.Embedding[L[3], L[99]] = nn.Embedding.from_pretrained(weight)
y3: Tensor[torch.float32, L[2], L[3], L[4], L[4]] = embedding2(x)
def test_init_normal() -> None:
x: Tensor[torch.float32, L[5], L[10]]
y: Tensor[torch.float32, L[5], L[10]] = nn.init.normal_(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[5], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[5],
# typing_extensions.Literal[10]]`.
y_error: Tensor[torch.float32, L[5], L[99]] = nn.init.normal_(x)
def test_view() -> None:
x: Tensor[torch.float32, L[4], L[4]]
y: Tensor[torch.float32, L[16]] = x.view(16)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32,
# typing_extensions.Literal[16]]`.
y_error: Tensor[torch.float32, L[99]] = x.view(16)
# Should be an error because 4 * 4 != 99. Don't think this is going to be
# feasible any time soon.
y_error2: Tensor[torch.float32, L[99]] = x.view(99)
y_error3: Tensor[torch.float32, L[2], L[3], L[4], L[5]] = x.view(2, 3, 4, 5)
y2: Tensor[torch.float32, L[2], L[8]] = x.view(-1, 8)
# pyre-fixme[9]: y2_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[8]]`.
y2_error: Tensor[torch.float32, L[2], L[99]] = x.view(-1, 8)
x3: Tensor[torch.float32, L[2], L[3], L[4]]
y3: Tensor[torch.float32, L[24]] = x3.view(-1)
y4: Tensor[torch.float32, L[8], L[3]] = x3.view(-1, 3)
# pyre-fixme[9]: y4_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[8],
# typing_extensions.Literal[3]]`.
y4_error: Tensor[torch.float32, L[99], L[3]] = x3.view(-1, 3)
y5: Tensor[torch.float32, L[2], L[6], L[2]] = x3.view(2, -1, 2)
x4: Tensor[torch.float32, L[2], L[3], L[4], L[5]]
y6: Tensor[torch.float32, L[3], L[5], L[8]] = x4.view(3, 5, -1)
def test_reshape() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[24]] = torch.reshape(x, (-1,))
y2: Tensor[torch.float32, L[8], L[3]] = torch.reshape(x, (-1, 3))
# pyre-fixme[9]: y2_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[8],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.float32, L[99], L[3]] = torch.reshape(x, (-1, 3))
y3: Tensor[torch.float32, L[6], L[2], L[2]] = torch.reshape(x, (-1, 2, 2))
y4: Tensor[torch.float32, L[2], L[6], L[2]] = torch.reshape(x, (2, -1, 2))
y5: Tensor[torch.float32, L[4], L[3], L[2]] = torch.reshape(x, (4, 3, 2))
def test_transpose() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4], L[5], L[6]]
y: Tensor[torch.float32, L[2], L[3], L[4], L[6], L[5]] = x.transpose(-2, -1)
y_function: Tensor[torch.float32, L[2], L[3], L[4], L[6], L[5]] = torch.transpose(
x, -2, -1
)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: Tensor[torch.float32, L[2], L[4], L[99]] = x.transpose(-2, -1)
y2: Tensor[torch.float32, L[2], L[4], L[3], L[5], L[6]] = x.transpose(1, 2)
y3: Tensor[torch.float32, L[3], L[2], L[4], L[5], L[6]] = x.transpose(0, 1)
y4: Tensor[torch.float32, L[3], L[2], L[4], L[5], L[6]] = x.transpose(1, 0)
y5: Tensor[torch.float32, L[2], L[3], L[4], L[6], L[5]] = x.transpose(-1, -2)
not_yet_supported: Tensor[
torch.float32,
L[3],
L[2],
L[4],
L[5],
L[6]
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[4]`.
] = x.transpose(1, 4)
def test_flatten() -> None:
x: Tensor[torch.float32, L[2], L[3]]
x_large: Tensor[torch.float32, L[2], L[3], L[4], L[5]]
y: Tensor[torch.float32, L[6]] = x.flatten()
y_default: Tensor[torch.float32, L[6]] = torch.flatten(x)
y_large: Tensor[torch.float32, L[120]] = x_large.flatten()
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32,
# typing_extensions.Literal[6]]`.
y_error: Tensor[torch.float32, L[99]] = x.flatten()
z: Tensor[torch.float32, L[2], L[3], L[4]]
y2: Tensor[torch.float32, L[6], L[4]] = z.flatten(0, 1)
y2_keyword: Tensor[torch.float32, L[6], L[4]] = z.flatten(start_dim=0, end_dim=1)
y3: Tensor[torch.float32, L[2], L[12]] = z.flatten(1, 2)
y3_large: Tensor[torch.float32, L[2], L[12], L[5]] = x_large.flatten(1, 2)
y4: Tensor[torch.float32, L[2], L[3], L[20]] = x_large.flatten(2, 3)
x_6d: Tensor[torch.float32, L[2], L[3], L[4], L[5], L[6], L[7]]
y4_large: Tensor[torch.float32, L[2], L[3], L[20], L[6], L[7]] = x_6d.flatten(2, 3)
# Out of bounds.
# pyre-fixme[9]: y5_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[12]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[6]]`.
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 1st param but got
# `typing_extensions.Literal[99]`.
y5_error: Tensor[torch.float32, L[2], L[12]] = x.flatten(99, 100)
x_0d: Tensor[torch.float32]
y_0d: Tensor[torch.float32, L[1]] = x_0d.flatten()
def test_empty() -> None:
x: Tuple[L[1], L[2], L[3]]
y: Tensor
device: torch.device
result1: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty(
*x,
device=device,
layout=torch.strided,
requires_grad=True,
out=y,
pin_memory=False,
memory_format=torch.memory_format(),
)
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.empty(*x)
result2: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.empty(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
result4: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty(x)
result5: torch.Tensor[torch.float32, L[4]] = torch.empty(4)
result6: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.empty(
x, dtype=torch.int64
)
result7: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.empty(
*x, dtype=torch.int64
)
def test_empty_like() -> None:
x: torch.Tensor[torch.float32, L[1], L[2], L[3]]
out: Tensor
device: torch.device
y1: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty_like(
x, device=device, layout=torch.strided, requires_grad=True, out=out
)
# pyre-fixme[9]: Expected error.
y1_error: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.empty_like(
x, device=device, layout=torch.strided, requires_grad=True, out=out
)
y2: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.empty_like(
x,
dtype=torch.int64,
device=device,
layout=torch.strided,
requires_grad=True,
out=out,
)
def test_randn() -> None:
x: Tuple[L[1], L[2], L[3]]
y: Tensor
device: torch.device
result1: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.randn(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.randn(*x)
result2: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.randn(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.randn(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
result4: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.randn(x)
result5: torch.Tensor[torch.float32, L[4]] = torch.randn(4)
result6: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.randn(
x, dtype=torch.int64
)
result7: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.randn(
*x, dtype=torch.int64
)
def test_all() -> None:
x: torch.Tensor[torch.float32, L[1], L[2], L[3]]
device: torch.device
y: torch.Tensor[torch.bool, L[1]] = torch.all(x)
# pyre-fixme[9]: bad1 has type `Tensor[torch.bool,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.bool,
# typing_extensions.Literal[1]]`.
y_error: torch.Tensor[torch.bool, L[99]] = torch.all(x)
y2: torch.Tensor[torch.bool, L[2], L[3]] = torch.all(x, dim=0)
y3: torch.Tensor[torch.bool, L[1], L[3]] = torch.all(x, dim=1)
y4: torch.Tensor[torch.bool, L[1]] = x.all()
def test_where() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
good: Tuple[torch.LongTensor[int, int], torch.LongTensor[int, int]] = torch.where(x)
bad: Tuple[
torch.LongTensor[int, int], torch.LongTensor[int, int], L[99]
] = torch.where(x)
y: torch.Tensor[torch.float32, L[2], L[1]]
not_broadcastable: torch.Tensor[torch.float32, L[2], L[99]]
good: Tuple[torch.LongTensor[int, int], torch.LongTensor[int, int]] = torch.where(x)
good2: torch.Tensor[torch.float32, L[2], L[3]] = torch.where(x > 0, x, y)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
bad2: torch.Tensor[torch.float32, L[2], L[99]] = torch.where(x > 0, x, y)
# pyre-fixme[2001]: Broadcast error at expression `torch.where(x > 0, x,
# not_broadcastable)`; types `Tuple[typing_extensions.Literal[2],
# typing_extensions.Literal[3]]` and `Tuple[typing_extensions.Literal[2],
# typing_extensions.Literal[99]]` cannot be broadcasted together.
z = torch.where(x > 0, x, not_broadcastable)
def test_getitem() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float32, L[3], L[4]] = x[0]
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[4]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[3],
# typing_extensions.Literal[4]]`.
bad1: torch.Tensor[torch.float32, L[99], L[4]] = x[0]
good2: torch.Tensor[torch.float32, L[1], L[2], L[3], L[4]] = x[None]
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[2], L[3], L[4]] = x[None]
mask: torch.Tensor[torch.bool, L[2], L[3], L[4]]
good3: torch.Tensor[torch.float32, int] = x[mask]
# pyre-fixme[9]: bad3 has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32, int]`.
bad3: torch.Tensor[torch.float32, L[99]] = x[mask]
any1: Tuple[int, str, float] = x[2]
any2: Tuple[float, str, int] = x[2]
def test_expand() -> None:
x: torch.Tensor[torch.float32, L[1], L[2], L[3]]
shape: Tuple[L[4], L[1], L[3]]
good1: torch.Tensor[torch.float32, L[4], L[2], L[3]] = x.expand(shape)
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[2], L[3]] = x.expand(shape)
# pyre-fixme[2001]: Broadcast error at expression `x.expand((4, 99, 3))`; types `...
x.expand((4, 99, 3))
good2: torch.Tensor[torch.float32, L[4], L[2], L[3]] = x.expand(4, 1, 3)
def test_to() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.int32, L[2], L[3], L[4]] = x.to(torch.int32)
# pyre-fixme[9]: bad1 has type `Tensor[torch.int32, typing_extensions.Literal[99]...
bad1: torch.Tensor[torch.int32, L[99], L[3], L[4]] = x.to(torch.int32)
device: torch.device
good2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.to(device)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.to(device)
y: torch.Tensor[torch.int32, L[2], L[3], L[4]]
good3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = y.to(torch.float32, device)
# pyre-fixme[9]: bad3 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad3: torch.Tensor[torch.float32, L[99], L[3], L[4]] = y.to(torch.float32, device)
def test_Linear_to() -> None:
linear: nn.Linear[L[10], L[20]]
device: torch.device
linear.to(dtype=torch.int64, device=device)
def test_Module_eval() -> None:
module: nn.Module
module.eval()
def test_Module_train() -> None:
module: nn.Module
module.train(mode=True)
y: bool = module.training
def test_Linear_bias() -> None:
linear: nn.Linear[L[10], L[20]]
x: nn.Parameter = linear.bias
def test_sum() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y1: torch.Tensor[torch.float32, L[2], L[3]] = x.sum(-1, dtype=None)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[99], L[3]] = x.sum(-1, dtype=None)
y2: torch.Tensor[torch.float32, L[2], L[4]] = x.sum(-2)
y3: torch.Tensor[torch.float32] = x.sum()
y4: torch.Tensor[torch.float32, L[3], L[4]] = x.sum(0)
y5: torch.Tensor[torch.float32, L[2], L[4]] = x.sum(1)
y6: torch.Tensor[torch.float32] = torch.sum(x)
def test_cumsum() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.cumsum()
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.cumsum()
good2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.cumsum(dim=0)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.cumsum(dim=0)
good3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.cumsum(dtype=None)
# pyre-fixme[9]: bad3 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad3: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.cumsum(dtype=None)
def test_contiguous() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.contiguous()
# pyre-fixme[9]: bad has type `Tensor[torch.float32, typing_extensions.Literal[99...
bad: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.contiguous()
def test_diff() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good: torch.Tensor[torch.float32, L[2], L[3], L[3]] = torch.diff(x)
# pyre-fixme[9]: bad has type `Tensor[torch.float32, typing_extensions.Literal[99...
bad: torch.Tensor[torch.float32, L[99], L[3], L[3]] = torch.diff(x)
good2: torch.Tensor[torch.float32, L[1], L[3], L[4]] = torch.diff(x, dim=0)
good3: torch.Tensor[torch.float32, L[2], L[2], L[4]] = torch.diff(x, dim=1)
good4: torch.Tensor[torch.float32, L[2], L[3], L[3]] = torch.diff(x, dim=-1)
good5: torch.Tensor[torch.float32, L[2], L[2], L[4]] = torch.diff(x, dim=-2)
good6: torch.Tensor[torch.float32, L[2], L[2], L[4]] = x.diff(dim=-2)
def test_argsort() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.argsort(x)
# pyre-fixme[9]: bad1 has type `LongTensor[torch.float32, typing_extensions.Liter...
bad1: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.argsort(x)
good2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.argsort(x, dim=0)
# pyre-fixme[9]: bad2 has type `LongTensor[torch.float32, typing_extensions.Liter...
bad2: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.argsort(x, dim=0)
good3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.argsort(
x, descending=True
)
# pyre-fixme[9]: bad3 has type `LongTensor[torch.float32, typing_extensions.Liter...
bad3: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.argsort(
x, descending=True
)
good4: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.argsort(dim=-1)
def test_functional_pad() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good: torch.Tensor[torch.float32, L[2], L[3], L[5]] = nn.functional.pad(x, (1, 0))
bad: torch.Tensor[torch.float32, L[99], L[3], L[5]] = nn.functional.pad(x, (1, 0))
good2: torch.Tensor[torch.float32, L[2], L[10], L[7]] = nn.functional.pad(
x, (1, 2, 3, 4), "constant", value=0.0
)
def test_allclose() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[1]]
not_broadcastable: torch.Tensor[torch.float32, L[3], L[4]]
good: bool = torch.allclose(x, y, atol=0.0, rtol=0.0, equal_nan=True)
# This should complain about non-broadcastable tensors but we don't have a
# way to constrain two parameter types to be broadcastable.
should_error: bool = torch.allclose(x, not_broadcastable)
def test_new_ones() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[8], L[9]] = x.new_ones((8, 9))
# pyre-fixme[9]: Expected error.
y_error: torch.Tensor[torch.float32, L[8], L[99]] = x.new_ones((8, 9))
y2: torch.Tensor[torch.int64, L[8], L[9]] = x.new_ones(
(8, 9), dtype=torch.int64, device="cuda", requires_grad=True
)
def test_ones_like() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
device: torch.device
good: torch.Tensor[torch.int64, L[2], L[3]] = torch.ones_like(
x, dtype=torch.int64, device=device
)
# pyre-fixme[9]: bad has type `Tensor[torch.int64,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.int64, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
bad: torch.Tensor[torch.int64, L[99], L[3]] = torch.ones_like(
x, dtype=torch.int64, device=device
)
bad2: torch.Tensor[torch.float32, L[2], L[3]] = torch.ones_like(
x,
)
def test_sparse_softmax() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.sparse.softmax(x, dim=-1)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[99], L[3]] = torch.sparse.softmax(x, dim=-1)
dtype: torch.int64
y2: torch.Tensor[torch.int64, L[2], L[3]] = torch.sparse.softmax(
x, dim=-1, dtype=dtype
)
def test_eye() -> None:
y: torch.Tensor[torch.int64, L[2], L[3]] = torch.eye(2, 3, dtype=torch.int64)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.int64, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.int64, L[99], L[3]] = torch.eye(2, 3, dtype=torch.int64)
y2: torch.Tensor[torch.float32, L[3], L[3]] = torch.eye(3)
def test_adaptive_average_pool2d() -> None:
model: nn.AdaptiveAvgPool2d[L[5], L[7]] = nn.AdaptiveAvgPool2d((5, 7))
# pyre-fixme[9]: model_error has type
# `AdaptiveAvgPool2d[typing_extensions.Literal[5],
# typing_extensions.Literal[99]]`; used as
# `AdaptiveAvgPool2d[typing_extensions.Literal[5], typing_extensions.Literal[7]]`.
model_error: nn.AdaptiveAvgPool2d[L[5], L[99]] = nn.AdaptiveAvgPool2d((5, 7))
model2: nn.AdaptiveAvgPool2d[L[5], L[5]] = nn.AdaptiveAvgPool2d(5)
# TODO(T100083794): This should be an error.
model2_error: nn.AdaptiveAvgPool2d[L[5], L[99]] = nn.AdaptiveAvgPool2d(5)
model3: nn.AdaptiveAvgPool2d[L[5], L[-1]] = nn.AdaptiveAvgPool2d((5, None))
# TODO(T100083794): This should be an error.
model3_error: nn.AdaptiveAvgPool2d[L[5], L[99]] = nn.AdaptiveAvgPool2d((5, None))
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[5], L[7]] = model(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[99], L[7]] = model(x)
y2: torch.Tensor[torch.float32, L[2], L[5], L[5]] = model2(x)
y3: torch.Tensor[torch.float32, L[2], L[5], L[4]] = model3(x)
def test_randperm() -> None:
y: torch.Tensor[torch.int64, L[10]] = torch.randperm(10, dtype=torch.int64)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64,
# typing_extensions.Literal[10]]`.
y_error: torch.Tensor[torch.int64, L[99]] = torch.randperm(10, dtype=torch.int64)
def test_sqrt() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.sqrt(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.sqrt(x)
def test_multinomial() -> None:
x: torch.Tensor[torch.float32, L[2], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.multinomial(x, 3)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.multinomial(x, 3)
x2: torch.Tensor[torch.float32, L[4]]
y2: torch.Tensor[torch.float32, L[3]] = torch.multinomial(x2, 3)
    y3: torch.Tensor[torch.float32, L[3]] = x2.multinomial(3)
def test_bmm() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
matrix: torch.Tensor[torch.float32, L[2], L[4], L[5]]
y: torch.Tensor[torch.float32, L[2], L[3], L[5]] = torch.bmm(x, matrix)
y2: torch.Tensor[torch.float32, L[2], L[3], L[5]] = x.bmm(matrix)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.bmm(x, matrix)
bad_matrix: torch.Tensor[torch.float32, L[2], L[99], L[5]]
# Should raise an error but doesn't because we solve `L[99] <: M && L[4] <:
# M` to be M = int.
torch.bmm(x, bad_matrix)
def test_subtract() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[1]]
x2: torch.Tensor[torch.float32, L[2], L[1], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x - x2
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x - x2
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x2 - x
y3: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x - 42.0
y4: torch.Tensor[torch.float32, L[2], L[3], L[1]] = 42.0 - x
z: Any
# Should not error.
x - z
x5: Tensor[torch.float32, L[2], L[3]]
x6: Tensor[torch.float32, L[2], L[3]]
x6_bad: Tensor[torch.float32, L[2], L[99]]
x5 -= x6
x5 -= 4
y5: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x5.__isub__(x6_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x5 -= x6_bad
def test_add() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[1]]
x2: torch.Tensor[torch.float32, L[2], L[1], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x + x2
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x + x2
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x2 + x
y3: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x + 42.0
y4: torch.Tensor[torch.float32, L[2], L[3], L[1]] = 42.0 + x
x5: Tensor[torch.float32, L[2], L[3]]
x6: Tensor[torch.float32, L[2], L[3]]
x6_bad: Tensor[torch.float32, L[2], L[99]]
x5 += x6
x5 += 4
y5: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x5.__iadd__(x6_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x5 += x6_bad
def test_torch_fft() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.complex64, L[2], L[3], L[4]] = torch.fft.fft(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.complex64, typing_extensions.Lite...
y_error: torch.Tensor[torch.complex64, L[2], L[3], L[99]] = torch.fft.fft(x)
y2: torch.Tensor[torch.complex64, L[2], L[3], L[4]] = torch.fft.fft(x, dim=-2)
def test_torch_real() -> None:
x: torch.Tensor[torch.complex64, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.real(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.real(x)
x2: torch.Tensor[torch.complex128, L[2], L[3], L[4]]
y2: torch.Tensor[torch.float64, L[2], L[3], L[4]] = torch.real(x2)
bad: torch.Tensor[torch.float32, L[2], L[3], L[4]]
# pyre-fixme[6]: Expected `Tensor[torch.complex64, *torch.Ts]` for 1st param but
# got `Tensor[torch.float32, int, int, int]`.
torch.real(bad)
def test_logical_and() -> None:
x: torch.Tensor[torch.complex64, L[2], L[1], L[4]]
x2: torch.Tensor[torch.float32, L[2], L[3], L[1]]
y: torch.Tensor[torch.bool, L[2], L[3], L[4]] = torch.logical_and(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.bool, typing_extensions.Literal[2...
y_error: torch.Tensor[torch.bool, L[2], L[3], L[99]] = torch.logical_and(x, x2)
y2: torch.Tensor[torch.bool, L[2], L[3], L[4]] = x.logical_and(x2)
not_broadcastable: torch.Tensor[torch.float32, L[2], L[3], L[99]]
# pyre-fixme[2001]: Broadcast error at expression `torch.logical_and(x, not_broad...
torch.logical_and(x, not_broadcastable)
x3: torch.Tensor[torch.complex64, L[2], L[1], L[1]]
# In-place version.
x.logical_and_(x3)
# This is actually an error because the output type (2, 3, 4) is not
# assignable to x. But we can't catch that because the typechecker doesn't
# know this is an in-place operator. Leaving this as is for now.
x.logical_and_(x2)
def test_and() -> None:
x_bool: torch.Tensor[torch.bool, L[2], L[1], L[4]]
x_bool2: torch.Tensor[torch.bool, L[2], L[3], L[1]]
y3: torch.Tensor[torch.bool, L[2], L[3], L[4]] = x_bool & x_bool2
# This broadcasts to (2, 1, 4), which is assignable to x_bool.
x_bool3: torch.Tensor[torch.bool, L[2], L[1], L[1]]
x_bool &= x_bool3
# This broadcasts to (2, 3, 4), which is not assignable to x_bool.
# pyre-fixme[9]: x_bool has type `Tensor[torch.bool, typing_extensions.Literal[2]...
x_bool &= x_bool2
x: torch.Tensor[torch.complex64, L[2], L[1], L[4]]
x2: torch.Tensor[torch.float32, L[2], L[3], L[1]]
# pyre-fixme[58]: `&` is not supported for operand types
# `Tensor[torch.complex64, int, int, int]` and `Tensor[torch.float32, int, int,
# int]`.
x & x2
def test_linalg_pinv() -> None:
x: torch.Tensor[torch.float32, L[2], L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[2], L[4], L[3]] = torch.linalg.pinv(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[4], L[99]] = torch.linalg.pinv(x)
wrong_datatype: torch.Tensor[torch.bool, L[2], L[3], L[4]]
# pyre-fixme[6]: Expected `Tensor[Variable[torch.linalg.FloatOrDouble <:
# [torch.float32, torch.float64, torch.complex64, torch.complex128]],
# *torch.linalg.Ts, Variable[N1 (bound to int)], Variable[N2 (bound to int)]]` for
# 1st param but got `Tensor[torch.bool, int, int, int]`.
torch.linalg.pinv(wrong_datatype)
torch.linalg.pinv(x, hermitian=True)
# Last two dimensions have to be equal.
x_square: torch.Tensor[torch.float32, L[2], L[3], L[4], L[4]]
y2: torch.Tensor[torch.float32, L[2], L[3], L[4], L[4]] = torch.linalg.pinv(
x_square, hermitian=True
)
def test_linalg_qr() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[3]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.linalg.qr(x)
# pyre-fixme[9]: y_error has type `Tuple[Tensor[torch.float32, typing_extensions....
y_error: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[99]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.linalg.qr(x)
y2: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[3]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.linalg.qr(x, mode="complete")
def test_torch_matmul() -> None:
x: torch.Tensor[torch.float32, L[2], L[1], L[3], L[4]]
x2: torch.Tensor[torch.float32, L[1], L[5], L[4], L[3]]
y: torch.Tensor[torch.float32, L[2], L[5], L[3], L[3]] = torch.matmul(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[5], L[3], L[99]] = torch.matmul(x, x2)
y2: torch.Tensor[torch.float32, L[2], L[5], L[3], L[3]] = x.matmul(x2)
y3: torch.Tensor[torch.float32, L[2], L[5], L[3], L[3]] = x.__matmul__(x2)
bad_x: torch.Tensor[torch.float32, L[1], L[5], L[99], L[3]]
torch.matmul(x, bad_x)
x_1d: torch.Tensor[torch.float32, L[3]]
x2_1d: torch.Tensor[torch.float32, L[3]]
y4: torch.Tensor[torch.float32] = torch.matmul(x_1d, x2_1d)
x3_1d_different: torch.Tensor[torch.float32, L[1]]
torch.matmul(x_1d, x3_1d_different)
def test_torch_optim() -> None:
block_parameters: Any
torch.optim.SGD(block_parameters, lr=1.0)
def test_torch_cuda() -> None:
torch.cuda.reset_peak_memory_stats()
def test_torch_profiler() -> None:
torch.profiler.profile()
def test_mse_loss() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
x2: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32] = nn.MSELoss(
size_average=True, reduce=True, reduction="mean"
)(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32]`.
y_error: torch.Tensor[torch.float32, L[99]] = nn.MSELoss()(x, x2)
def test_clip_grad_norm() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor = nn.utils.clip_grad_norm_(
x, max_norm=0.0, norm_type=0.0, error_if_nonfinite=True
)
# pyre-fixme[9]: y_error has type `int`; used as `Tensor[typing.Any,
# *Tuple[typing.Any, ...]]`.
y_error: int = nn.utils.clip_grad_norm_(
x, max_norm=0.0, norm_type=0.0, error_if_nonfinite=True
)
def test_clip_grad_value() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
nn.utils.clip_grad_value_([x], clip_value=0.0)
def test_bitwise_not() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.bitwise_not(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.bitwise_not(x)
y2: torch.Tensor[torch.float32, L[2], L[3]] = x.bitwise_not()
# In-place.
y3: torch.Tensor[torch.float32, L[2], L[3]] = x.bitwise_not_()
y4: torch.Tensor[torch.float32, L[2], L[3]] = ~x
def test_cdist() -> None:
x: torch.Tensor[torch.float32, L[5], L[1], L[2], L[3]]
x2: torch.Tensor[torch.float32, L[1], L[7], L[4], L[3]]
y: torch.Tensor[torch.float32, L[5], L[7], L[2], L[4]] = torch.cdist(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[5], L[7], L[2], L[99]] = torch.cdist(x, x2)
not_broadcastable: torch.Tensor[torch.float32, L[99], L[1], L[2], L[3]]
# pyre-fixme[2001]: Broadcast error at expression `torch.cdist(x,
# not_broadcastable)`; types `Tuple[typing_extensions.Literal[5],
# typing_extensions.Literal[1]]` and `Tuple[typing_extensions.Literal[99],
# typing_extensions.Literal[1]]` cannot be broadcasted together.
torch.cdist(x, not_broadcastable)
def test_random_manual_seed() -> None:
torch.random.manual_seed(42)
def test_clone() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.clone(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.clone(x)
y2: torch.Tensor[torch.float32, L[2], L[3]] = x.clone()
def test_equal() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.bool, L[2], L[3]] = x == 42
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.bool, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.bool, L[2], L[99]] = x == 42
# This doesn't return a Tensor as expected because `int.__eq__` accepts `object`.
y2: int = 42 == x
x2: torch.Tensor[torch.float32, L[2], L[1]]
x3: torch.Tensor[torch.float32, L[1], L[3]]
y3: torch.Tensor[torch.bool, L[2], L[3]] = x2 == x3
def test_diag_embed() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor = torch.diag_embed(x)
def test_unbind() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[torch.Tensor[torch.float32, L[2], L[4]], ...] = torch.unbind(x, dim=1)
# pyre-fixme[9]: y_error has type `Tuple[Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]], ...]`; used as
# `Tuple[Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[4]], ...]`.
y_error: Tuple[torch.Tensor[torch.float32, L[2], L[99]], ...] = torch.unbind(
x, dim=1
)
y2: Tuple[torch.Tensor[torch.float32, L[2], L[3]], ...] = torch.unbind(x, dim=-1)
y3: Tuple[torch.Tensor[torch.float32, L[3], L[4]], ...] = torch.unbind(x)
y4: Tuple[torch.Tensor[torch.float32, L[3], L[4]], ...] = x.unbind()
def test_size() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[L[2], L[3], L[4]] = x.size()
# pyre-fixme[9]: y_error has type `Tuple[typing_extensions.Literal[2],
# typing_extensions.Literal[3], typing_extensions.Literal[99]]`; used as
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3],
# typing_extensions.Literal[4]]`.
y_error: Tuple[L[2], L[3], L[99]] = x.size()
y2: L[2] = x.size(0)
y3: L[3] = x.size(1)
y4: L[4] = x.size(-1)
y5: L[3] = x.size(-2)
def test_stack(
    arbitrary_length_tuple: Tuple[torch.Tensor[torch.float32, L[3], L[4], L[5]], ...],
    variadic_tuple: Tuple[Unpack[Ts]],
) -> None:
x: torch.Tensor[torch.float32, L[3], L[4], L[5]]
x_incompatible: torch.Tensor[torch.float32, L[3], L[4], L[99]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4], L[5]] = torch.stack((x, x))
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[4], L[99]] = torch.stack((x, x))
y_incompatible_tensors: torch.Tensor = torch.stack((x, x_incompatible))
y2: torch.Tensor[torch.float32, L[3], L[2], L[4], L[5]] = torch.stack((x, x), dim=1)
y3: torch.Tensor[torch.float32, L[3], L[3], L[4], L[5]] = torch.stack(
(x, x, x), dim=1
)
y4: torch.Tensor[torch.float32, L[3], L[3], L[4], L[5]] = torch.stack((x, x, x))
# Arbitrary-length tuples make it return an arbitrary Tensor.
    y5: torch.Tensor = torch.stack(arbitrary_length_tuple)
y6: torch.Tensor = torch.stack(variadic_tuple)
def test_repeat_interleave() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
repeats: torch.Tensor[torch.float32, L[2]]
y: torch.Tensor[torch.float32, L[72]] = torch.repeat_interleave(x, 3)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32,
# typing_extensions.Literal[72]]`.
y_error: torch.Tensor[torch.float32, L[99]] = torch.repeat_interleave(x, 3)
y2: torch.Tensor[torch.float32, L[4], L[3], L[4]] = torch.repeat_interleave(
x, 2, dim=0
)
y3: torch.Tensor[torch.float32, L[2], L[6], L[4]] = torch.repeat_interleave(
x, 2, dim=1
)
y4: torch.Tensor[torch.float32, L[2], L[3], L[8]] = torch.repeat_interleave(
x, 2, dim=-1
)
# Too dynamic because the output shape depends on the contents of repeats.
y5: torch.Tensor[torch.float32, L[0], L[3], L[4]] = torch.repeat_interleave(
x, repeats, dim=0
)
y6: torch.Tensor[torch.float32, L[2], L[3], L[8]] = x.repeat_interleave(2, dim=-1)
def test_meshgrid() -> None:
x1: torch.Tensor[torch.float32, L[2]]
x2: torch.Tensor[torch.float32, L[3]]
x3: torch.Tensor[torch.float32, L[4]]
y: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.meshgrid(x1, x2, x3)
# pyre-fixme[9]: y_error has type `Tuple[Tensor[torch.float32, typing_extensions....
y_error: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[99]],
] = torch.meshgrid(x1, x2, x3)
y2: Tuple[
torch.Tensor[torch.float32, L[2], L[3]],
torch.Tensor[torch.float32, L[2], L[3]],
] = torch.meshgrid(x1, x2)
y3: Tuple[
torch.Tensor[torch.float32, L[2]],
] = torch.meshgrid(x1)
x4: Tensor
xs = tuple(x4 for _ in range(5))
y4: Tuple[torch.Tensor, ...] = torch.meshgrid(*xs)
xs2 = [x4 for _ in range(5)]
y5: Tuple[torch.Tensor, ...] = torch.meshgrid(*xs2)
def test_argmax() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.LongTensor[torch.int64] = torch.argmax(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64]`.
y_error: torch.LongTensor[torch.int64, L[99]] = torch.argmax(x)
y2: torch.LongTensor[torch.int64, L[3], L[4]] = torch.argmax(x, dim=0)
y3: torch.LongTensor[torch.int64, L[1], L[3], L[4]] = torch.argmax(
x, dim=0, keepdim=True
)
y4: torch.LongTensor[torch.int64, L[2], L[4]] = torch.argmax(x, dim=1)
y5: torch.LongTensor[torch.int64, L[2], L[1], L[4]] = torch.argmax(
x, dim=1, keepdim=True
)
y6: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmax(x, dim=2)
y7: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmax(
x, dim=2, keepdim=True
)
y8: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmax(x, dim=-1)
y9: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmax(
x, dim=-1, keepdim=True
)
y10: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = x.argmax(
dim=-1, keepdim=True
)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.argmax(x, dim=3)
def test_argmin() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.LongTensor[torch.int64] = torch.argmin(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64]`.
y_error: torch.LongTensor[torch.int64, L[99]] = torch.argmin(x)
y2: torch.LongTensor[torch.int64, L[3], L[4]] = torch.argmin(x, dim=0)
y3: torch.LongTensor[torch.int64, L[1], L[3], L[4]] = torch.argmin(
x, dim=0, keepdim=True
)
y4: torch.LongTensor[torch.int64, L[2], L[4]] = torch.argmin(x, dim=1)
y5: torch.LongTensor[torch.int64, L[2], L[1], L[4]] = torch.argmin(
x, dim=1, keepdim=True
)
y6: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmin(x, dim=2)
y7: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmin(
x, dim=2, keepdim=True
)
y8: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmin(x, dim=-1)
y9: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmin(
x, dim=-1, keepdim=True
)
y10: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = x.argmin(
dim=-1, keepdim=True
)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.argmin(x, dim=3)
def test_mean() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32] = torch.mean(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32]`.
y_error: torch.Tensor[torch.float32, L[99]] = torch.mean(x)
y2: torch.Tensor[torch.float32, L[3], L[4]] = torch.mean(x, dim=0)
y3: torch.Tensor[torch.float32, L[1], L[3], L[4]] = torch.mean(
x, dim=0, keepdim=True
)
y4: torch.Tensor[torch.float32, L[2], L[4]] = torch.mean(x, dim=1)
y5: torch.Tensor[torch.float32, L[2], L[1], L[4]] = torch.mean(
x, dim=1, keepdim=True
)
y6: torch.Tensor[torch.float32, L[2], L[3]] = torch.mean(x, dim=2)
y7: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.mean(
x, dim=2, keepdim=True
)
y8: torch.Tensor[torch.float32, L[2], L[3]] = torch.mean(x, dim=-1)
y9: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.mean(
x, dim=-1, keepdim=True
)
y10: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x.mean(dim=-1, keepdim=True)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.mean(x, dim=3)
def test_count_nonzero() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.int64] = torch.count_nonzero(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64]`.
y_error: torch.Tensor[torch.int64, L[99]] = torch.count_nonzero(x)
y2: torch.Tensor[torch.int64, L[3], L[4]] = torch.count_nonzero(x, dim=0)
y3: torch.Tensor[torch.int64, L[2], L[4]] = torch.count_nonzero(x, dim=1)
y4: torch.Tensor[torch.int64, L[2], L[3]] = torch.count_nonzero(x, dim=2)
y5: torch.Tensor[torch.int64, L[2], L[3]] = x.count_nonzero(dim=-1)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.count_nonzero(x, dim=3)
def test_cat() -> None:
x1: torch.Tensor[torch.float32, L[2], L[3], L[4]]
x1_first_is_3: torch.Tensor[torch.float32, L[3], L[3], L[4]]
x1_first_is_4: torch.Tensor[torch.float32, L[4], L[3], L[4]]
x1_second_is_4: torch.Tensor[torch.float32, L[2], L[4], L[4]]
x1_second_is_5: torch.Tensor[torch.float32, L[2], L[5], L[4]]
x1_last_is_5: torch.Tensor[torch.float32, L[2], L[3], L[5]]
x1_last_is_6: torch.Tensor[torch.float32, L[2], L[3], L[6]]
# 2-element tuple.
y: torch.Tensor[torch.float32, L[5], L[3], L[4]] = torch.cat((x1, x1_first_is_3))
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.cat(
(x1, x1_first_is_3)
)
y2: torch.Tensor[torch.float32, L[2], L[7], L[4]] = torch.cat(
(x1, x1_second_is_4), dim=1
)
y3: torch.Tensor[torch.float32, L[2], L[3], L[9]] = torch.cat(
(x1, x1_last_is_5), dim=-1
)
y3_shape_mismatch: torch.Tensor[torch.float32, Unpack[Tuple[Any, ...]]] = torch.cat(
(x1, x1_second_is_4), dim=-1
)
# 3-element tuple.
y4: torch.Tensor[torch.float32, L[9], L[3], L[4]] = torch.cat(
(x1, x1_first_is_3, x1_first_is_4)
)
y5: torch.Tensor[torch.float32, L[2], L[12], L[4]] = torch.cat(
(x1, x1_second_is_4, x1_second_is_5), dim=1
)
y6: torch.Tensor[torch.float32, L[2], L[3], L[15]] = torch.cat(
(x1, x1_last_is_5, x1_last_is_6), dim=-1
)
y_many_element_tuple: torch.Tensor[
torch.float32, Unpack[Tuple[Any, ...]]
] = torch.cat((x1, x1, x1, x1))
y_list: torch.Tensor[torch.float32, Unpack[Tuple[Any, ...]]] = torch.cat([x1, x1])
def test_sign() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.sign(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.sign(x)
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.sign()
def test_diagonal() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4], L[5]]
y: torch.Tensor = torch.diagonal(x)
def test_diag() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor = torch.diag(x)
def test_module_list() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
modules = nn.ModuleList([nn.AdaptiveAvgPool2d(0), nn.AdaptiveAvgPool2d(1)])
for module in modules:
y: Tensor = module(x)
z: int = len(modules)
def test_sparse_coo_tensor() -> None:
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.sparse_coo_tensor(
torch.randn(5), [6, 7, 8], size=(2, 3)
)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.sparse_coo_tensor(
torch.randn(5), [6, 7, 8], size=(2, 3)
)
y2: torch.Tensor = torch.sparse_coo_tensor(torch.randn(5), [6, 7, 8])
def test_max() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32] = torch.max(x)
y2: torch.Tensor[torch.float32, L[3], L[4]] = torch.max(x, dim=0).values
y2_indices: torch.Tensor[torch.int64, L[3], L[4]] = torch.max(x, dim=0).indices
y2_getitem: torch.Tensor[torch.int64, L[3], L[4]] = torch.max(x, dim=0)[1]
y3: torch.Tensor[torch.float32, L[1], L[3], L[4]] = torch.max(
x, dim=0, keepdim=True
).values
y4: torch.Tensor[torch.float32, L[2], L[4]] = torch.max(x, dim=1).values
y5: torch.Tensor[torch.float32, L[2], L[1], L[4]] = torch.max(
x, dim=1, keepdim=True
).values
y6: torch.Tensor[torch.float32, L[2], L[3]] = torch.max(x, dim=2).values
y7: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.max(
x, dim=2, keepdim=True
).values
y8: torch.Tensor[torch.float32, L[2], L[3]] = torch.max(x, dim=-1).values
y9: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.max(
x, dim=-1, keepdim=True
).values
y10: torch.Tensor[torch.float32, L[2], L[4]] = torch.max(x, dim=-2).values
y11: torch.Tensor[torch.float32, L[2], L[1], L[4]] = torch.max(
x, dim=-2, keepdim=True
).values
y12: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x.max(
dim=-1, keepdim=True
).values
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.max(x, dim=3).values
def test_einsum() -> None:
x: Tensor = torch.einsum("ii", torch.randn(4, 4))
def test_type_as() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
x2: torch.Tensor[torch.int64, L[2], L[3], L[4]]
y: torch.Tensor[torch.int64, L[2], L[3], L[4]] = x.type_as(x2)
def test_softmax() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.softmax(x, dim=1)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.softmax(x, dim=1)
y2: torch.Tensor[torch.int64, L[2], L[3], L[4]] = torch.softmax(
x, dim=1, dtype=torch.int64
)
y3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.softmax(dim=1)
def test_conv2d() -> None:
x: Tensor[torch.float32, L[20], L[16], L[50], L[100]]
y7: Tensor[torch.float32, L[20], L[33], L[56], L[100]] = nn.Conv2d(
16, 33, (3, 5), padding=(4, 2), bias=False
)(x)
# pyre-fixme[9]: y7_error has type `Tensor[torch.float32, typing_extensions.Liter...
y7_error: Tensor[torch.float32, L[20], L[33], L[56], L[99]] = nn.Conv2d(
16, 33, (3, 5), padding=(4, 2)
)(x)
module: nn.Module = nn.Conv2d(16, 33, (3, 5), padding=(4, 2))
def test_nn_Parameter() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.Parameter(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.Parameter(x)
def test_torch_datatypes() -> None:
x: torch.float16
x2: torch.int
def test_norm() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
y1: Tensor[torch.float32] = torch.norm(x)
y2: Tensor[torch.float32, L[3], L[4]] = torch.norm(x, dim=0, out=x_out, p=1)
# pyre-fixme[9]: Expected error.
y2_error: Tensor[torch.float32, L[3], L[99]] = torch.norm(x, dim=0)
y3: Tensor[torch.float32, L[1], L[3], L[4]] = torch.norm(x, dim=0, keepdim=True)
def test_rand() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
device: torch.device
y1: Tensor[torch.float32, L[2], L[3], L[4]] = torch.rand(2, 3, 4)
# pyre-fixme[9]: Expected Error.
y1_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.rand(2, 3, 4)
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.rand(
2,
3,
4,
dtype=torch.int64,
device=device,
layout=torch.strided,
out=x_out,
requires_grad=True,
generator=torch.default_generator,
)
y3: Tensor[torch.float32, L[2], L[3], L[4]] = torch.rand((2, 3, 4))
def test_randint() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
device: torch.device
y1: Tensor[torch.int64, L[2], L[3], L[4]] = torch.randint(0, 3, (2, 3, 4))
# pyre-fixme[9]: Expected error.
y1_error: Tensor[torch.int64, L[2], L[3], L[99]] = torch.randint(0, 3, (2, 3, 4))
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.randint(
3,
(2, 3, 4),
dtype=torch.int64,
device=device,
layout=torch.strided,
out=x_out,
requires_grad=True,
generator=torch.default_generator,
)
def test_zeros() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
device: torch.device
y1: Tensor[torch.float32, L[2], L[3], L[4]] = torch.zeros(2, 3, 4)
# pyre-fixme[9]: Expected Error.
y1_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.zeros(2, 3, 4)
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.zeros(
2,
3,
4,
dtype=torch.int64,
device=device,
layout=torch.strided,
out=x_out,
requires_grad=True,
)
y3: Tensor[torch.float32, L[2], L[3], L[4]] = torch.zeros((2, 3, 4))
def test_stride() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[L[2], L[3], L[4]] = x.stride()
# pyre-fixme[9]: Expected error.
y_error: Tuple[L[2], L[3], L[99]] = x.stride()
y2: L[12] = x.stride(0)
y3: L[4] = x.stride(1)
y4: L[1] = x.stride(2)
def test_chunk() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[
Tensor[torch.float32, L[2], L[3], L[2]], Tensor[torch.float32, L[2], L[3], L[2]]
] = torch.chunk(x, 2, dim=-1)
# pyre-fixme[9]: Expected error.
y_error: Tuple[
Tensor[torch.float32, L[2], L[3], L[99]],
Tensor[torch.float32, L[2], L[3], L[2]],
] = torch.chunk(x, 2, dim=-1)
y2: Tuple[
Tensor[torch.float32, L[1], L[3], L[4]], Tensor[torch.float32, L[1], L[3], L[4]]
] = torch.chunk(x, 2, dim=0)
y3: Tuple[
Tensor[torch.float32, L[1], L[3], L[4]], Tensor[torch.float32, L[1], L[3], L[4]]
] = x.chunk(2, dim=0)
def test_abs() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = x.abs()
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = x.abs()
def test_enable_grad() -> None:
with torch.enable_grad():
pass
def test_normal() -> None:
y: Tensor[torch.float32, L[2], L[3], L[4]] = torch.normal(
0, 1, size=(2, 3, 4), device="cuda", requires_grad=True
)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.normal(
0, 1, size=(2, 3, 4), device="cuda", requires_grad=True
)
def test_dim() -> None:
x0: Tensor[torch.float32]
x1: Tensor[torch.float32, L[2]]
x2: Tensor[torch.float32, L[2], L[3]]
x3: Tensor[torch.float32, L[2], L[3], L[4]]
y: L[3] = x3.dim()
# pyre-fixme[9]: Expected error.
y_error: L[5] = x3.dim()
y2: L[0] = x0.dim()
y3: L[1] = x1.dim()
y4: L[2] = x2.dim()
def test_is_cuda() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: bool = x.is_cuda
def test_autograd_backward() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
torch.autograd.backward(x, x)
def test_linalg_norm() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2]] = torch.linalg.norm(x, dim=(-2, -1))
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[99]] = torch.linalg.norm(x, dim=(-2, -1))
def test_Sized() -> None:
x: torch.Size = torch.Size((2, 3, 4))
def test_initial_seed() -> None:
x: int = torch.initial_seed()
def test_log_softmax() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = torch.log_softmax(x, dim=1)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.log_softmax(x, dim=1)
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.log_softmax(
x, dtype=torch.int64, dim=1
)
def test_masked_select() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
mask: Tensor[torch.bool, L[2], L[3], L[4]]
out: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor = x.masked_select(mask, out=out)
y2: Tensor = torch.masked_select(x, mask, out=out)
def test__lt__() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.bool, L[2], L[3], L[4]] = x < 3.0
def test_pow() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = x ** 4
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = x ** 4
def test_item() -> None:
x: Tensor[torch.float32]
x2: Tensor[torch.float32, L[1]]
y: torch.float32 = x.item()
# pyre-fixme[9]: Expected error.
y_error: torch.int64 = x.item()
    y2: torch.float32 = x2.item()
def test_uniform_() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.init.uniform_(x, a=1.0, b=2.0)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.init.uniform_(
x, a=1.0, b=2.0
)
def test_kaiming_uniform_() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.init.kaiming_uniform_(
x, a=1.0, mode="fan_in", nonlinearity="leaky_relu"
)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.init.kaiming_uniform_(x)
def test_constant_() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.init.constant_(x, val=1.0)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.init.constant_(x, val=1.0)
def test_leaky_relu() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.LeakyReLU(
negative_slope=1.0, inplace=True
)(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.LeakyReLU(
negative_slope=1.0, inplace=True
)(x)
def test_fft_fft2() -> None:
x: Tensor[torch.complex64, L[2], L[3], L[4]]
y: Tensor[torch.complex64, L[2], L[3], L[4]] = torch.fft.fft2(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.complex64, L[2], L[3], L[99]] = torch.fft.fft2(x)
def test_real() -> None:
x: Tensor[torch.complex64, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = x.real
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = x.real
x2: Tensor[torch.complex128, L[2], L[3], L[4]]
y2: Tensor[torch.float64, L[2], L[3], L[4]] = x2.real
not_complex: Tensor[torch.float64, L[2], L[3], L[4]]
# Should error but we don't have overloads for @property.
not_complex.real
def test_Tensor_init() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
# pyre-fixme[9]: Unexpected error because the constructor doesn't bind DType.
y: Tensor[torch.float32, L[2], L[3], L[4]] = Tensor((2, 3, 4), device="cuda")
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = Tensor((2, 3, 4), device="cuda")
y2: Tensor[torch.float32, L[2], L[3], L[4]] = Tensor(2, 3, 4, device="cuda")
y3: Tensor[torch.float32, L[2], L[3], L[4]] = Tensor(x)
def test_reflection_pad2d() -> None:
module: nn.Module = nn.ReflectionPad2d(4)
x: Tensor[torch.float32, L[20], L[16], L[50], L[100]]
y: Tensor[torch.float32, L[20], L[16], L[58], L[108]] = nn.ReflectionPad2d(4)(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[20], L[16], L[58], L[99]] = nn.ReflectionPad2d(4)(
x
)
def test_half() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float16, L[2], L[3], L[4]] = x.half(torch.memory_format())
# pyre-fixme[9]: Expected error.
bad1: torch.Tensor[torch.float16, L[99], L[3], L[4]] = x.half()
def test_is_contiguous() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: bool = x.is_contiguous(torch.memory_format())
def test_scatter() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
# We don't really check for the shape of index or src.
index: torch.LongTensor[torch.float32, L[99]]
src: torch.Tensor[torch.float32, L[99], L[99]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter(0, index, src)
# pyre-fixme[9]: Expected error.
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x.scatter(0, index, src)
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter(2, index, src)
def test_scatter_() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
# We don't really check for the shape of index or src.
index: torch.LongTensor[torch.float32, L[99]]
src: torch.Tensor[torch.float32, L[99], L[99]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter_(0, index, src)
# pyre-fixme[9]: Expected error.
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x.scatter_(0, index, src)
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter_(2, index, src)
def test_bool() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.bool, L[2], L[3], L[4]] = x.bool()
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.bool, L[2], L[3], L[99]] = x.bool()
|
bart_ls-main
|
xformers/stubs/torch_stub_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components import Activation
from xformers.components.feedforward import FEEDFORWARD_REGISTRY, build_feedforward
from xformers.components.feedforward.mixture_of_experts import GateConfig
from xformers.helpers.test_utils import init_torch_distributed_local
BATCH = 4
SEQ = 512
EMBD = 16
LATENT = 128
DROPOUT = 0.5
DEVICES = (
[torch.device("cpu")] if not torch.cuda.is_available() else [torch.device("cuda")]
)
assert FEEDFORWARD_REGISTRY.keys(), "Feedforward layers should have been registered"
@pytest.mark.parametrize("feedforward_name", FEEDFORWARD_REGISTRY.keys())
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("device", DEVICES)
def test_feedforward(
feedforward_name: str, activation: Activation, device: torch.device
):
test_config = {
"name": feedforward_name,
"dim_model": LATENT,
"dropout": DROPOUT,
"activation": activation,
"hidden_layer_multiplier": 4,
"number_of_experts": 4, # MoE
"gate": "top_2", # MoE
}
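    # The MoE-specific entries ("number_of_experts", "gate") are only consumed by the
    # MixtureOfExperts variant; every other registered feedforward is built from this
    # same config in the parametrized sweep below.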
if feedforward_name == "MixtureOfExperts":
init_torch_distributed_local()
# dummy, just check construction and dimensions in the FW pass
ffw = build_feedforward(test_config)
if ffw.requires_cuda and not device.type == "cuda":
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip("This MLP requires CUDA and current device does not match")
inputs = torch.rand(BATCH, SEQ, LATENT, device=device)
ffw = ffw.to(device)
_ = ffw(inputs)
def get_expert():
return torch.nn.Linear(LATENT, LATENT, bias=False)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="This test requires CUDA")
@pytest.mark.parametrize("gate", [g.value for g in GateConfig])
@pytest.mark.parametrize("number_of_local_experts", [None, 4])
@pytest.mark.parametrize("expert_constructor", [None, get_expert])
def test_moe(gate, number_of_local_experts, expert_constructor):
test_config = {
"name": "MixtureOfExperts",
"dim_model": LATENT,
"dropout": DROPOUT,
"activation": Activation.ReLU,
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"number_of_local_experts": number_of_local_experts,
"gate": gate,
"expert_constructor": expert_constructor,
}
init_torch_distributed_local()
# dummy, just check construction and dimensions in the FW pass
ffw = build_feedforward(test_config)
inputs = torch.rand(BATCH, SEQ, LATENT, device=torch.device("cuda"))
ffw = ffw.to(torch.device("cuda"))
outputs = ffw(inputs)
loss = torch.sum(outputs)
loss.backward()
|
bart_ls-main
|
xformers/tests/test_feedforward.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
import pytest
import torch
from xformers.components import MultiHeadDispatch
from xformers.components.attention import build_attention
from xformers.components.attention.attention_patterns import block_sparsify_tensor
from xformers.triton.utils import get_current_cuda_device
# CREDITS:
# Tests taken, very lightly changed, from
# https://github.com/openai/triton/blob/master/python/test/unit/operators/test_blocksparse.py
# Initially copied here following a fork from the matmul kernel
_triton_available = torch.cuda.is_available()
_matmul_types = []
if _triton_available:
try:
import triton
from triton.ops.blocksparse import matmul as blocksparse_matmul
from triton.ops.blocksparse import softmax as blocksparse_softmax
from xformers.components.attention import BlockSparseAttention
from xformers.triton.utils import (
assert_almost_equal,
gpu_capabilities_older_than_70,
)
_triton_available = not gpu_capabilities_older_than_70()
_matmul_types = ["sdd", "dsd", "dds"]
except (ImportError, ModuleNotFoundError) as e:
import logging
logging.warning(f"Triton is not available: {e}. Some tests will be skipped")
_triton_available = False
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.skipif(
not _triton_available or get_current_cuda_device() == "T4",
reason="FIXME - blocksparse matmuls are slightly off on T4s",
)
@pytest.mark.parametrize("MODE", _matmul_types)
@pytest.mark.parametrize("TRANS_A", [False, True])
@pytest.mark.parametrize("TRANS_B", [False, True])
@pytest.mark.parametrize("BLOCK", [16, 32, 64])
@pytest.mark.parametrize("DTYPE", [torch.float16])
def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=32, H=2, M=512, N=384, K=256):
# set seed
torch.random.manual_seed(0)
# create inputs
a = torch.randn(
(Z, H, K, M) if TRANS_A else (Z, H, M, K), dtype=DTYPE, device="cuda"
)
b = torch.randn(
(Z, H, N, K) if TRANS_B else (Z, H, K, N), dtype=DTYPE, device="cuda"
)
shape = {
"sdd": (M, N),
"dsd": (a.shape[2], a.shape[3]),
"dds": (b.shape[2], b.shape[3]),
}[MODE]
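    # The MODE letters denote which tensors are block-sparse, in (output, a, b) order:
    # "sdd" keeps a sparse output with dense inputs, "dsd" sparsifies a, and "dds"
    # sparsifies b, matching the block_sparsify_tensor calls below.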
layout = torch.randint(2, (H, shape[0] // BLOCK, shape[1] // BLOCK))
# triton result
op = blocksparse_matmul(layout, BLOCK, MODE, trans_a=TRANS_A, trans_b=TRANS_B)
ra = block_sparsify_tensor(a, layout, BLOCK) if MODE == "dsd" else a
rb = block_sparsify_tensor(b, layout, BLOCK) if MODE == "dds" else b
rc = triton.testing.catch_oor(lambda: op(ra, rb), pytest)
# torch result
ta = triton.testing.mask_tensor(a, layout, BLOCK) if MODE == "dsd" else a
tb = triton.testing.mask_tensor(b, layout, BLOCK) if MODE == "dds" else b
ta = ta.transpose(2, 3) if TRANS_A else ta
tb = tb.transpose(2, 3) if TRANS_B else tb
tc = torch.matmul(ta, tb)
tc = triton.testing.mask_tensor(tc, layout, BLOCK) if MODE == "sdd" else tc
tc = block_sparsify_tensor(tc, layout, BLOCK) if MODE == "sdd" else tc
# compare
assert_almost_equal(rc, tc)
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.parametrize("BLOCK", [32])
@pytest.mark.parametrize("WIDTH", [256, 576, 1024, 1792])
@pytest.mark.parametrize("DTYPE", [torch.float16, torch.float32])
def test_softmax(BLOCK, WIDTH, DTYPE):
# set seed
torch.random.manual_seed(0)
Z, H, M, N = 2, 4, WIDTH, WIDTH
scale = 0.4
# create inputs
layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
x = torch.randn((Z, H, M, N), dtype=DTYPE, requires_grad=True, device="cuda")
at_mask = torch.randint(
low=0, high=2, size=(N, N), dtype=torch.bool, requires_grad=False, device="cuda"
)
kp_mask = torch.randint(
low=0, high=2, size=(Z, N), dtype=DTYPE, requires_grad=False, device="cuda"
)
kp_mask[kp_mask == 1.0] = float("-inf")
# triton result
op = blocksparse_softmax(layout, BLOCK)
tx = block_sparsify_tensor(x, layout, BLOCK)
ty = op(
tx,
scale=scale,
key_padding_mask=kp_mask,
key_padding_mask_mode="add",
attn_mask=at_mask.to(DTYPE),
attn_mask_mode="mul",
)
# torch result
rx = triton.testing.mask_tensor(x, layout, BLOCK, value=float("-inf"))
if at_mask is not None:
# broadcast at_mask to the same shape as rx
M = at_mask[None, None, :, :] + torch.zeros_like(rx)
rx[M == 0] = float("-inf")
if kp_mask is not None:
rx += kp_mask[:, None, None, :]
ry = torch.softmax(rx * scale, -1)
ry = block_sparsify_tensor(ry, layout, BLOCK)
# compare
assert_almost_equal(ry, ty)
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.parametrize("block", [32]) # 16, 32,
def test_attention_fwd_bwd(
block,
input_scale=1.0,
scale=1 / 8.0,
n_ctx=256,
dtype=torch.float16,
batch_size=2,
n_heads=2,
):
# inputs
head_dim = 64
qkv_shape = (batch_size, n_heads, n_ctx, head_dim)
qkvs = [
torch.nn.Parameter(input_scale * torch.randn(qkv_shape), requires_grad=True)
.to(dtype)
.cuda()
for _ in range(3)
]
attn_mask = torch.tril(
torch.ones(
[n_ctx, n_ctx],
device="cuda",
dtype=dtype,
),
diagonal=0,
)
def loss_fn(x):
return (x ** 2).mean()
# Triton:
n_blocks = n_ctx // block
layout = torch.tril(torch.ones([n_heads, n_blocks, n_blocks], dtype=torch.long))
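    # The layout is lower-triangular at block granularity; the dense attn_mask passed
    # in the attention call below is expected to supply the element-level causal
    # masking inside each retained block.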
query, key, value = [x.clone() for x in qkvs]
query.retain_grad()
key.retain_grad()
value.retain_grad()
block_sparse_attention = BlockSparseAttention(layout, block)
attn_out = block_sparse_attention(
att_mask=attn_mask, q=query, k=key, v=value, scale=scale
)
# ad hoc loss
loss = loss_fn(attn_out)
loss.backward()
grads = [query.grad, key.grad, value.grad]
# Torch version:
torch_q, torch_k, torch_v = [x.clone() for x in qkvs]
torch_q = torch_q / math.sqrt(head_dim)
attn_mask = 1e6 * (-1 + (attn_mask.reshape((1, 1, n_ctx, n_ctx)).cuda()))
torch_q.retain_grad()
torch_k.retain_grad()
torch_v.retain_grad()
scores = scale * torch.einsum("bhsd,bhtd->bhst", torch_q, torch_k)
scores = scores + attn_mask
probs = torch.softmax(scores, dim=-1)
torch_attn_out = torch.einsum("bhst,bhtd->bhsd", probs, torch_v)
# ad hoc loss
torch_loss = loss_fn(torch_attn_out)
torch_loss.backward()
torch_grads = [torch_q.grad, torch_k.grad, torch_v.grad]
# comparison
assert_almost_equal(
loss, torch_loss, err_msg=f"Triton loss {loss} and torch loss {torch_loss}"
)
for g1, g2 in zip(grads, torch_grads):
assert_almost_equal(
torch.norm(g1),
torch.norm(g2),
err_msg=f"Triton grad {torch.norm(g1).item()} and torch grad {torch.norm(g2).item()}",
)
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
def test_blocksparse_attention_parity():
def _reset_seeds():
torch.manual_seed(0)
seq = 64
model = 64
heads = 4
block_size = 16
batch_size = 2
batched_dim = heads * batch_size
dim_head = model // heads
test_config = {
"dropout": 0.0,
"causal": False,
"seq_len": seq,
"num_heads": 4,
"dim_head": dim_head,
"block_size": block_size,
"layout": torch.ones(seq // block_size, seq // block_size, dtype=torch.long),
}
inputs = torch.rand(batched_dim, seq, model, device="cuda").half()
_reset_seeds()
test_config["name"] = "scaled_dot_product"
attention_sdp = build_attention(test_config)
multi_head_sdp = (
MultiHeadDispatch(
seq_len=seq,
dim_model=model,
residual_dropout=0.0,
num_heads=heads,
attention=attention_sdp,
)
.cuda()
.half()
)
r_sdp = multi_head_sdp(inputs, inputs, inputs)
_reset_seeds()
test_config["name"] = "blocksparse"
attention_blocksparse = build_attention(test_config)
multi_head_blocksparse = (
MultiHeadDispatch(
seq_len=seq,
dim_model=model,
residual_dropout=0.0,
num_heads=heads,
attention=attention_blocksparse,
)
.cuda()
.half()
)
r_blocksparse = multi_head_blocksparse(inputs, inputs, inputs)
# FIXME: currently has max diff of .009, perhaps can be improved.
assert_almost_equal(r_sdp, r_blocksparse)
|
bart_ls-main
|
xformers/tests/test_triton_blocksparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention import BlockSparseAttention, BlockSparseLocalAttention, BlockNoglobalAttention
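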
from xformers.components import MultiHeadDispatch
# BATCH = 1
# HEADS = 16
# SEQ = 4096
# EMB = 64
# BLOCK_SIZE = 512
# DROPOUT = 0.1
BATCH = 1
HEADS = 16
SEQ = 8192
EMB = 64 * HEADS
BLOCK_SIZE = 512
BLOCK_UNIT = 64
DROPOUT = 0
dtype = torch.float16
blocks = SEQ // BLOCK_UNIT
def pattern_to_layout(mask: torch.Tensor, block_size: int) -> torch.Tensor:
r"""
Given a mask pattern and blocksize, return the corresponding layout
which makes sure that all the positives in the mask are covered
"""
assert mask.ndim >= 2, "We're expecting [Heads, Seq, Seq] or [Seq, Seq]"
_should_squeeze = False
if mask.ndim == 2:
mask = mask.unsqueeze(0)
_should_squeeze = True
assert (
mask.shape[1] % block_size == 0 and mask.shape[2] % block_size == 0
), "We're only handling masks divisible by block_size"
# Coarsen the mask to block resolution: a block is active if any position inside it is active
layout = torch.nn.functional.max_pool2d(
mask.to(torch.float), kernel_size=block_size, stride=block_size
)
layout = layout.to(torch.long)
if _should_squeeze:
layout.squeeze_(0)
return layout
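# Example: with block_size=2, a 4x4 identity pattern coarsens to a 2x2 block-diagonal
# layout, since any block that contains at least one active position is marked active:
#   pattern_to_layout(torch.eye(4, dtype=torch.bool), 2) -> [[1, 0], [0, 1]]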
local_mask = torch.zeros(HEADS, SEQ, SEQ)
for block_start in range(0, SEQ, BLOCK_SIZE):
local_mask[:, block_start:block_start+BLOCK_SIZE, block_start:block_start+BLOCK_SIZE] = 1
local_layout_64 = pattern_to_layout(local_mask, 64)
local_layout_32 = pattern_to_layout(local_mask, 32)
attention_32 = BlockSparseAttention(layout=local_layout_32, block_size=32, dropout=DROPOUT, num_heads=HEADS)
attention_64 = BlockSparseAttention(layout=local_layout_64, block_size=64, dropout=DROPOUT, num_heads=HEADS)
test = torch.rand((2*HEADS, SEQ-100, 64)).cuda().half()
# att_32 = attention_32(test, test, test)[0]
# att_64 = attention_64(test, test, test)[0]
# # no issue here
# assert (attention_32(test, test, test) != attention_64(test, test, test)).sum() == 0
# test = test.transpose(1,2).reshape(2, SEQ, -1).half()
# multi_head_32 = (
# MultiHeadDispatch(
# seq_len=SEQ,
# dim_model=EMB,
# residual_dropout=0,
# num_heads=HEADS,
# attention=attention_32,
# )
# .cuda()
# .half()
# )
# multi_head_64 = (
# MultiHeadDispatch(
# seq_len=SEQ,
# dim_model=EMB,
# residual_dropout=0,
# num_heads=HEADS,
# attention=attention_64,
# )
# .cuda()
# .half()
# )
# att_val_32 = multi_head_32(query=test, key=test, value=test)
# att_val_64 = multi_head_64(query=test, key=test, value=test)
# # error here
# assert (att_val_32 != att_val_64).sum() == 0
# def build_local_layout(HEADS, block_size, block_unit, seq_len):
# local_block_units = block_size // block_unit
# layout = torch.zeros(HEADS, seq_len // block_unit, seq_len // block_unit)
# for block_start_idx in range(0, seq_len // block_unit, local_block_units):
# layout[:,block_start_idx:block_start_idx + local_block_units, block_start_idx:block_start_idx + local_block_units] = 1
# return layout
# layout_64 = build_local_layout(HEADS, BLOCK_SIZE, 64, SEQ)
# layout_32 = build_local_layout(HEADS, BLOCK_SIZE, 32, SEQ)
# assert (layout_32 == local_layout_32).float().mean() == 1
# assert (layout_64 == local_layout_64).float().mean() == 1
attention_32 = BlockSparseLocalAttention(seq_len=SEQ, block_size=BLOCK_SIZE, dropout=DROPOUT, num_heads=HEADS, block_unit=32)
attention_64 = BlockSparseLocalAttention(seq_len=SEQ, block_size=BLOCK_SIZE, dropout=DROPOUT, num_heads=HEADS, block_unit=64)
attention_base = BlockNoglobalAttention(dropout=DROPOUT, num_heads=HEADS, block_size=BLOCK_SIZE)
assert (attention_32(test, test, test) != attention_64(test, test, test)).sum() == 0
diff_index = (attention_32(test, test, test) != attention_base(test, test, test)).nonzero(as_tuple=True)
breakpoint()
assert (attention_32(test, test, test) != attention_base(test, test, test)).sum() == 0
|
bart_ls-main
|
xformers/tests/test_blocksparse_local.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
from xformers.components import Activation, build_activation
from xformers.triton.dropout import FusedDropoutBias
_triton_available = torch.cuda.is_available()
if _triton_available:
try:
from xformers.triton import dropout as triton_dropout
from xformers.triton.utils import gpu_capabilities_older_than_70
except ImportError:
logging.warning(
"Triton is not available, some optimizations will not be tested."
)
_triton_available = False
# Testing odd (non-power-of-two for instance) shapes on purpose
SHAPES = [
(384, 512),
(8, 384, 128),
(8, 784, 512),
(4, 16, 384),
(4, 16, 1024),
(2, 16, 2048),
(2, 16, 4096),
(1, 16, 12288),
]
def test_dropout_cpu():
triton_dropout = FusedDropoutBias(p=0.1, bias_shape=None)
x = torch.normal(0, 1, size=(16, 16), device="cpu")
_ = triton_dropout(x)
# Check eval means no dropout
triton_dropout.eval()
y = triton_dropout(x)
assert y.count_nonzero() == y.numel()
triton_dropout.train()
y = triton_dropout(x)
assert y.count_nonzero() != y.numel()
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [False, True])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("p", [0, 0.1, 0.5])
def test_dropout(shape, amp, bias, p):
"""
Check some basic dropout properties
"""
torch.random.manual_seed(0)
torch.cuda.manual_seed_all(0)
x = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b = (
torch.normal(0, 1, size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
with autocast(enabled=amp):
tol = 1e-2 if amp else 1e-5 # AMP rounding causes issues, 1e-5 is the default
# Check that 0 means no dropout
y = triton_dropout(x, p=0, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert torch.allclose(x_ref, y, rtol=tol), f"{x[x>y]}"
# Check that 1 means dropout for sure
y = triton_dropout(x, p=1, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert not torch.allclose(x_ref, y, rtol=tol)
# Check that the drops are different for every row (could catch broken seeds per row)
y = triton_dropout(x, p=0.5)
y = y.flatten(0, 1) if y.ndim == 3 else y
assert not torch.sum(torch.eq(y[0, :] == 0.0, y[1, :] == 0.0)) == y.shape[1]
# Check that the drops are different over time, for the same line
y_a = triton_dropout(x, p=0.5)
y_b = triton_dropout(x, p=0.5)
y_a = y_a.flatten(0, 1) if y_a.ndim == 3 else y_a
y_b = y_b.flatten(0, 1) if y_b.ndim == 3 else y_b
assert (
not torch.sum(torch.eq(y_a[0, :] == 0.0, y_b[0, :] == 0.0)).item()
== y.shape[1]
)
# Check that the drop probability is about right
y = triton_dropout(x, p=p)
drop_p = (y.numel() - y.count_nonzero()) / y.numel()
assert abs(drop_p - p) < 0.01
# Check that the same seeds lead to the same dropout
torch.manual_seed(0)
torch.cuda.manual_seed(0)
y_1 = triton_dropout(x, p=0.5)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
y_2 = triton_dropout(x, p=0.5)
assert torch.allclose(y_1, y_2)
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [False, True])
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("p", [0, 0.01, 0.5])
def test_dropout_parity(shape, amp, bias, activation, p):
"""
Check some basic dropout properties
"""
torch.random.manual_seed(0)
x = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b = (
torch.ones(size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
torch.random.manual_seed(0)
x_ = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b_ = (
torch.ones(size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
with autocast(enabled=amp):
torch_activation = build_activation(activation)
res_torch = torch.nn.functional.dropout(
torch_activation(x + b if b is not None else x), p=p
)
loss_torch = torch.sum(res_torch)
res_triton = triton_dropout(x=x_, p=p, bias=b_, activation=activation)
loss_triton = torch.sum(res_triton)
if p < 0.01:
# Check the FW pass
assert torch.allclose(
loss_torch, loss_triton, rtol=0.01
), f"{loss_torch} - {loss_triton}"
# Check the gradients
loss_torch.backward()
loss_triton.backward()
# - gradients wrt inputs
assert torch.allclose(
torch.norm(x.grad), torch.norm(x_.grad), rtol=0.01
), f"{x.grad}\n{x_.grad}"
# - gradients wrt bias
if bias:
assert torch.allclose(
torch.norm(b.grad), torch.norm(b_.grad), rtol=0.01
), f"{b.grad.norm()} - {b_.grad.norm()}"
|
bart_ls-main
|
xformers/tests/test_triton_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
import pytest
import torch
from xformers.components.attention import FavorAttention, ScaledDotProduct
from xformers.components.attention.feature_maps import (
FeatureMapType,
NormDistribution,
SMHyperbolic,
SMOrf,
SMReg,
)
_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
@pytest.mark.parametrize("features", [SMOrf, SMHyperbolic, SMReg])
def test_random_matrix(features):
torch.random.manual_seed(0)
DRAWS = 100
DIM = 10
for _ in range(DRAWS):
q = features._get_random_ortho_matrix(
1, DIM, device=_device, norm_distribution=NormDistribution.Xi
).squeeze(0)
# Check that the matrix is indeed orthonormal
torch.allclose(
torch.diag(q @ q.transpose(0, 1)),
torch.diag(torch.ones(10, device=_device)),
)
# Check that the row norm is in the right ballpark (sqrt(dim))
assert abs(torch.mean(torch.norm(q, dim=1)).item() - math.sqrt(DIM)) < 1.0
def _plot_distribution(ortho_feature_map):
# Debug helper, check the uniformity of the random matrix draws
DRAWS = 1000
DIM = 50
q = ortho_feature_map._get_random_ortho_matrix(DRAWS, DIM, device=_device)
x, y = [], []
for qq in q:
# For every matrix, look at the real and imaginary eigen value
e, _ = torch.eig(qq)
x.append(e[:, 0])
y.append(e[:, 1])
# Ideally the repartition of the real and imaginary eigenvalues
# should build a circle in the complex plane
import matplotlib.pyplot as plt
import seaborn as sns
sns.kdeplot(x=torch.cat(x).cpu().numpy(), y=torch.cat(y).cpu().numpy())
plt.axis("equal")
plt.savefig("kde.png")
def _get_rng_data(device):
emb = 10
batch_size = 2
seq_len = 20
num_heads = 1
shape = (batch_size * num_heads, seq_len, emb)
return torch.randn(shape, device=device)
def test_feature_map_shape():
# Check the delayed initialization of the feature map
nb_random_features = 1000
batch = _get_rng_data(_device)
att = FavorAttention(
dropout=0.0,
dim_features=nb_random_features,
feature_map_type=FeatureMapType.SMOrf,
)
_ = att(batch, batch, batch)
assert att.feature_map.features.shape[0] == batch.shape[-1]
assert att.feature_map.features.shape[1] == nb_random_features
def test_feature_map_redraw():
# Check the delayed initialization of the feature map
nb_random_features = 1000
batch = _get_rng_data(_device)
def check(should_redraw: bool):
att = FavorAttention(
dropout=0.0,
dim_features=nb_random_features,
feature_map_type=FeatureMapType.SMOrf,
iter_before_redraw=1 if should_redraw else 100,
)
v0 = att(batch, batch, batch)
assert att.feature_map is not None
f0 = att.feature_map.features
v1 = att(batch, batch, batch)
f1 = att.feature_map.features
# If a redraw happened between the two calls, the outputs (and the feature maps) must differ
assert should_redraw != torch.allclose(v0, v1)
assert should_redraw != torch.allclose(f0, f1) # type: ignore
check(should_redraw=True)
check(should_redraw=False)
@pytest.mark.parametrize("feature", ["sm_orf", "sm_hyp", "sm_reg"])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("normalize_inputs", [True, False])
@pytest.mark.parametrize("device", [_device])
def test_favor_approximation_accuracy(feature, causal, normalize_inputs, device):
# Run two attentions in parallel, the normal scaled dot product and the favor approximation
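# FAVOR+ replaces the softmax kernel with a random-feature approximation, so the two
# outputs are only expected to agree up to the tolerances asserted below.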
torch.random.manual_seed(0)
query, key, value = (
_get_rng_data(device),
_get_rng_data(device),
_get_rng_data(device),
)
for x in (query, key, value):
x.requires_grad = True
# Build the two attention heads
sdp_attention = ScaledDotProduct(dropout=0.0, causal=causal).to(device)
approx_attention = FavorAttention(
dropout=0.0,
causal=causal,
dim_head=10,
feature_map_type=FeatureMapType(feature),
normalize_inputs=normalize_inputs,
).to(device)
with torch.cuda.amp.autocast(enabled=_device.type == "cuda"):
standard_attention_result = sdp_attention(query, key, value)
approx_attention_result = approx_attention(query, key, value)
mismatch = torch.mean(
(standard_attention_result - approx_attention_result) ** 2
).item()
if causal:
# FIXME(@lefaudeux) the causal case seems significantly worse, not obvious why,
# could be worth investigating
assert mismatch < 0.6
else:
assert mismatch < 0.23
# Check trainability
torch.sum(approx_attention_result).backward()
if __name__ == "__main__":
_plot_distribution(SMOrf)
|
bart_ls-main
|
xformers/tests/test_favor.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
try:
from xformers.triton import FusedLayerNorm
from xformers.triton.utils import gpu_capabilities_older_than_70
_triton_available = True
except ImportError:
logging.warning("Triton is not available, some optimizations will not be tested.")
_triton_available = False
# Testing odd shapes on purpose
SHAPES = [
(384, 128),
(8, 384, 128),
(8, 784, 512),
(4, 2048, 384),
(4, 3136, 1024),
(2, 1024, 2048),
(2, 2048, 4096),
(2, 4096, 4096),
(1, 2048, 12288),
]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [True, False])
def test_layernorm_parity(shape, amp):
"""Check that PyTorch and Triton softmax give the same result"""
# Get the same inputs
torch.random.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
torch.random.manual_seed(0)
X_ = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
eps = 1e-5
# Initialize the two layers, weights are 1 and 0 by default, no randomness
torch_layernorm = torch.nn.LayerNorm(X.shape[-1], eps).to("cuda")
triton_layernorm = FusedLayerNorm(X.shape[-1], eps).to("cuda")
with autocast(enabled=amp):
assert torch.allclose(X, X_) # sanity checking, else all hell breaks loose
# Check the forward pass
y_torch = torch_layernorm(X)
y_triton = triton_layernorm(X_)
assert torch.allclose(
y_torch.norm(), y_triton.norm(), atol=1e-3
), f"{torch.norm(y_torch)} vs. {torch.norm(y_triton)}"
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton)
loss_triton.backward()
print(torch.norm(y_torch), torch.norm(y_triton))
print(y_torch[0, :])
print(y_triton[0, :])
# There are 3 items to check:
# - gradient on the inputs
assert torch.allclose(
X.grad, X_.grad
), f"Inputs grad mismatch: {torch.norm(X.grad)} vs. {torch.norm(X_.grad)}"
# - gradient on the layernorm weight
assert torch.allclose(
torch_layernorm.weight.grad, triton_layernorm.weight.grad, atol=1e-3
), (
f"Weight grad mismatch: {torch.norm(torch_layernorm.weight.grad)} vs."
+ f" {torch.norm(triton_layernorm.weight.grad)}"
)
# - gradient on the layernorm bias
assert torch.allclose(
torch_layernorm.bias.grad, triton_layernorm.bias.grad, atol=1e-3
), (
f"Bias grad mismatch: {torch.norm(torch_layernorm.bias.grad)} vs."
+ f" {torch.norm(triton_layernorm.bias.grad)}"
)
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
def test_no_contiguous():
"""Check that we don't choke on non-contigous tensors"""
shape = (8, 384, 128)
# Get the same inputs
torch.random.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
X = X.transpose(2, 1).contiguous().transpose(2, 1)
assert not X.is_contiguous()
triton_layernorm = FusedLayerNorm(X.shape[-1]).to("cuda")
_ = triton_layernorm(X)
|
bart_ls-main
|
xformers/tests/test_triton_layernorm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention.utils import (
maybe_merge_masks,
reshape_key_padding_mask,
)
def test_reshape_key_padding_mask():
batch_size = 2
num_heads = 2
seq_len = 4
batched_dim = batch_size * num_heads
key_padding_mask = torch.randint(0, 2, (batch_size, seq_len)).to(dtype=torch.bool)
reshaped_mask = reshape_key_padding_mask(
key_padding_mask=key_padding_mask, batched_dim=batched_dim
)
assert reshaped_mask.size() == (batched_dim, 1, seq_len)
merged_mask = maybe_merge_masks(
att_mask=None,
key_padding_mask=key_padding_mask,
batch_size=batch_size,
src_len=seq_len,
num_heads=num_heads,
)
assert torch.equal(merged_mask, reshaped_mask.expand(-1, seq_len, -1))
key_padding_mask = torch.randint(0, 2, (batched_dim, seq_len)).to(dtype=torch.bool)
reshaped_mask = reshape_key_padding_mask(
key_padding_mask=key_padding_mask, batched_dim=batched_dim
)
assert reshaped_mask.size() == (batched_dim, 1, seq_len)
|
bart_ls-main
|
xformers/tests/test_attention_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
try:
from xformers.triton import log_softmax as triton_log_softmax
from xformers.triton import softmax as triton_softmax
_triton_available = True
except ImportError as e:
logging.warning(
f"Triton is not available, some optimizations will not be tested.\n{e}"
)
_triton_available = False
SHAPES = [
(384, 384),
(2, 384, 384),
(1, 784, 784),
(1, 1024, 1024),
(1, 2048, 2048),
(1, 3136, 3136),
(1, 4096, 4096),
]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [False, True])
@pytest.mark.parametrize("log", [False, True])
@pytest.mark.parametrize("masking", [True, False])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
def test_softmax_parity(shape, amp, log, masking, causal, contiguous):
"""Check that PyTorch and Triton softmax give the same result"""
torch.random.manual_seed(0)
# Check the result of a FW pass
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=False)
if not contiguous:
# Make sure that the buffer is not contiguous
X = X.transpose(-2, -1).contiguous().transpose(-2, -1)
X_ = X.clone()
X.requires_grad = True
X_.requires_grad = True
seq = shape[1]
mask = torch.zeros((seq, seq)).cuda()
if masking:
mask[torch.rand((seq, seq)) > 0.8] = -float("inf")
mask_triton = mask.clone() if masking else None
if causal:
mask[~torch.tril(torch.ones_like(mask)).bool()] = -float("inf")
with autocast(enabled=amp):
y_torch = (
torch.log_softmax(X + mask, dim=-1)
if log
else torch.softmax(X + mask, dim=-1)
)
y_triton = (
triton_log_softmax(X_, mask_triton, causal)
if log
else triton_softmax(X_, mask_triton, causal)
)
assert torch.allclose(y_torch, y_triton, equal_nan=True)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch.transpose(-2, -1) @ y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton.transpose(-2, -1) @ y_triton)
loss_triton.backward()
assert torch.allclose(
torch.norm(X.grad), torch.norm(X_.grad), equal_nan=True, atol=1e-5
), f"{torch.norm(X.grad)}, {torch.norm(X_.grad)}"
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_softmax_fp16(dtype):
b, s, d = 8, 64, 32
a = torch.rand(b, s, d, device="cuda", dtype=dtype)
triton_softmax(a)
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.parametrize("log", [False, True])
@pytest.mark.parametrize("masking", [True, False])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
def test_softmax_parity_fallback(log, masking, causal, contiguous):
"""Check that the fallback paths are correct"""
torch.random.manual_seed(0)
shape = (16, 16)
# Check the result of a FW pass
X = torch.normal(0, 1, size=shape, device="cpu", requires_grad=False)
if not contiguous:
# Make sure that the buffer is not contiguous
X = X.transpose(-2, -1).contiguous().transpose(-2, -1)
X_ = X.clone()
X.requires_grad = True
X_.requires_grad = True
seq = shape[1]
mask = torch.zeros((seq, seq))
if masking:
mask[torch.rand((seq, seq)) > 0.8] = -float("inf")
if causal:
mask[~torch.tril(torch.ones_like(mask)).bool()] = -float("inf")
y_torch = (
torch.log_softmax(X + mask, dim=-1) if log else torch.softmax(X + mask, dim=-1)
)
y_triton = (
triton_log_softmax(X_, mask, causal)
if log
else triton_softmax(X_, mask, causal)
)
assert torch.allclose(y_torch, y_triton, equal_nan=True)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch.transpose(-2, -1) @ y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton.transpose(-2, -1) @ y_triton)
loss_triton.backward()
assert torch.allclose(
torch.norm(X.grad), torch.norm(X_.grad), equal_nan=True, atol=1e-5
), f"{torch.norm(X.grad)}, {torch.norm(X_.grad)}"
|
bart_ls-main
|
xformers/tests/test_triton_softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
bart_ls-main
|
xformers/tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import pytest
import torch
import xformers.components.attention.attention_patterns as AP
# baseline implementations
def _local_1d_pattern(attn_size: int, window_size: int) -> torch.Tensor:
assert (
window_size % 2 == 1
), "The window size is assumed to be odd (counts self-attention + 2 wings)"
h_win_size = window_size // 2
attn_shape = (attn_size, attn_size)
full_attn = torch.ones(attn_shape, dtype=torch.bool)
mask = torch.tril(full_attn, diagonal=h_win_size)
mask &= ~torch.tril(full_attn, diagonal=-(h_win_size + 1))
return mask
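# For example, _local_1d_pattern(5, 3) keeps positions with |i - j| <= 1, i.e. a
# tri-diagonal band:
#   [[1, 1, 0, 0, 0],
#    [1, 1, 1, 0, 0],
#    [0, 1, 1, 1, 0],
#    [0, 0, 1, 1, 1],
#    [0, 0, 0, 1, 1]]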
def _generate_2d_grid(H, W):
i = torch.arange(H)
j = torch.arange(W)
i, j = torch.meshgrid(i, j)
return i, j
def _horizontal_axial_2d_distance(H, W, p=2.0):
i, _ = _generate_2d_grid(H, W)
ij = i.reshape(-1, 1).float()
d = torch.cdist(ij, ij, p=p)
return d
def _vertical_axial_2d_distance(H, W, p=2.0):
_, j = _generate_2d_grid(H, W)
ij = j.reshape(-1, 1).float()
d = torch.cdist(ij, ij, p=p)
return d
def _local_2d_distance(H, W, p=2.0):
# axial is a special case with p=0 and distance=2
i, j = _generate_2d_grid(H, W)
ij = torch.stack([i.flatten(), j.flatten()], 1).float()
d = torch.cdist(ij, ij, p=p)
return d
def _local_2d_gaussian_distribution(H, W, sigma=1.0):
d = _local_2d_distance(H, W, p=2.0) ** 2
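# Gaussian kernel over the squared Euclidean grid distance: exp(-dist^2 / (2 * sigma^2))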
d = torch.exp(-0.5 * sigma ** (-2.0) * d)
return d
@pytest.mark.parametrize("window_size", [3, 7, 11])
@pytest.mark.parametrize("attn_size", [50, 51, 64])
def test_local_1d_pattern(attn_size, window_size):
mask = AP.local_1d_pattern(attn_size, window_size).float()
mask_ref = _local_1d_pattern(attn_size, window_size).float()
assert torch.allclose(mask, mask_ref)
@pytest.mark.parametrize("p", [0, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_horizontal_axial_2d_distance(H, W, p):
d = AP.horizontal_axial_2d_distance(H, W, p=p)
d_ref = _horizontal_axial_2d_distance(H, W, p=p)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("p", [0, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_vertical_axial_2d_distance(H, W, p):
d = AP.vertical_axial_2d_distance(H, W, p=p)
d_ref = _vertical_axial_2d_distance(H, W, p=p)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("p", [0, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_local_2d_distance(H, W, p):
d = AP.local_2d_distance(H, W, p=p)
d_ref = _local_2d_distance(H, W, p=p)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("sigma", [0.5, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_local_2d_gaussian_distribution(H, W, sigma):
d = AP.local_2d_gausian_distribution(H, W, sigma=sigma)
d_ref = _local_2d_gaussian_distribution(H, W, sigma=sigma)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("window_size", [2, 4])
@pytest.mark.parametrize("W", [8, 16])
@pytest.mark.parametrize("H", [8, 16])
def test_swin_attention_pattern(H, W, window_size):
# test non-shifted case
d = AP.swin_attention_pattern(H, W, window_size, shift_size=0)
# partition the self-attention into regions of window_size
# similar to the window_partition function from the original paper
h = H // window_size
w = W // window_size
d = d.reshape(h, window_size, w, window_size, h, window_size, w, window_size)
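# After the reshape, the first four axes index the query position
# (window row, offset in window, window col, offset) and the last four index the key position.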
for y, x in itertools.product(range(h), range(w)):
# every region should fully attend to itself
assert torch.all(d[y, :, x, :, y, :, x, :])
for y2, x2 in itertools.product(range(h), range(w)):
if y == y2 or x == x2:
continue
# different regions shouldn't attend between each other
assert torch.all(~d[y, :, x, :, y2, :, x2, :])
# test shifted case
# in the shifted case, the self-attention should be the same
# as in the non-shifted case, when we pad the inputs, apply the operations and then
# remove the padding from the result
d_shifted = AP.swin_attention_pattern(
H, W, window_size, shift_size=window_size // 2
)
# add padding and remove shift
h = H + window_size
w = W + window_size
d_padded = AP.swin_attention_pattern(h, w, window_size, shift_size=0)
d_padded = d_padded.reshape(h, w, h, w)
# remove padding elements
half_size = window_size // 2
s = slice(half_size, -half_size)
d_padded = d_padded[s, s, s, s].reshape(H * W, H * W)
assert torch.all(d_padded == d_shifted)
@pytest.mark.parametrize("k", [2, 3])
@pytest.mark.parametrize("W", [8, 15])
@pytest.mark.parametrize("H", [8, 15])
def test_dilated_2d_pattern(H, W, k):
d = AP.dilated_2d_pattern(H, W, k)
d = d.reshape(H, W, H, W)
for h, w in itertools.product(range(H), range(W)):
i = h % k
j = w % k
# every kth element is taken
assert torch.all(d[h, w][i::k, j::k])
for ii, jj in itertools.product(range(k), range(k)):
if ii == i and jj == j:
continue
# and the other elements are discarded
assert torch.all(~d[h, w][ii::k, jj::k])
def test_pattern_to_layout():
BLOCK = 16
SIZE = 128
LAYOUT_SIZE = SIZE // BLOCK
# All ones
mask1 = torch.ones((SIZE, SIZE), dtype=torch.bool)
layout1 = AP.pattern_to_layout(mask1, BLOCK)
ref1 = torch.ones((LAYOUT_SIZE, LAYOUT_SIZE), dtype=torch.long)
assert torch.allclose(layout1, ref1)
# Diagonal -> expect block diagonal
mask2 = torch.eye(SIZE, dtype=torch.bool)
layout2 = AP.pattern_to_layout(mask2, BLOCK)
ref2 = torch.eye(LAYOUT_SIZE, dtype=torch.long)
assert torch.allclose(layout2, ref2)
# Lower triangular, without the diagonal
# note that the layout still needs the diagonal blocks, else the coefficients close to the diagonal would not be computed
mask3 = torch.tril(torch.ones((SIZE, SIZE)), diagonal=-1).to(torch.bool)
layout3 = AP.pattern_to_layout(mask3, BLOCK)
ref3 = torch.tril(torch.ones((LAYOUT_SIZE, LAYOUT_SIZE)), diagonal=0).to(torch.long)
assert torch.allclose(layout3, ref3)
# Handle heads properly
mask = torch.cat((mask1, mask2, mask3))
layout = AP.pattern_to_layout(mask, BLOCK)
assert torch.allclose(layout, torch.cat((ref1, ref2, ref3)))
# Catch problematic dimensions
mask_off = torch.ones((SIZE + 3, SIZE), dtype=torch.bool)
with pytest.raises(AssertionError):
AP.pattern_to_layout(mask_off, BLOCK)
def test_alibi_pattern():
mask = AP.alibi_pattern(1e-3, (16, 128, 128))
# Minor, check that all the top left corners are True
assert torch.sum(mask[:, 0, 0]) == 16
|
bart_ls-main
|
xformers/tests/test_attention_patterns.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.attention import AttentionMask
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_mask_creation():
# Check that we can create from boolean
bool_mask = torch.rand((256, 256)) > 0.5
additive_mask = AttentionMask.from_bool(bool_mask)
assert (bool_mask == additive_mask.to_bool()).all()
bool_mask = torch.rand((2, 256, 256)) > 0.5
additive_mask = AttentionMask.from_bool(bool_mask)
assert (bool_mask == additive_mask.to_bool()).all()
assert additive_mask.ndim == bool_mask.ndim
# Check that we can create from multiplicative
ref_mask = torch.randint(0, 2, (256, 256))
mul_mask = ref_mask.float()
additive_mask = AttentionMask.from_multiplicative(mul_mask)
assert (ref_mask.bool() == additive_mask.to_bool()).all()
# Check the causal mask
causal_mask = AttentionMask.make_causal(256, 256)
assert (torch.tril(torch.ones(256, 256)).bool() == causal_mask.to_bool()).all()
assert causal_mask.is_causal
causal_mask = AttentionMask.make_causal(256)
assert (torch.tril(torch.ones(256, 256)).bool() == causal_mask.to_bool()).all()
causal_mask = AttentionMask.make_causal(256, 128)
assert (torch.tril(torch.ones(256, 128)).bool() == causal_mask.to_bool()).all()
# Check that we can add masks
bool_mask_1 = torch.rand((256, 256)) > 0.5
add_mask_1 = AttentionMask.from_bool(bool_mask_1)
bool_mask_2 = torch.rand((256, 256)) > 0.5
add_mask_2 = AttentionMask.from_bool(bool_mask_2)
assert ((add_mask_1 + add_mask_2).to_bool() == (bool_mask_1 & bool_mask_2)).all()
# Check type handling
additive_mask = AttentionMask.from_bool(torch.rand((256, 256)) > 0.5)
additive_mask = additive_mask.to(device=torch.device("cuda"))
assert "cuda" in str(additive_mask.values.device)
# Check that the causal flag is maintained
additive_mask = AttentionMask.make_causal(256, 256)
additive_mask = additive_mask.to(device=torch.device("cuda"))
assert additive_mask.is_causal
|
bart_ls-main
|
xformers/tests/test_attention_mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components import MultiHeadDispatch
# Automatically test all the registered attentions
from xformers.components.attention import (
_DENSITY_THRESHOLD,
ATTENTION_REGISTRY,
build_attention,
)
DEVICES = (
[torch.device("cpu")] if not torch.cuda.is_available() else [torch.device("cuda")]
)
BATCH = 2
SEQ = 128 if torch.cuda.is_available() else 32
MODEL = 128 if torch.cuda.is_available() else 64
GLOBAL_ATTENTION_RATIO = (
_DENSITY_THRESHOLD * 0.9
) # Make sure that we test the sparse implementation, no matter the threshold
assert ATTENTION_REGISTRY.keys(), "Attention layers should have been registered"
@pytest.mark.parametrize("attn_dropout", [0.0, 0.3])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("rules", [1, 4])
@pytest.mark.parametrize("q_compose", [False, True])
@pytest.mark.parametrize("dim_selection", [MODEL // 2, None])
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("qk_rule", [True, False])
@pytest.mark.parametrize("nonlinear", [True, False])
@pytest.mark.parametrize("device", DEVICES)
def test_build_and_run(
heads: int,
attn_dropout: float,
causal: bool,
rules: int,
q_compose: bool,
dim_selection: int,
bias: bool,
qk_rule: bool,
nonlinear: bool,
device: torch.device,
):
torch.manual_seed(42)
test_config = {
"name": "compositional",
"dropout": attn_dropout,
"causal": causal,
"seq_len": SEQ,
"window_size": SEQ // 8 + 1, # local attention
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"dim_model": MODEL,
"num_heads": heads,
"num_rules": 2, # Compositional Attention
"q_compose": q_compose,
"rules": rules,
"dim_selection": dim_selection,
"bias": bias,
"qk_rule": qk_rule,
"nonlinear": nonlinear,
}
# Add some blocksparse layout to test the corresponding attention
block_size = 16
test_config["layout"] = torch.eye(
SEQ // block_size, SEQ // block_size, dtype=torch.long
)
test_config["block_size"] = block_size
attention = build_attention(test_config)
# build a multi head dispatch to test this attention mechanism
multi_head = MultiHeadDispatch(
seq_len=SEQ,
dim_model=MODEL,
num_heads=heads,
attention=attention,
residual_dropout=0.0,
).to(device)
# Check that a shuffled input produces the same results
seqs = [SEQ, SEQ - 16]
for seq in seqs:
# Check that we can pass a smaller sequence
inputs = torch.rand(BATCH, seq, MODEL, device=device)
shuffle = torch.randperm(inputs.shape[1])
inputs_shuffled = inputs[:, shuffle, :].clone()
results = multi_head(inputs, inputs, inputs)
results_shuffled = multi_head(inputs_shuffled, inputs_shuffled, inputs_shuffled)
torch.allclose(results[:, shuffle, :], results_shuffled)
# Test the non-self-attention codepath
att = multi_head(inputs, inputs_shuffled, inputs)
# Check that dropout actually drops some values
if attn_dropout > 0:
att_2 = multi_head(inputs, inputs_shuffled, inputs)
assert (att != att_2).any()
|
bart_ls-main
|
xformers/tests/test_compositional_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.attention._sputnik_sparse import SparseCS
from xformers.components.attention.core import scaled_dot_product_attention
_devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
def test_core_attention():
b, s, d = 8, 900, 32
prob = 0.95
a = torch.rand(b, s, d)
m = torch.rand(b, s, s) > prob
m = m.to_sparse()
# Check that the sparse and dense computations are equivalent
r_sparse = scaled_dot_product_attention(a, a, a, m)
r_dense = scaled_dot_product_attention(a, a, a, m.to_dense())
assert torch.allclose(r_sparse, r_dense)
def test_core_attention_mask_types():
b, s, d = 8, 900, 32
prob = 0.8 # make sure that we trigger the sparse kernels
a = torch.rand(b, s, d)
mask = torch.rand(b, s, s) > prob
# mask of bools
r_dense_bool = scaled_dot_product_attention(a, a, a, mask)
r_sparse_bool = scaled_dot_product_attention(a, a, a, mask.to_sparse())
assert torch.allclose(r_dense_bool, r_sparse_bool)
# Test additive mask. Mask of 0's and -infs.
float_mask_add = torch.zeros_like(mask, dtype=torch.float)
float_mask_add = float_mask_add.masked_fill(mask, float("-inf"))
r_dense_add = scaled_dot_product_attention(a, a, a, float_mask_add)
r_sparse_add = scaled_dot_product_attention(a, a, a, float_mask_add.to_sparse())
# Now properly handled
assert torch.allclose(r_dense_add, r_sparse_add)
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_dense_no_mask(device):
b, s, d = 8, 64, 32
a = torch.rand(b, s, d, device=device)
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, att_mask=None)
expected_device = torch.float16 if device == "cuda" else torch.float32
assert r.dtype == expected_device
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_dense(device):
b, s, d = 8, 64, 32
prob = 0.9
a = torch.rand(b, s, d, device=device)
m = torch.rand(s, s, device=device) > prob
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, m)
expected_device = torch.float16 if device == "cuda" else torch.float32
assert r.dtype == expected_device
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_sparse(device):
b, s, d = 8, 64, 32
prob = 0.9
a = torch.rand(b, s, d, device=device)
m = torch.rand(s, s, device=device) > prob
m = m.to_sparse()
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, m)
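# Unlike the dense path above, the torch.sparse path does not run in fp16 under
# autocast, so the output is expected to stay in fp32.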
expected_device = torch.float32
assert r.dtype == expected_device
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_sparsecs(device):
b, s, d = 8, 64, 32
prob = 0.9
a = torch.rand(b, s, d, device=device)
m = torch.rand(s, s, device=device) > prob
m = SparseCS(m, device)
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, m)
expected_device = torch.float32
assert r.dtype == expected_device
|
bart_ls-main
|
xformers/tests/test_core_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
from xformers.components import Activation, build_activation
_triton_available = torch.cuda.is_available()
if _triton_available:
try:
from xformers.triton import FusedLinear
from xformers.triton.k_activations import get_triton_activation_kernel
from xformers.triton.k_fused_matmul_fw import fused_matmul
from xformers.triton.utils import gpu_capabilities_older_than_70
except ImportError:
logging.warning(
"Triton is not available, some optimizations will not be tested."
)
_triton_available = False
SHAPES = [(128, 256), (8, 384, 128), (8, 784, 512)]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize(
"dtype", [torch.float32]
) # Triton uses tensor cores, which can return slightly different results from PyTorch's mm
def test_fused_matmul(shape, dtype):
""" Check that the matrix multiply kernel and Pytorch's give the same results"""
torch.random.manual_seed(0)
# Raw fused matrix multiply first, to catch gross errors
a = torch.rand((shape[-2], shape[-1]), dtype=dtype, device="cuda")
b = torch.rand((shape[-1], shape[-2]), dtype=dtype, device="cuda")
# Test that not passing any bias is fine
res_torch = a @ b
res_triton, _ = fused_matmul(a, b.transpose(0, 1), None)
assert torch.allclose(res_torch, res_triton), "Vanilla matmul is broken"
# Now test with a real FMA
c = -torch.rand((shape[-2],), dtype=dtype, device="cuda")
res_torch = torch.addmm(c, a, b)
res_triton, _ = fused_matmul(a, b.transpose(1, 0), c)
assert torch.allclose(
res_torch, res_triton
), f"Vanilla fused matmul is broken {torch.max(torch.abs(res_torch-res_triton)).item()}"
# Now check that adding an activation to the mix still produces valid results
for activation in Activation:
torch_activation = build_activation(activation.value)
res_torch = torch_activation(torch.addmm(c, a, b))
triton_activation = get_triton_activation_kernel(activation)
res_triton, _ = fused_matmul(a, b.transpose(1, 0), c, triton_activation)
# NOTE: @lefaudeux
# GeLUs are not well handled for now; we use an approximation.
# They're also slower than PyTorch, so not likely to be used.
# Issue tracked with https://github.com/fairinternal/xformers/issues/238
tol = 1e-6 if activation != Activation.GeLU else 1e-2
assert torch.allclose(
res_torch, res_triton, atol=tol
), f"Fused matmul broken with activation {activation}. Max diff: {torch.max(torch.abs(res_torch - res_triton))}"
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("activation", [None] + [a.value for a in Activation]) # type: ignore
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("amp", [True]) # FIXME: @lefaudeux check the fp32 case
def test_fused_linear_parity(shape, activation: Activation, bias: bool, amp: bool):
"""Check that PyTorch and fused linear layers give the same result"""
# Instantiate pytorch and fused layers, same initialization
torch.random.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda")
X.requires_grad_()
torch_linear = torch.nn.Linear(shape[-1], shape[-1] // 2, bias=bias).to("cuda")
torch_activation = build_activation(activation)
torch_sequence = torch.nn.Sequential(torch_linear, torch_activation)
torch.random.manual_seed(0)
X_ = torch.normal(0, 1, size=shape, device="cuda")
X_.requires_grad_()
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize the
# `FusedLinear` import.
triton_fused_linear = FusedLinear(
shape[-1], shape[-1] // 2, bias=bias, activation=activation
).to("cuda")
# Now check parity
torch_linear.train()
triton_fused_linear.train()
torch_linear.zero_grad()
triton_fused_linear.zero_grad()
assert torch.allclose(
triton_fused_linear.weight, torch_linear.weight
), "Broken test setup"
assert torch.allclose(X, X_), "Broken test setup"
with autocast(enabled=amp):
tolerance = 1e-3 if not amp else 1e-2
y_torch = torch_sequence(X)
y_triton = triton_fused_linear(X_)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton)
loss_triton.backward()
assert torch.allclose(X, X_, atol=tolerance), f"{X} vs. {X_}"
# Input grad being correct checks both the loss + some of the backward pass
assert torch.allclose(
X.grad, X_.grad, atol=tolerance
), f"{X.grad} vs. {X_.grad}"
# Check that the linear layer bias are also properly trainable
if bias:
assert triton_fused_linear.bias is not None
assert triton_fused_linear.bias.grad is not None
assert torch.allclose(
torch_linear.bias.grad, triton_fused_linear.bias.grad, atol=tolerance
), f"{torch_linear.bias.grad} vs. {triton_fused_linear.bias.grad}"
# Check that the linear layer weights are also properly trainable
assert torch.allclose(
torch_linear.weight.grad,
triton_fused_linear.weight.grad,
atol=tolerance,
), f"{torch_linear.weight.grad} vs. {triton_fused_linear.weight.grad}"
|
bart_ls-main
|
xformers/tests/test_triton_fused_linear.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.positional_embedding import (
POSITION_EMBEDDING_REGISTRY,
build_positional_embedding,
)
BATCH = 20
SEQ = 512
MODEL = 384
assert (
POSITION_EMBEDDING_REGISTRY.keys()
), "Positional encoding layers should have been registered"
@pytest.mark.parametrize("encoding_name", POSITION_EMBEDDING_REGISTRY.keys())
@pytest.mark.parametrize("dropout", [0.0, 0.2])
def test_dimensions(encoding_name: str, dropout: float):
test_config = {
"name": encoding_name,
"dim_model": MODEL,
"vocab_size": 32,
"dropout": dropout,
"seq_len": SEQ,
}
# dummy, just check construction and dimensions in the FW pass
encoding = build_positional_embedding(test_config)
inputs = (torch.rand(BATCH, SEQ) * 10).abs().to(torch.int)
_ = encoding(inputs)
# Test that inputs that already have an embedding dimension also work
if encoding_name == "sine":
inputs = (torch.rand(BATCH, SEQ, MODEL) * 10).abs().to(torch.int)
_ = encoding(inputs)
|
bart_ls-main
|
xformers/tests/test_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
# needed to register custom ops
import xformers # noqa: F401
import xformers.components.attention.core
from xformers.components.attention._sputnik_sparse import _csr_to_coo
from xformers.components.attention.core import (
_broadcast_batch,
_create_random_sparsity,
_sparse_bmm,
)
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
_devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
def _baseline_matmul_with_sparse_mask(
a: torch.Tensor, b: torch.Tensor, mask: torch.Tensor
) -> torch.Tensor:
assert a.ndim == b.ndim
assert mask.ndim == a.ndim
assert a.shape[-1] == b.shape[-2]
assert a.shape[-2] == mask.shape[-2], f"{a.shape}, {mask.shape}"
assert b.shape[-1] == mask.shape[-1], f"{b.shape}, {mask.shape}"
assert a.shape[:-2] == b.shape[:-2], f"{a.shape}, {b.shape}"
assert a.shape[:-2] == mask.shape[:-2], f"{a.shape}, {mask.shape}"
idxs = mask.indices().unbind()
b = b.transpose(-2, -1)
# compute matmul for elements within the mask
val = (a[idxs[:-2] + (idxs[-2], slice(None))] * b[idxs[:-2] + (idxs[-1], slice(None))]).sum(-1) # type: ignore
out_shape = a.shape[:-1] + (b.shape[-2],)
res = torch.sparse_coo_tensor(torch.stack(idxs), val, out_shape)
return res
def _baseline_matmul_with_dense_mask(
a: torch.Tensor, b: torch.Tensor, mask: torch.Tensor
) -> torch.Tensor:
res = a @ b
res[~mask] = float("-inf")
return res
def _baseline_sparse_bmm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
# need to use torch.sparse.mm to get gradients wrt sparse matrix a
# TODO implement this in C++ / CUDA as this is slow!
out = []
for ai, bi in zip(a, b):
out.append(torch.sparse.mm(ai, bi))
return torch.stack(out, dim=0)
@pytest.mark.parametrize("is_sparse", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
def test_matmul_with_mask(device, contiguous, is_sparse):
B, L, K = 8, 30, 32
prob = 0.5
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, K, L, device=device)
if not contiguous:
a = a.transpose(-2, -1).contiguous().transpose(-2, -1)
b = b.transpose(-2, -1).contiguous().transpose(-2, -1)
mask = torch.rand(B, L, L, device=device) > prob
fn = torch.ops.xformers.matmul_with_mask
fn_gt = _baseline_matmul_with_dense_mask
if is_sparse:
mask = mask.to_sparse()
fn_gt = _baseline_matmul_with_sparse_mask
res = fn(a, b, mask)
res_gt = fn_gt(a, b, mask)
if is_sparse:
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("is_sparse", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
def test_matmul_with_mask_backward(device, contiguous, is_sparse):
if device == "cuda" and is_sparse is False:
# Skip test for now due to bug in torch 1.8
# See https://github.com/pytorch/pytorch/issues/54975
# Broken CUDA / torch 1.8 combination, awaiting an update
return
B, L, K = 8, 10, 16
prob = 0.5
a = torch.rand(B, L, K, device=device, requires_grad=True)
b = torch.rand(B, K, L, device=device, requires_grad=True)
if not contiguous:
a = a.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
b = b.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
mask = torch.rand(B, L, L, device=device) > prob
fn = torch.ops.xformers.matmul_with_mask
fn_gt = _baseline_matmul_with_dense_mask
if is_sparse:
mask = mask.to_sparse()
fn_gt = _baseline_matmul_with_sparse_mask
def compute_grads(f):
out = f(a, b, mask)
if is_sparse:
out = out.to_dense()
out.sum().backward()
compute_grads(fn)
grad_a = a.grad.clone()
grad_b = b.grad.clone()
a.grad = None
b.grad = None
compute_grads(fn_gt)
assert torch.allclose(grad_a, a.grad)
assert torch.allclose(grad_b, b.grad)
@pytest.mark.parametrize("device", _devices)
def test_sddmm_sputnik(device):
B, L, M, K = 8, 30, 16, 32
prob = 0.5
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device).transpose(-2, -1)
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
fn = xformers.components.attention.core._matmul_with_mask
mask = mask.to_sparse()
res = fn(a, b, mask_csr)
res_gt = fn(a, b, mask)
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@cuda_only
@pytest.mark.parametrize("K", [32, 17])
@pytest.mark.parametrize("M", [30, 17])
@pytest.mark.parametrize("L", [30, 17])
def test_sddmm_csr(L, M, K):
device = torch.device("cuda")
# TODO add more checks for different nnz
B = 8
prob = 0.5
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
row_indices = mask_csr.row_indices
row_offsets = mask_csr.row_offsets
column_indices = mask_csr.column_indices
fn = torch.ops.xformers.csr_sddmm
fn_gt = torch.ops.xformers.sddmm_sputnik
res = fn(a, b, row_indices, row_offsets, column_indices)
res_gt = fn_gt(a, b, row_indices, row_offsets, column_indices)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt, atol=1e-6)
@cuda_only
@pytest.mark.parametrize("nnz", [4, 16, 20, 36])
def test_sddmm_csr_per_nnz(nnz):
device = torch.device("cuda")
B = 8
L, M, K = 1024, 1024, 32
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = torch.zeros(L, M, dtype=torch.bool, device=device)
mask.view(-1)[: nnz - 1] = True
mask[-1, -1] = True
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
row_indices = mask_csr.row_indices
row_offsets = mask_csr.row_offsets
column_indices = mask_csr.column_indices
fn = torch.ops.xformers.csr_sddmm
fn_gt = torch.ops.xformers.sddmm_sputnik
res = fn(a, b, row_indices, row_offsets, column_indices)
res_gt = fn_gt(a, b, row_indices, row_offsets, column_indices)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt, atol=1e-6)
@cuda_only
@pytest.mark.parametrize("K", [32, 17])
@pytest.mark.parametrize("M", [30, 17])
@pytest.mark.parametrize("L", [30, 17])
def test_sddmm_coo(L, M, K):
device = torch.device("cuda")
# TODO add more checks for different nnz
B = 8
prob = 0.5
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
row_indices = mask_csr.row_indices
row_offsets = mask_csr.row_offsets
column_indices = mask_csr.column_indices
fn = torch.ops.xformers.coo_sddmm
fn_gt = torch.ops.xformers.sddmm_sputnik
# convert from csr to coo
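# (CSR stores one offset per row into column_indices; COO stores an explicit row index per non-zero.)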
row_coo, _ = _csr_to_coo(L, M, row_offsets, column_indices)
res = fn(a, b, row_indices, row_coo, column_indices)
res_gt = fn_gt(a, b, row_indices, row_offsets, column_indices)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt, atol=1e-6)
@pytest.mark.parametrize("device", _devices)
def test_sddmm_sputnik_backward(device):
contiguous = True
B, L, M, K = 8, 10, 16, 32
prob = 0.5
a = torch.rand(B, L, K, device=device, requires_grad=True)
b = torch.rand(B, M, K, device=device).transpose(-2, -1).requires_grad_(True)
if not contiguous:
a = a.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
b = b.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
fn = xformers.components.attention.core._matmul_with_mask
mask = mask.to_sparse()
out_csr = fn(a, b, mask_csr)
out_csr.values.sum().backward()
grad_a = a.grad.clone()
grad_b = b.grad.clone()
a.grad = None
b.grad = None
# fn(a[None], b[None], mask).coalesce().values().sum().backward() # TODO check why this fails
fn(a, b, mask).to_dense().sum().backward()
assert torch.allclose(grad_a, a.grad, atol=1e-7)
assert torch.allclose(grad_b, b.grad, atol=1e-7)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax_sputnik(device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core._softmax
a = a.to_sparse()
res = fn(a_csr)
res_gt = fn(a)
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax_sputnik_backward(device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core._softmax
a = a.to_sparse()
a_csr.values.requires_grad_(True)
fn(a_csr).values.sum().backward()
grad_a = a_csr.values.grad.clone()
a.requires_grad_(True)
fn(a).coalesce().values().sum().backward()
assert torch.allclose(
grad_a, a.grad.coalesce().values().reshape_as(grad_a), atol=1e-7
)
@pytest.mark.parametrize("device", _devices)
def test_spmm_sputnik(device):
B, L, K = 8, 30, 32
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
b = torch.rand(B, L, K, device=device)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core.bmm
a = a.to_sparse()
res = fn(a_csr, b)
res_gt = fn(a, b)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("device", _devices)
def test_spmm_sputnik_backward(device):
B, M, L, K = 8, 16, 30, 32
prob = 0.5
a = _create_random_sparsity(torch.rand(B, M, L, device=device), prob)
b = torch.rand(B, L, K, device=device)
b.requires_grad_(True)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core.bmm
a = a.to_sparse()
a.requires_grad_(True)
a_csr.values.requires_grad_(True)
fn(a_csr, b).sum().backward()
grad_a = a_csr.values.grad.clone()
grad_b = b.grad.clone()
b.grad = None
fn(a, b).sum().backward()
assert torch.allclose(
grad_a, a.grad.coalesce().values().reshape_as(grad_a), atol=1e-7
)
assert torch.allclose(grad_b, b.grad, atol=1e-7)
@cuda_only
def test_csr_transpose():
B, L, K = 8, 30, 40
prob = 0.5
device = torch.device("cuda")
a = _create_random_sparsity(torch.rand(B, L, K, device=device), prob)
a_csr = xformers.components.attention.core.SparseCS(a, device)
res = a_csr.transpose()
res2 = res.transpose()
assert torch.allclose(res.to_dense(), a.transpose(-2, -1))
assert torch.allclose(res2.to_dense(), a)
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("prob", [0.95, 0.996]) # cover > 0.995
@pytest.mark.parametrize("N", [32, 64, 96]) # cover > 64
def test_sparse_bmm(device, contiguous, prob, N):
B, M = 8, 64
a = torch.rand(B, M, N, device=device)
a[a < prob] = 0
a = a.to_sparse()
b = torch.rand(B, N, M, device=device)
if not contiguous:
a = a + a
b = b.transpose(-2, -1).contiguous().transpose(-2, -1)
res = _sparse_bmm(a, b)
res_gt = _baseline_sparse_bmm(a, b)
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
def test_sparse_bmm_backward(device, contiguous):
if device == "cuda":
# Skip test for now due to bug in torch 1.8
# See https://github.com/pytorch/pytorch/issues/54975
# Broken CUDA / torch 1.8 combination, awaiting an update
return
B, L, K = 8, 10, 16
prob = 0.5
a = torch.rand(B, L, K, device=device)
a[a < prob] = 0
a = a.to_sparse()
b = torch.rand(B, K, L, device=device, requires_grad=True)
if not contiguous:
a = a + a
b = b.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
a.requires_grad_(True)
def compute_grads(f):
out = f(a, b)
out.sum().backward()
compute_grads(_sparse_bmm)
grad_a = a.grad.clone().coalesce()
grad_b = b.grad.clone()
a.grad = None
b.grad = None
compute_grads(_baseline_sparse_bmm)
new_grad_a = a.grad.coalesce()
assert torch.allclose(grad_a.indices(), new_grad_a.indices())
assert torch.allclose(grad_a.values(), new_grad_a.values())
assert torch.allclose(grad_b, b.grad)
@pytest.mark.parametrize("device", _devices)
def test_sparse_coo_broadcast(device):
B, L, K = 8, 10, 16
prob = 0.5
a = torch.rand(L, K, device=device)
a[a < prob] = 0
a_sparse = a.to_sparse()
res = _broadcast_batch(a_sparse, B)
res_gt = a[None, :, :].expand(B, L, K)
assert torch.allclose(res.to_dense(), res_gt)
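# A minimal, illustrative round-trip sketch for the SparseCS container used
# throughout this file (assumptions: CPU execution and the ad hoc `_example_`
# name, which pytest does not collect): converting a dense matrix to CSR and
# back should preserve both the sparsity pattern and the stored values.
def _example_sparsecs_roundtrip():
    dense = _create_random_sparsity(torch.rand(2, 8, 8), 0.5)
    csr = xformers.components.attention.core.SparseCS(dense, torch.device("cpu"))
    assert torch.allclose(csr.to_dense(), dense)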
|
bart_ls-main
|
xformers/tests/test_custom_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
# Automatically fetch all registered attentions and Feedforwards
from xformers.components import Activation
from xformers.components.attention import ATTENTION_REGISTRY
from xformers.components.feedforward import FEEDFORWARD_REGISTRY
from xformers.factory import (
xFormerDecoderBlock,
xFormerDecoderConfig,
xFormerEncoderBlock,
xFormerEncoderConfig,
)
from xformers.helpers.test_utils import init_torch_distributed_local
BATCH = 4
SEQ = 128
MODEL = 96
DROPOUT = 0.5
GLOBAL_ATTENTION_RATIO = 0.1 # 10% of the tokens have a global view
DEVICES = [torch.device("cuda")]
VOCAB_SIZE = 32
@pytest.mark.parametrize("attn_dropout", [0.0, 0.1])
@pytest.mark.parametrize("residual_dropout", [0.0, 0.1])
@pytest.mark.parametrize("heads", [1, 3])
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("feedforward_name", FEEDFORWARD_REGISTRY.keys())
@pytest.mark.parametrize("layer_norm_style", ["pre", "post"])
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("reversible", [True, False])
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_xformer_encoder_block(
attention_name: str,
feedforward_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
activation: Activation,
layer_norm_style: str,
device: torch.device,
reversible: bool,
):
block_size = 16
attention_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": False,
"window_size": SEQ // 8 + 1,
"seq_len": SEQ,
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"dim_model": MODEL,
"num_heads": heads,
"dim_head": MODEL // heads,
"layout": torch.eye(SEQ // block_size, SEQ // block_size, dtype=torch.long),
"block_size": block_size,
"num_rules": 2, # Compositional Attention
}
multi_head_config = {
"num_heads": heads,
"dim_model": MODEL,
"residual_dropout": residual_dropout,
"attention": attention_config,
}
feedforward_config = {
"name": feedforward_name,
"dim_model": MODEL,
"dropout": DROPOUT,
"activation": activation,
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"gate": "top_2",
}
if feedforward_name == "MixtureOfExperts":
init_torch_distributed_local()
position_encoding_config = {
"name": "sine",
"dim_model": MODEL,
"seq_len": SEQ,
"vocab_size": VOCAB_SIZE,
}
block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
layer_norm_style=layer_norm_style,
reversible=reversible,
)
# Test that the whole block can be instantiated
block = xFormerEncoderBlock.from_config(block_config).to(device)
# Check that the dimensions make sense, to a FW pass
inputs = torch.rand(BATCH, SEQ, device=device)
_ = block(inputs)
# Check that we support attention masking, at least interface wise (do not check correctness yet)
att_mask = torch.ones(SEQ, SEQ, dtype=torch.bool, device=device)
_ = block(inputs, att_mask=att_mask)
# Check that we support input masking, at least interface wise (do not check correctness yet)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
_ = block(inputs, input_mask=input_mask)
@pytest.mark.parametrize("attn_dropout", [0.0, 0.1])
@pytest.mark.parametrize("residual_dropout", [0.0, 0.1])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("heads", [1, 3])
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("rotary_embeddings", [False, True])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("feedforward_name", FEEDFORWARD_REGISTRY.keys())
@pytest.mark.parametrize("layer_norm_style", ["pre", "post"])
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_xformer_decoder_block(
attention_name: str,
rotary_embeddings: bool,
feedforward_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
causal: bool,
activation: Activation,
layer_norm_style: str,
device: torch.device,
):
block_size = 16
attention_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": causal,
"window_size": SEQ // 8 + 1,
"seq_len": SEQ,
"dim_head": MODEL // heads,
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"layout": torch.eye(SEQ // block_size, SEQ // block_size, dtype=torch.long),
"block_size": block_size,
"num_rules": 2, # Compositional Attention
}
multi_head_config = {
"num_heads": heads,
"dim_model": MODEL,
"residual_dropout": residual_dropout,
"attention": attention_config,
"use_rotary_embeddings": rotary_embeddings,
}
feedforward_config = {
"name": feedforward_name,
"dim_model": MODEL,
"dropout": DROPOUT,
"activation": activation,
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"gate": "top_2",
}
if feedforward_name == "MixtureOfExperts":
init_torch_distributed_local()
position_encoding_config = {
"name": "sine",
"dim_model": MODEL,
"seq_len": SEQ,
"vocab_size": VOCAB_SIZE,
}
encoder_block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
layer_norm_style=layer_norm_style,
)
decoder_block_config = xFormerDecoderConfig(
dim_model=MODEL,
multi_head_config_masked=multi_head_config,
multi_head_config_cross=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
layer_norm_style=layer_norm_style,
)
# Test that the whole block can be instantiated
encoder_block = xFormerEncoderBlock.from_config(encoder_block_config).to(device)
decoder_block = xFormerDecoderBlock.from_config(decoder_block_config).to(device)
# Check that the dimensions make sense, to a FW pass
inputs = torch.rand(BATCH, SEQ, device=device)
encoded = encoder_block(inputs)
_ = decoder_block(
inputs, encoded
) # NOTE: does not make a lot of sense, just checking dimensions
# Check that we support masking, at least interface wise (do not check correctness yet)
att_mask = torch.ones(SEQ, SEQ, dtype=torch.bool, device=device)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
encoded = encoder_block(inputs)
_ = decoder_block(inputs, encoded, encoder_att_mask=att_mask, input_mask=input_mask)
# Test different sequence lengths when encoding and decoding
if not decoder_block.mha.attention.requires_same_k_q_dimensions:
if not causal or not hasattr(decoder_block.mha.attention, "causal"):
_ = decoder_block(inputs[:, :-16], encoded)
else:
# Check that we assert properly
with pytest.raises(AssertionError):
_ = decoder_block(inputs[:, :-16], encoded)
else:
# Check that we assert properly
with pytest.raises(AssertionError):
_ = decoder_block(inputs[:, :-16], encoded)
|
bart_ls-main
|
xformers/tests/test_block_factory.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.attention import maybe_sparsify
from xformers.components.attention._sputnik_sparse import _dense_to_sparse
from xformers.components.attention.core import SparseCS, _create_random_sparsity
B = 2
M = 16 # not a nice round number, on purpose
_devices_list = ["cpu", "cuda:0"] if torch.cuda.is_available() else ["cpu"]
_devices = [torch.device(d) for d in _devices_list]
@pytest.mark.parametrize("device", _devices)
def test_logical_and(device):
mask = _create_random_sparsity(torch.ones(B, M, M, dtype=torch.bool), 0.1)
mask_cs = SparseCS(mask, device)
# Check that we cannot & two sparse matrices (for now)
with pytest.raises(Exception):
_ = mask_cs & mask_cs
# Check that & ones returns the same values
mask_ones = mask_cs & torch.ones_like(mask, dtype=torch.bool, device=device)
assert torch.allclose(mask_cs.to_dense().long(), mask_ones.to_dense().long())
# Check that & the inverse returns 0 all around
mask_not = ~mask.to(device)
assert (mask_cs & mask_not).values.numel() == 0
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("seq", [12, 32, 128])
def test_dense_sparse(seq, device):
# Check that we can .to_dense() without crashing
mask = torch.rand(seq, seq, device=device) > 0.1
mask_cs = SparseCS(mask, device)
mask_back_forth = SparseCS(mask_cs.to_dense(), device)
assert torch.allclose(mask_cs.to_dense().long(), mask_back_forth.to_dense().long())
@pytest.mark.parametrize("device", _devices)
def test_device(device):
mask = _create_random_sparsity(
torch.ones(B, M, M, dtype=torch.bool, device=device), 0.1
)
assert mask.device.type == device.type
sparse_mask = maybe_sparsify(mask)
assert sparse_mask.device.type == device.type
def _baseline_dense_to_sparse(matrix):
import numpy as np
# Extract the nonzero values.
values = matrix.compress((matrix != 0).flatten())
# Calculate the offset of each row.
mask = (matrix != 0).astype(np.int32)
row_offsets = np.concatenate(([0], np.cumsum(np.add.reduce(mask, axis=1))), axis=0)
# Create the row indices and sort them.
    # note: use torch.argsort so the baseline matches PyTorch, whose sort is not stable
row_indices = torch.argsort(-1 * torch.as_tensor(np.diff(row_offsets))).numpy()
# Extract the column indices for the nonzero values.
x = mask * (np.arange(matrix.shape[1]) + 1)
column_indices = x.compress((x != 0).flatten())
column_indices = column_indices - 1
# Cast the desired precision.
values = torch.as_tensor(values.astype(np.float32))
row_indices, row_offsets, column_indices = [
torch.as_tensor(x.astype(np.int32))
for x in [row_indices, row_offsets, column_indices]
]
return values, row_indices, row_offsets, column_indices
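# A worked, illustrative example of the CSR layout the baseline above produces,
# assuming the tiny dense matrix [[1., 0., 2.], [0., 3., 0.]]:
#   values         -> [1., 2., 3.]   # non-zeros, stored row-major
#   row_offsets    -> [0, 2, 3]      # running count of non-zeros per row
#   row_indices    -> [0, 1]         # rows sorted by decreasing non-zero count
#   column_indices -> [0, 2, 1]      # column of each stored value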
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("seq", [12, 32, 128])
def test_dense_to_sparse(seq, device):
matrix = torch.rand(seq, seq, device=device)
matrix[matrix > 0.9] = 0
baseline_res = _baseline_dense_to_sparse(matrix.cpu().numpy())
res = _dense_to_sparse(matrix, device=device)
_idx_to_name = ["values", "row_indices", "row_offsets", "column_indices"]
for idx, (bi, i) in enumerate(zip(baseline_res, res)):
if idx != 1:
# row_indices is the result of an argsort, which is not stable
# for same number of elements
assert torch.allclose(bi.to(device), i), f"error in {_idx_to_name[idx]}"
assert bi.dtype == i.dtype
assert i.device == device
|
bart_ls-main
|
xformers/tests/test_sparsecs.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
# needed to register custom ops
import xformers # noqa: F401
import xformers.components.attention
from xformers.sparse import SparseCSRTensor
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
_devices = ["cpu", "cuda:0"] if torch.cuda.is_available() else ["cpu"]
def _create_random_sparsity(matrix, sparsity, divisible_by=4):
assert matrix.ndim == 3
keep = torch.rand_like(matrix[0], dtype=torch.float32) > sparsity
nonzero = torch.nonzero(keep)
nnz = nonzero.shape[0]
# NOTE: need to make it a multiple of 4 for sputnik
nonzero = nonzero[: (nnz - nnz % divisible_by)]
i, j = nonzero.unbind(1)
output = torch.zeros_like(matrix)
bdim = torch.arange(matrix.shape[0], device=matrix.device)[:, None]
output[bdim, i, j] = matrix[bdim, i, j]
return output
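# A small, illustrative usage sketch for the helper above (assumptions: CPU
# tensors and the ad hoc `_example_` name, which pytest does not collect).
# `sparsity` is the probability of dropping an entry, and the surviving
# non-zero count per batch slice is truncated to a multiple of `divisible_by`
# so the sputnik kernels accept it; the same pattern is reused across the batch.
def _example_create_random_sparsity():
    dense = _create_random_sparsity(torch.rand(2, 8, 8), sparsity=0.9)
    nnz_per_slice = int((dense[0] != 0).sum())
    assert nnz_per_slice % 4 == 0
    # the same (i, j) pattern is applied to every batch element
    assert torch.equal(dense[0] != 0, dense[1] != 0)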
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("func", [torch.add, torch.mul])
def test_sparse_binary_ops(func, device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = SparseCSRTensor.from_dense(a)
b = a
b_csr = a_csr
res = func(a_csr, b_csr).to_dense()
res_gt = func(a, b)
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax(device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = SparseCSRTensor.from_dense(a)
fn = xformers.components.attention.core._softmax
def fn2(x):
return torch.nn.functional.softmax(x, -1)
a = a.to_sparse()
res = fn2(a_csr)
res_gt = fn(a)
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax_backward(device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = SparseCSRTensor.from_dense(a)
fn = xformers.components.attention.core._softmax
def fn2(x):
return torch.nn.functional.softmax(x, -1)
a = a.to_sparse()
a_csr.requires_grad_(True)
fn2(a_csr)._csr_values.sum().backward()
grad_a = a_csr._csr_values.grad.clone()
a.requires_grad_(True)
fn(a).coalesce().values().sum().backward()
assert torch.allclose(
grad_a, a.grad.coalesce().values().reshape_as(grad_a), atol=1e-7
)
@pytest.mark.parametrize("device", _devices)
def test_deepcopy(device):
import copy
B, L = 8, 30
prob = 0.3
a = _create_random_sparsity(torch.rand(B, L, L), prob)
a_csr = SparseCSRTensor.from_dense(a)
b_csr = copy.deepcopy(a_csr)
assert torch.equal(a_csr, b_csr)
@pytest.mark.parametrize("device", _devices)
def test_module_buffer(device):
B, L = 8, 30
prob = 0.3
a = _create_random_sparsity(torch.rand(B, L, L), prob)
a_csr = SparseCSRTensor.from_dense(a)
prob = 0.5
b = _create_random_sparsity(torch.rand(B, L, L), prob)
b_csr = SparseCSRTensor.from_dense(b)
module = torch.nn.Module()
# test that register_buffer works
module.register_buffer("a_csr", a_csr)
assert module.a_csr is a_csr
module.to(device)
assert module.a_csr.device == torch.device(device)
state_dict = module.state_dict()
assert "a_csr" in state_dict
assert torch.equal(a_csr.to(device), state_dict["a_csr"])
module.load_state_dict(state_dict)
module.load_state_dict({"a_csr": b_csr})
assert torch.equal(module.a_csr, b_csr.to(device))
|
bart_ls-main
|
xformers/tests/test_sparse_csr.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.positional_embedding import RotaryEmbedding
DEVICES = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else [
torch.device("cuda")
    ] # save a bit on CI for now, we have separate cpu and gpu jobs
)
BATCH = 2
SEQ = 32
HEADS = 2
EMB = 32
@pytest.mark.parametrize("device", DEVICES)
def test_rotary_embeddings(device):
rotary = RotaryEmbedding(EMB).to(device)
# Generate dummy inputs
q = torch.ones((BATCH, HEADS, SEQ, EMB), device=device) # uniform on purpose
k = q.clone()
k_rot, q_rot = rotary(q, k)
# Check that the sequences now encode relative position information
att = torch.einsum("bhne,bhme->bhnm", q, k)
att_rot = torch.einsum("bhne,bhme->bhnm", q_rot, k_rot)
# - the attention for the same positions is not changed
assert torch.allclose(torch.diag(att[0, 0, :, :]), torch.diag(att_rot[0, 0, :, :]))
# - the post-rotary attention is more focused on the diagonal
att_rot -= att_rot[
0, 0, 0, 0
].clone() # all diagonal elements will have the same value
att_rot = (
att_rot <= 1e-4
) # all non diagonal elements had lower attention than diagonal (+ float tolerance)
assert torch.all(att_rot)
# Test that different sequence lengths is ok
_, _ = rotary(q[:, :, :-16, :], k)
|
bart_ls-main
|
xformers/tests/test_rotary_embeddings.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import pytest
import torch
from xformers.components import InProjContainer, InProjParams, MultiHeadDispatch
# Automatically test all the registered attentions
from xformers.components.attention import (
_DENSITY_THRESHOLD,
ATTENTION_REGISTRY,
build_attention,
)
DEVICES = (
[torch.device("cpu")] if not torch.cuda.is_available() else [torch.device("cuda")]
)
BATCH = 2
SEQ = 128 if torch.cuda.is_available() else 32
MODEL = 128 if torch.cuda.is_available() else 64
GLOBAL_ATTENTION_RATIO = (
_DENSITY_THRESHOLD * 0.9
) # Make sure that we test the sparse implementation, no matter the threshold
assert ATTENTION_REGISTRY.keys(), "Attention layers should have been registered"
def _get_multihead(
attention_name,
attn_dropout,
res_dropout,
causal,
heads,
device,
skip_output_projection=False,
):
test_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": causal,
"seq_len": SEQ,
"window_size": SEQ // 8 + 1, # local attention
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"dim_model": MODEL,
"num_heads": heads,
"dim_head": MODEL / heads,
"num_rules": 2, # Compositional Attention
}
if skip_output_projection:
def noop(x):
return x
test_config["out_proj"] = noop
# Add some blocksparse layout to test the corresponding attention
block_size = 16
test_config["layout"] = torch.eye(
SEQ // block_size, SEQ // block_size, dtype=torch.long
)
test_config["block_size"] = block_size
attention = build_attention(test_config)
# build a multi head dispatch to test this attention mechanism
multi_head = MultiHeadDispatch(
seq_len=SEQ,
dim_model=MODEL,
residual_dropout=res_dropout,
num_heads=heads,
attention=attention,
).to(device)
return multi_head
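# A minimal, illustrative sketch of how the helper above is typically used
# (assumptions: CPU execution and the ad hoc `_example_` name; shapes follow
# the BATCH/SEQ/MODEL constants defined at the top of this file):
def _example_multihead_self_attention():
    multi_head = _get_multihead("scaled_dot_product", 0.0, 0.0, False, 1, torch.device("cpu"))
    x = torch.rand(BATCH, SEQ, MODEL)
    out = multi_head(query=x, key=x, value=x)
    assert out.shape == torch.Size([BATCH, SEQ, MODEL])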
@pytest.mark.parametrize("attn_dropout", [0.0, 0.3])
@pytest.mark.parametrize("residual_dropout", [0.0, 0.1])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("device", DEVICES)
def test_order_invariance(
attention_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
causal: bool,
device: torch.device,
):
torch.manual_seed(42)
multi_head = _get_multihead(
attention_name, attn_dropout, residual_dropout, causal, heads, device
)
# Check that a shuffled input produces the same results
seqs = [SEQ, SEQ - 16] if (attention_name != "blocksparse") else [SEQ]
for seq in seqs:
# Check that we can pass a smaller sequence
inputs = torch.rand(BATCH, seq, MODEL, device=device)
shuffle = torch.randperm(inputs.shape[1])
inputs_shuffled = inputs[:, shuffle, :].clone()
results = multi_head(inputs, inputs, inputs)
results_shuffled = multi_head(inputs_shuffled, inputs_shuffled, inputs_shuffled)
torch.allclose(results[:, shuffle, :], results_shuffled)
# Test the non-self-attention codepath
att = multi_head(inputs, inputs_shuffled, inputs)
# Check that dropout actually drops some values
if attn_dropout > 0:
att_2 = multi_head(inputs, inputs_shuffled, inputs)
assert (att != att_2).any()
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ["scaled_dot_product"])
@pytest.mark.parametrize("device", DEVICES)
def test_kqv_ordering(
attention_name: str,
heads: int,
device: torch.device,
):
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
# Check kqv are not flipped
# this will not catch all issues, but would catch a V being misplaced
    # make k and q complementary, so that QKt is all zero and attention is uniform
q = torch.cat(
(
torch.rand((1, MODEL // 2), device=device),
torch.zeros((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
k = torch.cat(
(
torch.zeros((1, MODEL // 2), device=device),
torch.rand((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
v = torch.rand(BATCH, SEQ, MODEL, device=device)
# Normal call
res = multi_head(query=q, key=k, value=v)
for i in range(BATCH):
assert torch.allclose(res[i, :, :], res[i, 0, :].unsqueeze(-2))
assert not torch.allclose(res[0, :, :], res[1, :, :])
# Flip qkv, and check that we invert the above check properly
res_false = multi_head(query=v, key=k, value=q)
assert torch.allclose(res_false[0, :, :], res_false[1, :, :])
@pytest.mark.parametrize("small_init", [False, True])
@pytest.mark.parametrize("proj_bias", [False, True])
@pytest.mark.parametrize("same_sizes", [False, True])
@pytest.mark.parametrize("same_settings", [False, True])
def test_inproj(
small_init: bool, proj_bias: bool, same_sizes: bool, same_settings: bool
):
test_config = {
"name": "scaled_dot_product",
"dropout": 0.1,
"causal": False,
"seq_len": SEQ,
"window_size": SEQ // 8 + 1,
"num_heads": 1,
"dim_head": MODEL,
}
attention = build_attention(test_config)
# Construct the initial projection, test different options
in_params = InProjParams(MODEL, MODEL, proj_bias, small_init)
if same_settings:
in_proj = InProjContainer(in_params, None, None)
else:
out_features = MODEL if same_sizes else MODEL - 16
in_params_flip = InProjParams(MODEL, out_features, not proj_bias, small_init)
in_proj = InProjContainer(in_params, in_params_flip, in_params_flip)
# build a multi head dispatch to test this attention mechanism
multi_head = MultiHeadDispatch(
seq_len=SEQ,
dim_model=MODEL,
residual_dropout=0.1,
num_heads=1,
attention=attention,
in_proj_container=in_proj,
)
# Check kqv are not flipped
# this will not catch all issues, but would catch a V being misplaced
    # make k and q complementary, so that QKt is all zero and attention is uniform
q = torch.cat(
(
torch.rand((1, MODEL // 2)),
torch.zeros((1, MODEL // 2)),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
k = torch.cat(
(
torch.zeros((1, MODEL // 2)),
torch.rand((1, MODEL // 2)),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
v = torch.rand(BATCH, SEQ, MODEL)
# just check that a FW does not assert out
_ = multi_head(query=q, key=k, value=v)
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("device", DEVICES)
def test_different_kq_dimensions(
attention_name: str,
heads: int,
device: torch.device,
):
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
if multi_head.attention.requires_same_k_q_dimensions:
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip(f"{attention_name} does not support different k, q dimensions yet.")
seq_q = SEQ - 16
q = torch.rand((BATCH, seq_q, MODEL), device=device)
k = torch.rand((BATCH, SEQ, MODEL), device=device)
v = torch.rand((BATCH, SEQ, MODEL), device=device)
res = multi_head(query=q, key=k, value=v)
assert res.shape == torch.Size([BATCH, seq_q, MODEL])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize(
"batch_sizes",
[
(1, BATCH, BATCH),
(BATCH, 1, BATCH),
(BATCH, BATCH, 1),
(1, 1, BATCH),
(BATCH, 1, 1),
(1, BATCH, 1),
],
)
def test_broadcast_batch_dimension(
attention_name: str,
heads: int,
device: torch.device,
batch_sizes: Tuple[int, int, int],
):
Q_BATCH, K_BATCH, V_BATCH = batch_sizes
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
if multi_head.attention.requires_same_k_q_dimensions:
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip(f"{attention_name} does not support different k, q dimensions yet.")
q = torch.rand((Q_BATCH, SEQ, MODEL), device=device)
k = torch.rand((K_BATCH, SEQ, MODEL), device=device)
v = torch.rand((V_BATCH, SEQ, MODEL), device=device)
res = multi_head(query=q, key=k, value=v)
assert res.shape == torch.Size([BATCH, SEQ, MODEL])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ["scaled_dot_product", "favor"])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires a CUDA gpu")
def test_causal(
attention_name: str,
heads: int,
):
"""
Make sure that the causal flag is respected.
The input data is orthogonal by design if causal is respected, but if the attention looks ahead this will fail
"""
torch.random.manual_seed(42)
device = torch.device("cuda")
multi_head = _get_multihead(
attention_name,
0.0,
0.0,
causal=True,
heads=heads,
device=device,
skip_output_projection=True,
)
k = (
torch.tril(torch.ones((SEQ, SEQ), device=device), diagonal=0)
.unsqueeze(0)
.expand(1, -1, -1)
)
q = (
torch.triu(torch.ones((SEQ, SEQ), device=device), diagonal=0)
.unsqueeze(0)
.expand(1, -1, -1)
)
v = (
torch.arange(SEQ, device=device)
.float()
.unsqueeze(0)
.unsqueeze(-1)
.expand(1, -1, SEQ)
)
    # Make sure that we don't project, to keep the embeddings orthogonal
multi_head.attention.requires_input_projection = False
res = multi_head(query=q, key=k, value=v).squeeze(0)
# Consolidate along the embedding, if causal was respected the amplitude should be sorted already
res_sum = torch.sum(res, dim=1).cpu()
assert torch.allclose(torch.sort(res_sum)[1], torch.arange(SEQ)) or torch.allclose(
torch.sort(res_sum, descending=True)[1], torch.arange(SEQ)
), res_sum
@pytest.mark.parametrize("attn_dropout", [0.0, 0.1])
@pytest.mark.parametrize("heads", [2])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.skipif(torch.cuda.is_available(), reason="CUDA gpu not supported yet")
def test_torch_script_ability(
attention_name: str,
heads: int,
attn_dropout: float,
):
if attention_name in {
"favor",
"global",
"local",
"random",
}:
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip(f"{attention_name} does not support scripting yet.")
device = torch.device("cpu")
multi_head = _get_multihead(attention_name, attn_dropout, 0.0, False, heads, device)
# input for tracing the function
q = torch.rand((BATCH, SEQ, MODEL), device=device)
k = torch.rand((BATCH, SEQ, MODEL), device=device)
v = torch.rand((BATCH, SEQ, MODEL), device=device)
# to make sure dropout behaves deterministically
torch.random.manual_seed(42)
# tracing the attention module
traced_multi_head = torch.jit.trace(multi_head, (q, k, v))
# create new random inputs for testing the eager model and traced model
q = torch.rand((BATCH, SEQ, MODEL), device=device)
k = torch.rand((BATCH, SEQ, MODEL), device=device)
v = torch.rand((BATCH, SEQ, MODEL), device=device)
# to make sure dropout behaves deterministically need to set the seed again
torch.random.manual_seed(42)
res = multi_head(query=q, key=k, value=v)
# to make sure dropout behaves deterministically need to set the seed again
torch.random.manual_seed(42)
res_traced = traced_multi_head(query=q, key=k, value=v)
assert torch.allclose(res, res_traced)
# TODO: way more unit tests..
|
bart_ls-main
|
xformers/tests/test_attentions.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention import GlobalAttention, ScaledDotProduct
def test_global_attention():
b, s, d = 8, 900, 384
def test_ratio(global_attention_ratio: float):
# Make sure that Global and Normal attention get the same results for the corresponding tokens
a = torch.rand(b, s, d)
config = {
"name": "global",
"dropout": 0.0,
"causal": False,
"max_seq_len": s,
"attention_query_mask": torch.rand((s, 1)) < global_attention_ratio,
}
global_attention = GlobalAttention(**config)
sdp_attention = ScaledDotProduct(**config)
r_global = global_attention(a, a, a)
r_dense = sdp_attention(a, a, a)
# Check that the tokens which have access to the full attention give the same
# results as the monolithic dense scaled_dot_product
mask = config["attention_query_mask"][:, 0]
torch.allclose(r_global[:, mask, :], r_dense[:, mask, :])
# Test with different levels of sparsity, to make sure that all the paths are covered
test_ratio(0.02)
test_ratio(0.5)
test_ratio(1.0) # All queries allowed
|
bart_ls-main
|
xformers/tests/test_global_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore
from xformers.factory.hydra_helper import import_xformer_config_schema
def test_import_schema():
import_xformer_config_schema()
cs = ConfigStore.instance()
groups = cs.list("xformers")
# check all groups registered
assert groups == ["attention", "ff", "pe"]
# check the attention is registered
attentions = cs.list("xformers/attention")
assert "favor_schema.yaml" in attentions
|
bart_ls-main
|
xformers/tests/test_hydra_helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers.components.attention import OrthoFormerAttention, ScaledDotProduct
from xformers.components.attention.utils import maybe_merge_masks
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
@pytest.mark.parametrize(
"landmark_selection", ["orthogonal", "kmeans", "kmeans_spherical", "random"]
)
@pytest.mark.parametrize("num_landmarks", [30, 33, 905])
@pytest.mark.parametrize("subsample_fraction", [1.0, 0.3])
def test_ortho_attention(
landmark_selection: str, num_landmarks: int, subsample_fraction: float
):
# TODO: conv_kernel_size parameter not set to None fails this test. Investigate.
b, s, d = 8, 900, 32
num_heads = 2
seed = 42
torch.random.manual_seed(seed)
random.seed(seed)
ortho_config = {
"name": "orthoformer",
"dropout": 0.0,
"num_landmarks": num_landmarks,
"num_heads": num_heads,
"landmark_selection": landmark_selection,
"subsample_fraction": subsample_fraction,
}
sdp_config = {
"name": "scaled_dot_product",
"dropout": 0.0,
}
a = torch.rand(b, s, d, device=torch.device("cuda"))
def test_close_to_sdp():
# Make sure that Ortho and Normal attention are not too far off.
ortho_attention = OrthoFormerAttention(**ortho_config).cuda()
sdp_attention = ScaledDotProduct(**sdp_config).cuda()
r_ortho = ortho_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_ortho, r_sdp, rtol=0.02, atol=1e-1)
# Make sure that OrthoFormerAttention and Normal attention are not too far off.
ortho_attention = OrthoFormerAttention(**ortho_config).cuda()
sdp_attention = ScaledDotProduct(**sdp_config).cuda()
r_ortho = ortho_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_ortho, r_sdp, rtol=0.02, atol=1e-1)
def test_att_mask_ignored():
# If an sxs attention mask is passed in, it should be ignored.
# Results should be the same as if no mask was passed in.
ortho_attention = OrthoFormerAttention(**ortho_config).cuda()
sdp_attention = ScaledDotProduct(**sdp_config).cuda()
key_padding_mask = None
att_mask = torch.randint(0, 2, (s, s), device=torch.device("cuda")).to(
dtype=torch.bool
)
sdp_mask = maybe_merge_masks(
att_mask=None,
key_padding_mask=key_padding_mask,
batch_size=b // num_heads,
src_len=s,
num_heads=num_heads,
)
r_ortho = ortho_attention(
a, a, a, att_mask=att_mask, key_padding_mask=key_padding_mask
)
r_sdp = sdp_attention(a, a, a, att_mask=sdp_mask)
assert torch.allclose(r_ortho, r_sdp, rtol=0.02, atol=1e-1)
test_close_to_sdp()
test_att_mask_ignored()
|
bart_ls-main
|
xformers/tests/test_ortho_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers import _is_triton_available
if _is_triton_available:
from xformers.benchmarks.benchmark_pytorch_transformer import evaluate, train
from xformers.factory.model_factory import xFormer, xFormerConfig
BATCH = 20
SEQ = 32
EMB = 8
VOCAB = 8
HEADS = 4
DROP = 0.1
LAYERS = 2
ACTIVATION = "relu"
_test_config_encoder = {
"block_type": "encoder",
"dim_model": EMB,
"num_layers": LAYERS,
"layer_norm_style": "post",
"multi_head_config": {
"num_heads": HEADS,
"residual_dropout": DROP,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": DROP,
"causal": False,
"seq_len": SEQ,
},
"dim_model": EMB,
},
"feedforward_config": {
"name": "MLP",
"dropout": DROP,
"activation": ACTIVATION,
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
_test_config_decoder = {
"block_type": "decoder",
"dim_model": EMB,
"num_layers": LAYERS,
"layer_norm_style": "post",
"multi_head_config_masked": {
"num_heads": HEADS,
"residual_dropout": DROP,
"dim_model": EMB,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": DROP,
"causal": False,
"seq_len": SEQ,
},
},
"multi_head_config_cross": {
"num_heads": HEADS,
"residual_dropout": DROP,
"dim_model": EMB,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": DROP,
"causal": False,
"seq_len": SEQ,
},
},
"feedforward_config": {
"name": "MLP",
"dropout": DROP,
"activation": ACTIVATION,
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
_test_config = [_test_config_encoder, _test_config_decoder]
def reset_seeds():
torch.manual_seed(0)
random.seed(0)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a gpu"
)
def test_pytorch_encoder_parity(device=torch.device("cuda")):
# Build both a xFormers and Pytorch model
reset_seeds()
model_xformers = xFormer.from_config(xFormerConfig([_test_config_encoder])).to(
device
)
print(model_xformers)
model_pytorch = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(
d_model=EMB,
nhead=HEADS,
dim_feedforward=4 * EMB,
dropout=DROP,
activation=ACTIVATION,
batch_first=True, # (batch, seq, feature)
device=device,
),
num_layers=LAYERS,
)
print(model_pytorch)
optim_xformers = torch.optim.SGD(
model_xformers.parameters(), lr=1e-3, momentum=0.9
)
optim_pytorch = torch.optim.SGD(
model_pytorch.parameters(), lr=1e-3, momentum=0.9
)
# Check that both models can be trained to comparable results
eval_start_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_start_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("starting point: ", eval_start_pytorch, eval_start_xformer)
train(model_pytorch, optim_pytorch, "pytorch", 500, BATCH, SEQ, EMB, device)
train(model_xformers, optim_xformers, "xformers", 500, BATCH, SEQ, EMB, device)
# Check that we can classify this dummy example
# Arbitrary threshold
eval_stop_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_stop_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("end point: ", eval_stop_pytorch, eval_stop_xformer)
fit_ratio_xformer = eval_start_xformer / eval_stop_xformer
fit_ratio_pytorch = eval_start_pytorch / eval_stop_pytorch
print(fit_ratio_pytorch, fit_ratio_xformer)
# Catch a broken training
assert fit_ratio_xformer > 120
assert fit_ratio_pytorch > 120
# Catch a significant difference in between the two
assert (
abs(eval_start_xformer - eval_start_pytorch) < 1e-6
) # initial eval is about 25, arbitrary limits
assert (
abs(eval_stop_xformer - eval_stop_pytorch) < 1e-1
) # final eval is about 0.2, arbitrary limits
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a gpu"
)
def test_pytorch_tranformer_parity(device=torch.device("cuda")):
# Build both a xFormers and Pytorch model
reset_seeds()
model_xformers = xFormer.from_config(xFormerConfig(_test_config)).to(device)
print(model_xformers)
model_pytorch = torch.nn.Transformer(
d_model=EMB,
nhead=HEADS,
num_encoder_layers=LAYERS,
num_decoder_layers=LAYERS,
dim_feedforward=4 * EMB,
dropout=DROP,
activation=ACTIVATION,
layer_norm_eps=1e-05,
batch_first=True, # (batch, seq, feature)
device=device,
)
print(model_pytorch)
optim_xformers = torch.optim.SGD(
model_xformers.parameters(), lr=1e-3, momentum=0.9
)
optim_pytorch = torch.optim.SGD(
model_pytorch.parameters(), lr=1e-3, momentum=0.9
)
# Check that both models can be trained to comparable results
eval_start_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_start_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("starting point: ", eval_start_pytorch, eval_start_xformer)
train(model_xformers, optim_xformers, "xformers", 100, BATCH, SEQ, EMB, device)
train(model_pytorch, optim_pytorch, "pytorch", 100, BATCH, SEQ, EMB, device)
# Check that we can classify this dummy example
# Arbitrary threshold
eval_stop_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_stop_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("end point: ", eval_stop_pytorch, eval_stop_xformer)
fit_ratio_xformer = eval_start_xformer / eval_stop_xformer
fit_ratio_pytorch = eval_start_pytorch / eval_stop_pytorch
print(fit_ratio_pytorch, fit_ratio_xformer)
assert fit_ratio_xformer > 50
assert fit_ratio_pytorch > 50
|
bart_ls-main
|
xformers/tests/test_pytorch_transformer_parity.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
SHAPES = [
(384, 128),
(8 * 384, 128),
(34, 128),
(16, 128),
(16, 512),
(8, 384),
(8, 1024),
(8, 2048),
(8, 4096),
(8, 4096),
(4, 12288),
]
_triton_available = torch.cuda.is_available()
if _triton_available:
try:
import triton
import triton.language as tl
from xformers.triton.sum_strided import sum_2d_dim_0
except (ImportError, ModuleNotFoundError):
_triton_available = False
if _triton_available:
@triton.jit
def k_mean(X, Mean, Var, stride, N, **META):
# fmt: on
"""
        Compute the per-row mean and variance of X over its last dimension.
        These are the statistics a fused layernorm
            y = (x - E(x)) / sqrt(var(x) + epsilon) * gamma + beta
        would consume; only the mean and variance are computed and stored here.
"""
row = tl.program_id(0)
cols = tl.arange(0, META["BLOCK_SIZE_N"])
# Move to this row
x_ptrs = X + row * stride + cols
x = tl.load(x_ptrs, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
# Compute variance
x_mean = tl.sum(x, axis=0) / N
x_zm = x - x_mean
x_zm = tl.where(cols < N, x_zm, 0.0)
x_var = tl.sum(x_zm * x_zm, axis=0) / N
tl.store(Mean + row, x_mean)
tl.store(Var + row, x_var)
def stats(x: torch.Tensor):
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE_N:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps.
num_warps = min(max(BLOCK_SIZE_N // 256, 1), 8)
mean = torch.zeros((M,)).cuda()
var = torch.zeros((M,)).cuda()
# enqueue kernel
# fmt: off
k_mean[(M,)](
x_arg, mean, var,
x_arg.stride(0),
N,
num_warps=num_warps,
BLOCK_SIZE_N=BLOCK_SIZE_N
)
# fmt: on
return mean.reshape(x.shape[:-1]), var.reshape(x.shape[:-1])
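    # Worked numbers for the sizing heuristic in stats() above, assuming the
    # float32 input used by test_mean below (N = 384 features per row):
    #   MAX_FUSED_SIZE = 65536 // 4 = 16384
    #   BLOCK_SIZE_N   = min(16384, next_power_of_2(384)) = 512
    #   num_warps      = min(max(512 // 256, 1), 8) = 2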
def test_mean():
torch.random.manual_seed(0)
a = torch.rand((4, 2048, 384), device=torch.device("cuda"))
mean, var = stats(a)
t_mean = torch.mean(a, dim=-1)
t_var = torch.var(a, dim=-1)
print(mean)
print(t_mean)
print(var)
print(t_var)
assert torch.allclose(mean, t_mean, rtol=1e-1)
assert torch.allclose(var, t_var, rtol=1e-1)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_sum_strided(shape, dtype):
torch.random.manual_seed(0)
a = torch.rand(shape, device=torch.device("cuda"), dtype=dtype)
torch_sum = torch.sum(a, dim=0)
triton_sum = sum_2d_dim_0(a)
assert torch.allclose(
torch_sum, triton_sum, rtol=0.01
), f"{torch_sum}\n{triton_sum}"
def test_sum_strided_asserts():
torch.random.manual_seed(0)
a = torch.rand((128, 256), device=torch.device("cuda"), dtype=torch.float16)
with pytest.raises(AssertionError):
# This kernel is not useful in that case, assert to prevent misuse
sum_2d_dim_0(a.transpose(1, 0))
a = torch.rand((3, 128, 256), device=torch.device("cuda"), dtype=torch.float16)
with pytest.raises(AssertionError):
# This kernel expects 2D tensors, assert to prevent misuse
sum_2d_dim_0(a)
|
bart_ls-main
|
xformers/tests/test_triton_basics.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import nullcontext
import pytest
import torch
from xformers.factory.model_factory import xFormer, xFormerConfig
BATCH = 20
SEQ = 512
EMB = 384
DEVICES = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else [
torch.device("cuda")
    ] # save a bit on CI for now, we have separate cpu and gpu jobs
)
encoder_configs = {
"reversible": False,
"block_type": "encoder",
"dim_model": 384,
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": 64,
"dim_model": EMB,
},
"num_layers": 3,
"multi_head_config": {
"num_heads": 4,
"residual_dropout": 0,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": 512,
},
"dim_model": EMB,
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
"number_of_experts": 4,
"gate_config": "top_2",
},
}
decoder_configs = {
"block_type": "decoder",
"dim_model": 384,
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": 64,
"dim_model": EMB,
},
"num_layers": 2,
"multi_head_config_masked": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": 512,
},
},
"multi_head_config_cross": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": 512,
},
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
test_configs_list = [encoder_configs, decoder_configs]
test_configs_dict = {"encoder": encoder_configs, "decoder": decoder_configs}
""" Test all the model configurations saved in model_presets. """
@pytest.mark.parametrize("config", [test_configs_list, test_configs_dict])
@pytest.mark.parametrize("reversible", [True, False])
@pytest.mark.parametrize("tie_embedding_weights", [True, False])
@pytest.mark.parametrize("device", DEVICES)
def test_presets(config, reversible, tie_embedding_weights, device):
# Build the model
if isinstance(config, list):
config[0]["reversible"] = reversible
else:
config["encoder"]["reversible"] = reversible
modelConfig = xFormerConfig(config, tie_embedding_weights)
if isinstance(modelConfig.stack_configs, dict):
for _, blockConfig in modelConfig.stack_configs.items():
assert blockConfig.layer_position
else:
for blockConfig in modelConfig.stack_configs:
assert blockConfig.layer_position
context = (
pytest.raises(AssertionError)
if reversible and tie_embedding_weights
else nullcontext()
)
with context:
model = xFormer.from_config(modelConfig).to(device)
# Dummy inputs, test a forward
inputs = (torch.rand((BATCH, SEQ), device=device) * 10).abs().to(torch.int)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
outputs = model(
inputs, encoder_input_mask=input_mask, decoder_input_mask=input_mask
)
# Test a BW
loss = torch.sum(torch.abs(outputs))
loss.backward()
# If we requested tied embedding weights, check that this is the case indeed
if tie_embedding_weights and not reversible:
assert model.encoders[0].pose_encoding == model.decoders[0].pose_encoding
|
bart_ls-main
|
xformers/tests/test_model_factory.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers.components.attention import NystromAttention, ScaledDotProduct
from xformers.components.attention.utils import maybe_merge_masks
@pytest.mark.parametrize("pinverse_original_init", [True, False])
@pytest.mark.parametrize("use_razavi_pinverse", [True, False])
@pytest.mark.parametrize("num_landmarks", [30, 33, 905])
def test_nystrom_attention(
pinverse_original_init: bool,
use_razavi_pinverse: bool,
num_landmarks: int,
):
# TODO: conv_kernel_size parameter not set to None fails this test. Investigate.
b, s, d = 8, 900, 384
num_heads = 2
seed = 42
torch.random.manual_seed(seed)
random.seed(seed)
nystrom_config = {
"name": "nystrom",
"dropout": 0.0,
"num_landmarks": num_landmarks,
"num_heads": num_heads,
"pinverse_original_init": pinverse_original_init,
"use_razavi_pinverse": use_razavi_pinverse,
}
sdp_config = {
"name": "scaled_dot_product",
"dropout": 0.0,
}
a = torch.rand(b, s, d)
def test_close_to_sdp():
# Make sure that Nystrom and Normal attention are not too far off.
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
r_nystrom = nystrom_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_nystrom, r_sdp, rtol=0.005, atol=1e-2)
# Make sure that Nystrom and Normal attention are not too far off.
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
r_nystrom = nystrom_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_nystrom, r_sdp, rtol=0.005, atol=1e-2)
def test_att_mask_ignored():
# If an sxs attention mask is passed in, it should be ignored.
# Results should be the same as if no mask was passed in.
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
key_padding_mask = None
att_mask = torch.randint(0, 2, (s, s)).to(dtype=torch.bool)
sdp_mask = maybe_merge_masks(
att_mask=None,
key_padding_mask=key_padding_mask,
batch_size=b // num_heads,
src_len=s,
num_heads=num_heads,
)
r_nystrom = nystrom_attention(
a, a, a, att_mask=att_mask, key_padding_mask=key_padding_mask
)
r_sdp = sdp_attention(a, a, a, att_mask=sdp_mask)
assert torch.allclose(r_nystrom, r_sdp, rtol=0.005, atol=1e-2)
def test_masking():
# FIXME
# nystrom_config["causal"] = True
# sdp_config["causal"] = True
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
key_padding_mask = torch.rand((b // num_heads, s)) > 0.1
att_mask = None
mask = maybe_merge_masks(
att_mask,
key_padding_mask,
batch_size=b // num_heads,
src_len=s,
num_heads=num_heads,
)
r_nystrom = nystrom_attention(a, a, a, key_padding_mask=key_padding_mask)
r_sdp = sdp_attention(a, a, a, att_mask=mask)
# account for when nan != nan
if r_nystrom.isnan().any() or r_sdp.isnan().any():
rand = random.uniform(0, 1)
r_nystrom = r_nystrom.masked_fill(r_nystrom.isnan(), rand)
r_sdp = r_sdp.masked_fill(r_sdp.isnan(), rand)
# Not very close, but more so testing functionality.
assert torch.allclose(
r_nystrom, r_sdp, rtol=0.1, atol=0.5
), f"max diff {torch.max(torch.abs(r_nystrom-r_sdp))}"
# Error when key padding mask doesn't have expected dimensions.
key_padding_mask = torch.randint(0, 2, (s, b)).to(dtype=torch.bool)
with pytest.raises(AssertionError):
nystrom_attention(a, a, a, key_padding_mask=key_padding_mask)
test_close_to_sdp()
test_att_mask_ignored()
test_masking()
|
bart_ls-main
|
xformers/tests/test_nystrom_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
try:
import timm
from timm.models.vision_transformer import VisionTransformer
except ImportError:
timm = None
VisionTransformer = None
from xformers.helpers.timm_sparse_attention import TimmSparseAttention
_device_list = ["cpu", "cuda:0"] if torch.cuda.is_available() else ["cpu"]
@pytest.mark.skipif(timm is None, reason="requires timm")
@pytest.mark.parametrize("device", _device_list)
def test_timm_wrapper(device):
img_size = 224
patch_size = 16
batch = 8
# Instantiate the reference model
model = VisionTransformer(
img_size=img_size,
patch_size=patch_size,
embed_dim=96,
depth=8,
num_heads=8,
mlp_ratio=3.0,
qkv_bias=False,
norm_layer=torch.nn.LayerNorm,
).to(device)
# Monkey patch all attentions to test the sparse-aware wrap
def replace_attn_with_xformers_one(module, att_mask):
module_output = module
if isinstance(module, timm.models.vision_transformer.Attention):
qkv = module.qkv
dim = qkv.weight.shape[1] * module.num_heads
module_output = TimmSparseAttention(
dim, module.num_heads, attn_mask=att_mask
)
for name, child in module.named_children():
module_output.add_module(
name, replace_attn_with_xformers_one(child, att_mask)
)
del module
return module_output
H, W = img_size // patch_size, img_size // patch_size
mask = (torch.rand((H * W + 1, H * W + 1), device=device) > 0.5).bool()
model = replace_attn_with_xformers_one(model, att_mask=mask)
# Check that we can throw a couple of random pictures at it
inputs = torch.rand((batch, 3, img_size, img_size), device=device)
_ = model(inputs)
|
bart_ls-main
|
xformers/tests/test_timm_wrap.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers.factory.model_factory import xFormer, xFormerConfig
BATCH = 20
SEQ = 128
EMB = 48
VOCAB = 16
DEVICES = (
[torch.device("cpu")]
if not torch.cuda.is_available()
    else [torch.device("cuda")] # save a bit on CI, we have separate cpu and gpu jobs
)
_test_config_encoder = {
"reversible": False,
"block_type": "encoder",
"dim_model": EMB,
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": VOCAB,
"dim_model": EMB,
},
"num_layers": 3,
"multi_head_config": {
"num_heads": 4,
"residual_dropout": 0,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
"dim_model": EMB,
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
_test_config_decoder = {
"block_type": "decoder",
"dim_model": EMB,
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": VOCAB,
"dim_model": EMB,
},
"num_layers": 2,
"multi_head_config_masked": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
},
"multi_head_config_cross": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
# Test a pure encoder, a pure decoder, an encoder/decoder stack
_test_configs = [
[_test_config_encoder, _test_config_decoder],
[_test_config_encoder],
]
def _rev_config(config, flag: bool):
for c in filter(
lambda x: x["block_type"] == "encoder",
config,
):
c["reversible"] = flag
return config
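# Illustrative note on the helper above: only configs whose "block_type" is
# "encoder" carry the "reversible" flag here, so for the encoder/decoder stack
# the decoder entry is returned unchanged.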
@pytest.mark.parametrize("config", _test_configs)
@pytest.mark.parametrize("device", DEVICES)
def test_reversible_runs(config, device):
# Build both a reversible and non-reversible model
model_non_reversible = xFormer.from_config(
xFormerConfig(_rev_config(config, False))
).to(device)
model_reversible = xFormer.from_config(xFormerConfig(_rev_config(config, True))).to(
device
)
# Dummy inputs, test a forward
inputs = (torch.rand((BATCH, SEQ), device=device) * 10).abs().to(torch.int)
_ = model_non_reversible(inputs)
_ = model_reversible(inputs)
@pytest.mark.parametrize("device", DEVICES)
def test_reversible_no_alternate(device):
# Check that we cannot build a non-coherent stack
with pytest.raises(AssertionError):
rev = dict(_test_config_encoder) # we need to make a copy
rev["reversible"] = True
non_rev = dict(_test_config_encoder)
non_rev["reversible"] = False
_ = xFormer.from_config(xFormerConfig([rev, non_rev])).to(device)
@pytest.mark.parametrize("config", _test_configs)
@pytest.mark.parametrize("device", DEVICES)
def test_reversible_train(config, device):
torch.manual_seed(0)
random.seed(0)
# Dummy inputs, test some training to make sure that we both can approximate the same thing to some extent
# This is not super scientific, more of a foolproof catch
def data():
input_a = torch.zeros((BATCH, SEQ), device=device).to(torch.int)
input_b = (torch.rand((BATCH, SEQ), device=device) * VOCAB).abs().to(torch.int)
target_a = torch.zeros((BATCH, SEQ), device=device)
target_b = torch.ones((BATCH, SEQ), device=device)
if random.random() > 0.5:
return torch.cat([input_a, input_b], dim=0), torch.cat(
[target_a, target_b], dim=0
)
return torch.cat([input_b, input_a], dim=0), torch.cat(
[target_b, target_a], dim=0
)
def step(model: torch.nn.Module, optim: torch.optim.Optimizer):
batch, target = data()
model.train()
optim.zero_grad()
outputs = model(batch)
loss = torch.norm(torch.mean(outputs, dim=-1) - target)
loss.backward()
# Clip grad and error out if we're producing NaNs, part of the unit test
torch.nn.utils.clip_grad_norm_(
model.parameters(), 10.0, norm_type=2.0, error_if_nonfinite=True
)
optim.step()
return loss.item()
def evaluate(model: torch.nn.Module):
batch, target = data()
model.eval()
outputs = model(batch)
return torch.norm(torch.mean(outputs, dim=-1) - target).item()
# Build both a reversible and non-reversible model
model_non_reversible = xFormer.from_config(
xFormerConfig(_rev_config(config, False))
).to(device)
model_reversible = xFormer.from_config(xFormerConfig(_rev_config(config, True))).to(
device
)
optim_rev = torch.optim.SGD(model_reversible.parameters(), lr=1e-3, momentum=0.9)
optim_non_rev = torch.optim.SGD(
model_non_reversible.parameters(), lr=1e-3, momentum=0.9
)
# Check that both models can be trained to comparable results
eval_start_rev = evaluate(model_reversible)
eval_start_non_rev = evaluate(model_non_reversible)
for i in range(100):
print(i, " reversible: ", step(model_reversible, optim_rev))
print(i, " non reversible: ", step(model_non_reversible, optim_non_rev))
# Check that we can classify this dummy example
# Arbitrary threshold
eval_stop_rev = evaluate(model_reversible)
eval_stop_non_rev = evaluate(model_non_reversible)
if len(config) < 2: # only check the encoder case
assert eval_start_rev / eval_stop_rev > 3
assert eval_start_non_rev / eval_stop_non_rev > 3
|
bart_ls-main
|
xformers/tests/test_reversible.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# type: ignore
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from typing import Any, List
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "xFormers"
copyright = "Copyright © 2021 Meta Platforms, Inc"
author = "Facebook AI Research"
# The full version, including alpha/beta/rc tags
release = "0.0.8"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon", # support NumPy and Google style docstrings
"recommonmark",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
]
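# Hedged aside: additional built-in Sphinx plugins can be enabled by extending
# this list (for example `extensions.append("sphinx.ext.graphviz")`); none of
# them are required by the configuration below.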
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[Any] = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
html_theme = "pytorch_sphinx_theme"
templates_path = ["_templates"]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "includehidden": True,
    "canonical_url": "https://facebookresearch.github.io/xformers",
    "pytorch_project": "docs",
    "logo_only": True,  # default = False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# setting custom stylesheets https://stackoverflow.com/a/34420612
html_context = {"css_files": ["_static/css/customize.css"]}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "xformersdocs"
github_doc_root = "https://github.com/facebookresearch/xformers/tree/main/docs/"
# Over-ride PyTorch Sphinx css
def setup(app):
app.add_config_value(
"recommonmark_config",
{
"url_resolver": lambda url: github_doc_root + url,
"auto_toc_tree_section": "Contents",
"enable_math": True,
"enable_inline_math": True,
"enable_eval_rst": True,
"enable_auto_toc_tree": True,
},
True,
)
app.add_transform(AutoStructify)
app.add_css_file("css/customize.css")
|
bart_ls-main
|
xformers/docs/source/conf.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# A MinGPT + Lightning + xFormers example. Code from Sean Naren (@seannaren).
# This is an homage to https://github.com/karpathy/minGPT
import math
import os
import pytorch_lightning as pl
import torch
import torch.nn as nn
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.utilities import rank_zero_info
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset, RandomSampler
from xformers.factory.model_factory import xFormer, xFormerConfig
class GPT(pl.LightningModule):
""" the full GPT language model, with a context size of block_size """
def __init__(
self,
vocab_size,
weight_decay=0.1,
betas=(0.9, 0.95),
learning_rate=6e-4,
n_embd=512,
block_size=128,
n_layer=8,
n_head=8,
resid_pdrop=0.1,
attn_pdrop=0.1,
mlp_pdrop=0.1,
attention="scaled_dot_product",
hidden_layer_multiplier=4,
warmup_tokens=20,
final_tokens=1000,
):
super().__init__()
# auto creates self.hparams from the method signature
self.save_hyperparameters()
# A list of the encoder or decoder blocks which constitute the Transformer.
xformer_config = [
{
"reversible": False, # Turn on to test the effect of using reversible layers
"block_type": "encoder",
"num_layers": self.hparams.n_layer,
"dim_model": self.hparams.n_embd,
"layer_norm_style": "pre",
"position_encoding_config": {
"name": "vocab",
"seq_len": self.hparams.block_size,
"vocab_size": self.hparams.vocab_size,
},
"multi_head_config": {
"num_heads": self.hparams.n_head,
"residual_dropout": self.hparams.resid_pdrop,
"use_rotary_embeddings": True,
"attention": {
"name": self.hparams.attention,
"dropout": self.hparams.attn_pdrop,
"causal": True,
"seq_len": self.hparams.block_size,
"num_rules": self.hparams.n_head,
},
},
"feedforward_config": {
"name": "FusedMLP", # Use MLP if Triton is not available
"dropout": self.hparams.mlp_pdrop,
"activation": "gelu",
"hidden_layer_multiplier": self.hparams.hidden_layer_multiplier,
},
}
]
config = xFormerConfig(xformer_config)
self.model = xFormer.from_config(config)
# decoder head
self.ln_f = nn.LayerNorm(self.hparams.n_embd)
self.head = nn.Linear(self.hparams.n_embd, self.hparams.vocab_size, bias=False)
self.block_size = self.hparams.block_size
self.apply(self._init_weights)
self._tokens_seen = 0
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
# Reset the token counter
self._tokens_seen = 0
def get_block_size(self):
return self.block_size
def configure_optimizers(self):
# Create the optimizer and the training schedule:
# - Handle the per-param weight decay
no_decay = ["bias", "LayerNorm.weight"]
params_decay = [
p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)
]
params_nodecay = [
p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)
]
optim_groups = [
{"params": params_decay, "weight_decay": self.hparams.weight_decay},
{"params": params_nodecay, "weight_decay": 0.0},
]
# - Start with a warm up, ramp up then cosine
optimizer = torch.optim.AdamW(
optim_groups, lr=self.hparams.learning_rate, betas=self.hparams.betas
)
def update_lr(*_):
config = self.hparams
if self._tokens_seen < config.warmup_tokens:
# linear warmup
lr_mult = float(self._tokens_seen) / float(max(1, config.warmup_tokens))
lr_mult = max(lr_mult, 1e-2) # could be that we've not seen any yet
else:
# cosine learning rate decay
progress = float(self._tokens_seen - config.warmup_tokens) / float(
max(1, config.final_tokens - config.warmup_tokens)
)
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
return lr_mult
lr_scheduler = {
"scheduler": torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=[update_lr, update_lr],
),
"name": "learning_rate",
"interval": "step", # The unit of the scheduler's step size
"frequency": 1, # The frequency of the scheduler
}
return [optimizer], [lr_scheduler]
def forward(self, src):
# predict the next tokens (in latent space)
prediction = self.model(src)
# translate the predictions into tokens
prediction = self.ln_f(prediction)
logits = self.head(prediction)
return logits
def training_step(self, batch, _):
src, targets = batch
# Update the tokens we've seen (tracked for LR scheduling)
self._tokens_seen += (src >= 0).numel()
# same action as inference
logits = self(src)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
self.logger.log_metrics(
{
"train_loss": loss.mean(),
"learning_rate": self.lr_schedulers().get_last_lr()[0],
},
            step=self.trainer.global_step,
)
return loss
class CharDataset(Dataset):
def __init__(self, data, block_size):
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
rank_zero_info("data has %d characters, %d unique." % (data_size, vocab_size))
self.stoi = {ch: i for i, ch in enumerate(chars)}
self.itos = {i: ch for i, ch in enumerate(chars)}
self.block_size = block_size
self.vocab_size = vocab_size
self.data = data
def __len__(self):
return len(self.data) - self.block_size
def __getitem__(self, i):
chunk = self.data[i : i + self.block_size + 1]
dix = [self.stoi[s] for s in chunk]
# src and target are off by one, we want the model to predict the next word
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
return x, y
def to_tokens(self, message, device):
return torch.tensor([self.stoi[s] for s in message], dtype=torch.long)[
None, ...
].to(device)
def from_tokens(self, tokens):
return "".join([self.itos[int(i)] for i in tokens])
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
# CREDITS: https://github.com/karpathy/minGPT/blob/master/mingpt/utils.py
def top_k_logits(logits, k):
v, _ = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float("Inf")
return out
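    # Worked example for the helper above (hedged, illustrative numbers only):
    # for logits [[1.0, 3.0, 2.0, 0.5]] and k=2, the two largest entries are kept
    # and the rest are pushed to -inf, giving [[-inf, 3.0, 2.0, -inf]].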
for _ in range(steps):
x_cond = (
x if x.size(1) <= block_size else x[:, -block_size:]
) # crop context if needed
logits = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x[0] # escape the batch dimension
if __name__ == "__main__":
seed_everything(42)
# Adjust batch depending on the available memory on your machine.
# You can also use reversible layers to save memory
REF_BATCH = 512
BATCH = 256
WORKERS = 4
EPOCHS = 1
BLOCK = 128
WARMUP = 20
if not os.path.exists("input.txt"):
os.system(
"wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
)
    with open("input.txt", "r") as f:
        text = f.read()
train_dataset = CharDataset(
text, BLOCK
) # one line of poem is roughly 50 characters
random_sampler = RandomSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
sampler=random_sampler,
batch_size=BATCH,
num_workers=WORKERS,
pin_memory=True,
)
model = GPT(
vocab_size=train_dataset.vocab_size,
block_size=train_dataset.block_size,
attention="scaled_dot_product",
warmup_tokens=REF_BATCH * WARMUP,
final_tokens=EPOCHS * len(train_dataset) * BLOCK,
)
print(model)
trainer = Trainer(
gpus=1,
max_epochs=EPOCHS,
precision=16,
gradient_clip_val=1, # Use to catch divergent gradients, if experimenting
log_every_n_steps=1,
# detect_anomaly=True, # Use to catch NaNs, if experimenting
accumulate_grad_batches=REF_BATCH // BATCH,
)
trainer.fit(model, train_loader)
# Sample from the model, let it predict a paragraph
context = "Friends of my soul" # prime with something
x = train_dataset.to_tokens(context, model.device)
y = sample(model, x, steps=1000, temperature=1.0, sample=True, top_k=10)
print(train_dataset.from_tokens(y))
|
bart_ls-main
|
xformers/examples/microGPT.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS:
# inspired by
# https://github.com/nateraw/lightning-vision-transformer
# which in turn references https://github.com/lucidrains/vit-pytorch
# Original author: Sean Naren
import math
from enum import Enum
import pytorch_lightning as pl
import torch
from pl_bolts.datamodules import CIFAR10DataModule
from torch import nn
from torchmetrics import Accuracy
from torchvision import transforms
from xformers.factory import xFormer, xFormerConfig
class Classifier(str, Enum):
GAP = "gap"
TOKEN = "token"
class VisionTransformer(pl.LightningModule):
def __init__(
self,
steps,
learning_rate=1e-3,
betas=(0.9, 0.99),
weight_decay=0.03,
image_size=32,
num_classes=10,
patch_size=4,
dim=768,
n_layer=12,
n_head=12,
resid_pdrop=0.1,
attn_pdrop=0.1,
mlp_pdrop=0.1,
attention="scaled_dot_product",
hidden_layer_multiplier=4,
linear_warmup_ratio=0.05,
classifier: Classifier = Classifier.GAP,
):
super().__init__()
# all the inputs are saved under self.hparams (hyperparams)
self.save_hyperparameters()
assert image_size % patch_size == 0
num_patches = (image_size // patch_size) ** 2
# A list of the encoder or decoder blocks which constitute the Transformer.
xformer_config = [
{
"reversible": False, # Turn on to test the effect of using reversible layers
"block_type": "encoder",
"num_layers": n_layer,
"dim_model": dim,
"layer_norm_style": "pre",
"multi_head_config": {
"num_heads": n_head,
"residual_dropout": resid_pdrop,
"use_rotary_embeddings": True,
"attention": {
"name": attention,
"dropout": attn_pdrop,
"causal": False,
},
},
"feedforward_config": {
"name": "FusedMLP",
"dropout": mlp_pdrop,
"activation": "gelu",
"hidden_layer_multiplier": hidden_layer_multiplier,
},
}
]
config = xFormerConfig(xformer_config)
self.transformer = xFormer.from_config(config)
# init positional embedding with 0.02 from BERT
self.pos_emb = nn.Parameter(
torch.randn(1, num_patches + (classifier == Classifier.TOKEN), dim) * 0.02
)
self.patch_emb = nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size)
if classifier == Classifier.TOKEN:
self.clf_token = nn.Parameter(torch.zeros(dim))
self.ln = nn.LayerNorm(dim)
self.head = nn.Linear(dim, num_classes)
self.criterion = torch.nn.CrossEntropyLoss()
self.val_accuracy = Accuracy()
@staticmethod
def linear_warmup_cosine_decay(warmup_steps, total_steps):
"""
Linear warmup for warmup_steps, with cosine annealing to 0 at total_steps
"""
def fn(step):
if step < warmup_steps:
return float(step) / float(max(1, warmup_steps))
progress = float(step - warmup_steps) / float(
max(1, total_steps - warmup_steps)
)
return 0.5 * (1.0 + math.cos(math.pi * progress))
return fn
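    # Worked example for the schedule above (hedged, illustrative numbers): with
    # warmup_steps=10 and total_steps=100, fn(0) == 0.0, fn(5) == 0.5, fn(10) == 1.0
    # (the cosine part starts at its peak) and fn(100) == 0.0.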
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.hparams.learning_rate,
betas=self.hparams.betas,
weight_decay=self.hparams.weight_decay,
)
warmup_steps = int(self.hparams.linear_warmup_ratio * self.hparams.steps)
scheduler = {
"scheduler": torch.optim.lr_scheduler.LambdaLR(
optimizer,
self.linear_warmup_cosine_decay(warmup_steps, self.hparams.steps),
),
"interval": "step",
}
return [optimizer], [scheduler]
def forward(self, x):
batch, *_ = x.shape # BCHW
x = self.patch_emb(x)
# flatten patches into sequence
x = x.flatten(2, 3).transpose(1, 2).contiguous() # B HW C
if self.hparams.classifier == Classifier.TOKEN:
# prepend classification token to the sequence
clf_token = (
torch.ones(batch, 1, self.hparams.dim, device=x.device) * self.clf_token
)
            x = torch.cat([clf_token, x], dim=1)
# add position embedding
x += self.pos_emb.expand_as(x)
x = self.transformer(x)
x = self.ln(x)
if self.hparams.classifier == Classifier.TOKEN:
x = x[:, 0] # only consider the token, we're classifying anyway
elif self.hparams.classifier == Classifier.GAP:
x = x.mean(dim=1) # mean over sequence len
x = self.head(x)
return x
def training_step(self, batch, _):
x, y = batch
y_hat = self(x)
loss = self.criterion(y_hat, y)
self.logger.log_metrics(
{
"train_loss": loss.mean(),
"learning_rate": self.lr_schedulers().get_last_lr()[0],
},
            step=self.trainer.global_step,
)
return loss
def evaluate(self, batch, stage=None):
x, y = batch
y_hat = self(x)
loss = self.criterion(y_hat, y)
acc = self.val_accuracy(y_hat, y)
if stage:
self.log(f"{stage}_loss", loss, prog_bar=True)
self.log(f"{stage}_acc", acc, prog_bar=True)
def validation_step(self, batch, _):
self.evaluate(batch, "val")
def test_step(self, batch, _):
self.evaluate(batch, "test")
if __name__ == "__main__":
pl.seed_everything(42)
# Adjust batch depending on the available memory on your machine.
# You can also use reversible layers to save memory
REF_BATCH = 4096
BATCH = 512
MAX_EPOCHS = 20
NUM_WORKERS = 4
GPUS = 1
train_transforms = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
test_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
# We'll use a datamodule here, which already handles dataset/dataloader/sampler
# See https://pytorchlightning.github.io/lightning-tutorials/notebooks/lightning_examples/cifar10-baseline.html
# for a full tutorial
dm = CIFAR10DataModule(
data_dir="data",
batch_size=BATCH,
num_workers=NUM_WORKERS,
pin_memory=True,
train_transforms=train_transforms,
test_transforms=test_transforms,
val_transforms=test_transforms,
)
image_size = dm.size(-1) # 32 for CIFAR
num_classes = dm.num_classes # 10 for CIFAR
# compute total number of steps
batch_size = BATCH * GPUS
steps = dm.num_samples // batch_size * MAX_EPOCHS
lm = VisionTransformer(
steps=steps,
image_size=image_size,
num_classes=num_classes,
attention="scaled_dot_product",
classifier=Classifier.TOKEN,
)
trainer = pl.Trainer(
gpus=GPUS,
max_epochs=MAX_EPOCHS,
detect_anomaly=True,
precision=16,
accumulate_grad_batches=REF_BATCH // BATCH,
)
trainer.fit(lm, dm)
# check the training
trainer.test(lm, datamodule=dm)
|
bart_ls-main
|
xformers/examples/microViT.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from omegaconf import DictConfig
from xformers.factory.hydra_helper import import_xformer_config_schema
@hydra.main(config_path="conf", config_name="config")
def my_app(cfg: DictConfig) -> None:
model = hydra.utils.instantiate(cfg.xformer, _convert_="all")
print(
f"Built a model with {len(cfg.xformer.stack_configs)} stack: {cfg.xformer.stack_configs.keys()}"
)
print(model)
if __name__ == "__main__":
# optional - only needed when you want to use xformer config dataclass
# to validate config values.
import_xformer_config_schema()
my_app()
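# For reference, a hypothetical shape for the `conf/config.yaml` consumed by this
# script (field names mirror xFormerConfig; the actual file shipped with the
# example, including any hydra-specific keys, may differ):
#
#   xformer:
#     stack_configs:
#       my_encoder:
#         reversible: false
#         block_type: encoder
#         num_layers: 4
#         dim_model: 384
#         ...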
|
bart_ls-main
|
xformers/examples/build_model/my_model.py
|
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2017 Guillaume Papin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse # noqa: E402
import difflib # noqa: E402
import fnmatch # noqa: E402
import io # noqa: E402
import multiprocessing # noqa: E402
import os # noqa: E402
import signal # noqa: E402
import subprocess # noqa: E402
import sys # noqa: E402
import traceback # noqa: E402
from functools import partial # noqa: E402
from subprocess import DEVNULL # noqa: E402
DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu"
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for file in files:
if recursive and os.path.isdir(file):
for dirpath, dnames, fnames in os.walk(file):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
# os.walk() supports trimming down the dnames list
# by modifying it in-place,
# to avoid unnecessary directory listings.
dnames[:] = [
x
for x in dnames
if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)]
for f in fpaths:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
else:
out.append(file)
return out
def make_diff(file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile="{}\t(original)".format(file),
tofile="{}\t(reformatted)".format(file),
n=3,
)
)
class DiffError(Exception):
def __init__(self, message, errs=None):
super(DiffError, self).__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super(UnexpectedError, self).__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file):
try:
ret = run_clang_format_diff(args, file)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError("{}: {}: {}".format(file, e.__class__.__name__, e), e)
def run_clang_format_diff(args, file):
try:
with io.open(file, "r", encoding="utf-8") as f:
original = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable, file]
# Use of utf-8 to decode the process output.
#
# Hopefully, this is the correct thing to do.
#
# It's done due to the following assumptions (which may be incorrect):
    # - clang-format will return the bytes read from the files as-is,
# without conversion, and it is already assumed that the files use utf-8.
# - if the diagnostics were internationalized, they would use utf-8:
# > Adding Translations to Clang
# >
# > Not possible yet!
# > Diagnostic strings should be written in UTF-8,
# > the client can translate to the relevant code page if needed.
# > Each translation completely replaces the format string
# > for the diagnostic.
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
try:
proc = subprocess.Popen(
invocation,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding="utf-8",
)
except OSError as exc:
raise DiffError(
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(invocation), exc
)
)
proc_stdout = proc.stdout
proc_stderr = proc.stderr
# hopefully the stderr pipe won't get full and block the process
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError(
"Command '{}' returned non-zero exit status {}".format(
subprocess.list2cmdline(invocation), proc.returncode
),
errs,
)
return make_diff(file, original, outs), errs
def bold_red(s):
return "\x1b[1m\x1b[31m" + s + "\x1b[0m"
def colorize(diff_lines):
def bold(s):
return "\x1b[1m" + s + "\x1b[0m"
def cyan(s):
return "\x1b[36m" + s + "\x1b[0m"
def green(s):
return "\x1b[32m" + s + "\x1b[0m"
def red(s):
return "\x1b[31m" + s + "\x1b[0m"
for line in diff_lines:
if line[:4] in ["--- ", "+++ "]:
yield bold(line)
elif line.startswith("@@ "):
yield cyan(line)
elif line.startswith("+"):
yield green(line)
elif line.startswith("-"):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = "error:"
if use_colors:
error_text = bold_red(error_text)
print("{}: {} {}".format(prog, error_text, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--clang-format-executable",
metavar="EXECUTABLE",
help="path to the clang-format executable",
default="clang-format",
)
parser.add_argument(
"--extensions",
help="comma separated list of file extensions (default: {})".format(
DEFAULT_EXTENSIONS
),
default=DEFAULT_EXTENSIONS,
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="run recursively over directories",
)
parser.add_argument("files", metavar="file", nargs="+")
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument(
"-j",
metavar="N",
type=int,
default=0,
help="run N clang-format jobs in parallel" " (default number of cpus + 1)",
)
parser.add_argument(
"--color",
default="auto",
choices=["auto", "always", "never"],
help="show colored diff (default: auto)",
)
parser.add_argument(
"-e",
"--exclude",
metavar="PATTERN",
action="append",
default=[],
help="exclude paths matching the given glob-like pattern(s)"
" from recursive search",
)
args = parser.parse_args()
# use default signal handling, like diff return SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == "always":
colored_stdout = True
colored_stderr = True
elif args.color == "auto":
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
version_invocation = [args.clang_format_executable, str("--version")]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(version_invocation), e
),
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
files = list_files(
args.files,
recursive=args.recursive,
exclude=args.exclude,
extensions=args.extensions.split(","),
)
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
return retcode
if __name__ == "__main__":
sys.exit(main())
|
bart_ls-main
|
xformers/.circleci/run-clang-format.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
# Please update the doc version in docs/source/conf.py as well.
__version__ = "0.0.8"
_is_sparse_available = True
_is_triton_available = torch.cuda.is_available()
def _register_extensions():
import importlib
import os
import torch
# load the custom_op_library and register the custom ops
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise err
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec("_C")
if ext_specs is None:
raise ImportError
torch.ops.load_library(ext_specs.origin)
if _is_sparse_available:
try:
_register_extensions()
except (ImportError, OSError) as e:
print(e)
logging.warning(
f"WARNING: {e}\nNeed to compile C++ extensions to get sparse attention suport."
+ " Please run python setup.py build develop"
)
_is_sparse_available = False
if _is_triton_available:
try:
from xformers.triton.softmax import softmax as triton_softmax # noqa
except ImportError as e:
logging.warning(
f"Triton is not available, some optimizations will not be enabled.\nError {e}"
)
_is_triton_available = False
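# Downstream code can branch on the availability flags above; a hedged sketch:
#
#   import xformers
#   if xformers._is_triton_available:
#       ...  # take the Triton-accelerated code path
#   else:
#       ...  # fall back to plain PyTorch implementations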
|
bart_ls-main
|
xformers/xformers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
bart_ls-main
|
xformers/xformers/test.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
def masked_matmul(a, b, mask=None):
if torch.overrides.has_torch_function((a, b, mask)):
return torch.overrides.handle_torch_function(
masked_matmul, (a, b, mask), a, b, mask
)
att = a @ b
if mask is None:
return att
if mask.dtype == torch.bool:
if mask.ndim == 2:
mask = mask.unsqueeze(0).expand(att.shape[0], -1, -1)
# mask is presumed false == ignore
att[~mask] = float("-inf")
else:
# mask is presumed additive
att += mask
return att
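# A small usage sketch (hedged: the shapes below are illustrative and this block
# is not part of the library's public surface):
if __name__ == "__main__":
    q = torch.randn(2, 4, 8)                                  # (batch, seq, dim)
    k = torch.randn(2, 4, 8)
    causal = torch.tril(torch.ones(4, 4, dtype=torch.bool))   # False == ignore
    att = masked_matmul(q, k.transpose(-2, -1), causal)
    print(att.shape)  # torch.Size([2, 4, 4]); masked positions hold -inf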
|
bart_ls-main
|
xformers/xformers/ops.py
|