python_code | repo_name | file_path
---|---|---|
from pathlib import Path
import pytest
from models.openai_model import Model
from transformers import GPT2TokenizerFast
from services.usage_service import UsageService
# Non-ChatGPT -> TODO: make generic test and loop through text models
@pytest.mark.asyncio
async def test_send_req():
usage_service = UsageService(Path("../tests"))
model = Model(usage_service)
prompt = "how many hours are in a day?"
tokens = len(GPT2TokenizerFast.from_pretrained("gpt2")(prompt)["input_ids"])
res = await model.send_request(prompt, tokens)
assert "24" in res["choices"][0]["text"]
# ChatGPT version
@pytest.mark.asyncio
async def test_send_req_gpt():
usage_service = UsageService(Path("../tests"))
model = Model(usage_service)
prompt = "how many hours are in a day?"
res = await model.send_request(
prompt, None, is_chatgpt_request=True, model="gpt-3.5-turbo"
)
assert "24" in res["choices"][0]["message"]["content"]
# GPT4 version
@pytest.mark.asyncio
async def test_send_req_gpt4():
usage_service = UsageService(Path("../tests"))
model = Model(usage_service)
prompt = "how many hours are in a day?"
res = await model.send_request(prompt, None, is_chatgpt_request=True, model="gpt-4")
assert "24" in res["choices"][0]["message"]["content"]
# Edit request -> currently broken due to endpoint
# @pytest.mark.asyncio
# async def test_send_edit_req():
# usage_service = UsageService(Path("../tests"))
# model = Model(usage_service)
# text = 'how many hours are in a day?'
# res = await model.send_edit_request(text)
# assert '24' in res['choices'][0]['text']
|
SwarmsDiscord-main
|
tests/test_requests.py
|
Speculative-Decoding-main
|
sd/__init__.py
|
|
import torch
import torch.nn.functional as F
class SpeculativeDecoder:
def __init__(self, Mp, Mq, gamma):
"""
Initialize the SpeculativeDecoder.
Parameters:
- Mp (nn.Module): The target model.
- Mq (nn.Module): The more efficient approximation model.
- gamma (int): The number of draft tokens to sample from Mq at each decoding step.
"""
self.Mp = Mp
self.Mq = Mq
self.gamma = gamma
def speculative_sampling(self, p, q):
"""
Perform speculative sampling from distribution p(x) using distribution q(x).
Parameters:
- p (torch.Tensor): The target distribution p(x).
- q (torch.Tensor): The approximation distribution q(x).
Returns:
- x (int): The sampled token from p(x).
"""
try:
# Sample x from q(x)
x = torch.multinomial(q, 1).item()
# Accept x with probability min(1, p(x) / q(x)); otherwise reject and
# resample from the adjusted distribution norm(max(p - q, 0))
if torch.rand(1).item() > (p[x] / q[x]).item():
p_adjusted = F.normalize(torch.clamp(p - q, min=0), p=1, dim=0)
x = torch.multinomial(p_adjusted, 1).item()
return x
except Exception as e:
print("An error occurred in speculative_sampling: ", str(e))
def speculative_decoding_step(self, prefix):
"""
Perform one step of speculative decoding.
Parameters:
- prefix (torch.Tensor): The conditioning prefix.
Returns:
- new_prefix (torch.Tensor): The updated prefix.
"""
try:
# Sample γ guesses from Mq autoregressively, keeping each draft distribution
prefix = prefix.unsqueeze(0) # Add batch dimension
orig_len = prefix.shape[1]
guesses = []
q_dists = []
for i in range(self.gamma):
with torch.no_grad():
q = self.Mq(prefix)
q_dists.append(q[0])
guess = torch.multinomial(q[0], 1)
guesses.append(guess)
prefix = torch.cat((prefix, guess.unsqueeze(0)), dim=1)
# Run Mp on the original prefix extended by 0, 1, ..., γ guesses
p_values = []
for i in range(self.gamma + 1):
with torch.no_grad():
p = self.Mp(prefix[:, : orig_len + i])
p_values.append(p[0])
# Determine the number of accepted guesses: guess i is accepted with
# probability min(1, p_i(x) / q_i(x))
n = self.gamma
for i in range(self.gamma):
ri = torch.rand(1).item()
x = guesses[i].item()
if ri > (p_values[i][x] / q_dists[i][x]).item():
n = i
break
# If a guess was rejected, adjust the distribution from Mp
p_next = p_values[n]
if n < self.gamma:
p_next = F.normalize(torch.clamp(p_next - q_dists[n], min=0), p=1, dim=0)
# Return the n accepted tokens from Mq plus one token sampled from Mp
t = torch.multinomial(p_next, 1)
new_prefix = torch.cat((prefix[0, : orig_len + n], t))
return new_prefix
except Exception as e:
print("An error occurred in speculative_decoding_step: ", str(e))
# Example Usage
# Make sure to define your models Mp and Mq, and provide the prefix and gamma value
# Mp = some_target_model
# Mq = some_approximation_model
# prefix = torch.tensor([some_initial_tokens])
# gamma = some_integer_value
# decoder = SpeculativeDecoder(Mp, Mq, gamma)
# new_prefix = decoder.speculative_decoding_step(prefix)
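# A minimal runnable sketch of the class above, kept commented out like the
# example usage. The two models are illustrative stand-ins (assumptions, not
# part of the original code): each is a callable mapping a [1, seq_len] token
# tensor to a [1, vocab_size] probability distribution over the next token.
# vocab_size = 8
# def make_toy_model(seed):
#     g = torch.Generator().manual_seed(seed)
#     logits = torch.randn(vocab_size, generator=g)
#     def fn(tokens):
#         # ignore the tokens; always return the same next-token distribution
#         return torch.softmax(logits, dim=-1).unsqueeze(0)
#     return fn
# decoder = SpeculativeDecoder(Mp=make_toy_model(0), Mq=make_toy_model(1), gamma=3)
# new_prefix = decoder.speculative_decoding_step(torch.tensor([1, 2, 3]))
# print(new_prefix)  # original prefix plus the accepted draft tokens and one token from Mp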
|
Speculative-Decoding-main
|
sd/main.py
|
import json
import warnings
# warning raised by pkg_resources used in a lot of google packages
warnings.filterwarnings("ignore", message=r".*declare_namespace\(\'.*google.*", category=DeprecationWarning)
# base warning raised when the warnings above are raised
warnings.filterwarnings("ignore", message=r".*pkg_resources is deprecated.*", category=DeprecationWarning)
# must import taggers to register them
# we import the rust extension here and wrap it in a python module
from . import dolma as _dolma # type: ignore # noqa: E402
from .core.errors import DolmaRustPipelineError # noqa: E402
from .taggers import * # noqa: E402
def deduper(config: dict):
try:
_dolma.deduper_entrypoint(json.dumps(config))
except RuntimeError as e:
raise DolmaRustPipelineError(f"Error running deduper: {e}") from e
def mixer(config: dict):
try:
_dolma.mixer_entrypoint(json.dumps(config))
except RuntimeError as e:
raise DolmaRustPipelineError(f"Error running mixer: {e}") from e
|
dolma-main
|
python/dolma/__init__.py
|
import math
from abc import abstractmethod, abstractproperty
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
import numpy.typing as npt
# # # OLD IMPORT # # #
# from sortedcontainers import SortedDict
class SummaryTuple(NamedTuple):
counts: List[int]
bins: List[float]
def sort_and_merge_bins(
bins: npt.NDArray[np.float64], counts: npt.NDArray[np.int64], mask: Optional[npt.NDArray[np.bool_]] = None
) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.int64]]:
"""Sort bins and counts; merge counts for duplicate bins"""
masked_bins = bins[mask] if mask is not None else bins
masked_counts = counts[mask] if mask is not None else counts
uniq_bins, uniq_indices, uniq_counts = np.unique(masked_bins, return_counts=True, return_index=True)
uniq_counts *= masked_counts[uniq_indices]
return uniq_bins, uniq_counts
def merge_bins(
bin_a: npt.NDArray[np.float64],
count_a: npt.NDArray[np.int64],
bin_b: npt.NDArray[np.float64],
count_b: npt.NDArray[np.int64],
):
"""Merge two sets of bins and counts into one set of bins and counts;
assumes that bin_a and bin_b are sorted
Args:
bin_a (npt.NDArray[np.float64]): A sorted array of bins
count_a (npt.NDArray[np.int64]): A corresponding array of counts
bin_b (npt.NDArray[np.float64]): A sorted array of bins
count_b (npt.NDArray[np.int64]): A corresponding array of counts
"""
if bin_a.size < bin_b.size:
# bin_a is always the larger one
bin_a, count_a, bin_b, count_b = bin_b, count_b, bin_a, count_a
# we first find where the bins in bin_b would be inserted into bin_a
# b_locs = np.minimum(np.searchsorted(bin_a, bin_b, side="left"), bin_a.size - 1)
b_locs = np.searchsorted(bin_a, bin_b, side="left")
# make a masked version of b_locs that only contains indices that are in bounds
b_bounded_mask = b_locs < bin_a.size
b_bounded_locs = b_locs * b_bounded_mask.astype(b_locs.dtype)
# we make a few useful masks and arrays for later operations
# we need to keep track of which bins in bin_b are new and which are duplicates of bins in bin_a
# for the former, we will insert them into bin_a at the appropriate locations; for the latter,
# we will add their counts to the counts of the corresponding bins in bin_a
# new mask consists of either the bins in bin_b that are in bounds (i.e., would be between values of
# bin_a and not after) and are not equal to the corresponding bins in bin_a, or the bins in bin_b that
# will be inserted after the last bin in bin_a.
b_new_mask = (bin_a[b_bounded_locs] != bin_b) | ~b_bounded_mask
b_new_vals = bin_b[b_new_mask]
# now we need to find the locations where the new bins will be inserted into new array bin_c which
# is the size of bin_a + the number of new bins in bin_b
b_new_locs = np.arange(b_new_vals.size) + b_locs[b_new_mask]
# this is where we will store the new bins and counts
bin_c = np.empty(bin_a.size + b_new_vals.size, dtype=bin_a.dtype)
# we first fill bins from bin_a into bin_c; a_indices is a mask of the indices in bin_c that
# should be filled with values from bin_a, so we remove values from bin_c.
a_indices = np.ones(bin_c.size, dtype=bool)
a_indices[b_new_locs] = False
bin_c[a_indices] = bin_a
# finally, we add values from bin_b into bin_c
bin_c[b_new_locs] = b_new_vals
# now onto the counts; we start by creating new container, and populate counts from count_a and count_b
# where bin_a values are different from bin_b values.
count_c = np.empty_like(bin_c, dtype=count_a.dtype)
count_c[b_new_locs] = count_b[b_new_mask]
count_c[a_indices] = count_a
# finally, for the remaining counts, we group them by bin value and add them to counts from count_a
# we must group because `array[locs] += values` does not work if there are duplicate indices in locs.
b_uniq_locs, b_repeats, b_rep_cnt = np.unique(b_locs[~b_new_mask], return_counts=True, return_index=True)
count_c[b_uniq_locs] += count_b[~b_new_mask][b_repeats] * b_rep_cnt
return bin_c, count_c
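# Illustrative example of merge_bins (made-up values): the shared bin 2.0 has
# its counts combined, and the new bin 3.0 is inserted in sorted order.
# >>> merge_bins(np.array([1.0, 2.0, 4.0]), np.array([3, 1, 2]),
# ...            np.array([2.0, 3.0]), np.array([5, 7]))
# (array([1., 2., 3., 4.]), array([3, 6, 7, 2]))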
class BaseBucketApi:
@abstractproperty
def full(self) -> bool:
raise NotImplementedError()
@abstractmethod
def add(self, value: Union[int, float], count: int = 1):
raise NotImplementedError()
def add_many(self, values: List[Union[int, float]], counts: List[int]):
for value, count in zip(values, counts):
self.add(value, count)
@abstractmethod
def summarize(self, n: int, density: bool = False) -> SummaryTuple:
raise NotImplementedError()
class InferBucketsValTracker(BaseBucketApi):
"""Keep track of running values by using two bucketed buffers"""
_bins: npt.NDArray[np.float64]
_counts: npt.NDArray[np.int64]
_buffer_bins: npt.NDArray[np.float64]
_buffer_counts: npt.NDArray[np.int64]
_buffer_idx: int
n: int
_n: int
def __init__(self, n: int, b: Optional[int] = None):
self.n = self._n = n
self.b = b or int(np.sqrt(n))
self._bins = np.empty(0, dtype=np.float64)
self._counts = np.empty_like(self._bins, dtype=np.int64)
# hold temporary values in a buffer
self._new_buffer()
def _new_buffer(self):
"""Create a new buffer and reset the buffer index"""
self._buffer_bins = np.zeros(self.b, dtype=np.float64)
self._buffer_counts = np.zeros_like(self._buffer_bins, dtype=np.int64)
self._buffer_idx = 0
def _add_buffer_to_bins(self):
"""Bin the values in the buffer and merge them with the existing bins and counts."""
locs = np.minimum(np.searchsorted(self._bins, self._buffer_bins, side="left"), self._bins.size - 1)
trim_og_locs, trim_og_counts = locs[: self._buffer_idx], self._buffer_counts[: self._buffer_idx]
trim_locs, repeats_locs, repeats_cnt = np.unique(trim_og_locs, return_counts=True, return_index=True)
trim_counts = trim_og_counts[repeats_locs] * repeats_cnt
self._counts[trim_locs] += trim_counts
self._new_buffer()
def _sort_buffer(self):
mask = np.arange(0, self._buffer_bins.size) < self._buffer_idx
bins, counts = sort_and_merge_bins(bins=self._buffer_bins, counts=self._buffer_counts, mask=mask)
self._buffer_bins = bins
self._buffer_counts = counts
self._buffer_idx = bins.size
def _concat_buffer(self):
"""Concatenate the buffer with the existing bins and counts."""
# make sure the buffer is sorted before merging
self._sort_buffer()
if self._bins.size == 0:
# shortcut: if there are no bins, just copy the buffer
self._bins = self._buffer_bins
self._counts = self._buffer_counts
self._new_buffer()
return
# actually do the merge here!
self._bins, self._counts = merge_bins(
bin_a=self._bins, count_a=self._counts, bin_b=self._buffer_bins, count_b=self._buffer_counts
)
self._new_buffer()
def _add_not_full(self, value: float, count: int = 1):
"""Add a value to the tracker when the tracker is not full; in this case, the value is
added to the buffer and eventually merged with existing bins and counts."""
self._n -= 1
if self._n < 0:
return self._add_full(value, count)
self._buffer_bins[self._buffer_idx] = value
self._buffer_counts[self._buffer_idx] = count
self._buffer_idx += 1
if self._buffer_idx == self._buffer_bins.size:
self._concat_buffer()
def _add_full(self, value: float, count: int = 1):
"""Add a value to the tracker when the tracker is full; in this case, the value is added by
bisecting the tracker and adding the value to the appropriate bucket."""
self._buffer_bins[self._buffer_idx] = value
self._buffer_counts[self._buffer_idx] = count
self._buffer_idx += 1
if self._buffer_idx == self._buffer_bins.size:
self._add_buffer_to_bins()
def __len__(self) -> int:
return self._counts.size
@property
def full(self) -> bool:
return self._n <= 0
def add(self, value: Union[int, float], count: int = 1):
if self._n >= 0:
self._add_not_full(value=value, count=count)
else:
self._add_full(value=value, count=count)
def summarize(self, n: int, density: bool = False) -> SummaryTuple:
"""Return up to n buckets with counts of merged values"""
# finalize operations
self._concat_buffer() if self._n >= 0 else self._add_buffer_to_bins()
if len(self) <= n:
# if there are fewer than n buckets, return the buckets as is
return SummaryTuple(counts=self._counts.tolist(), bins=self._bins.tolist())
# make weighted histogram using counts
new_counts, new_values = np.histogram(a=self._bins, bins=n, weights=self._counts, density=density)
# return lists instead of numpy arrays
return SummaryTuple(counts=new_counts.tolist(), bins=new_values.tolist())
class FixedBucketsValTracker(BaseBucketApi):
def __init__(self, n: int = 2):
assert n >= 0
# we use n to determine the precision of the bins; for convenience we store it as a power of 10
self.n = 10**n
self._bins: Dict[Tuple[int, int], int] = {}
def add(self, value: Union[int, float], count: int = 1):
m, e = math.frexp(value)
k = int(m * self.n), e
if k not in self._bins:
self._bins[k] = 0
self._bins[k] += count
def __len__(self) -> int:
return len(self._bins)
@property
def full(self) -> bool:
return False
def summarize(self, n: int, density: bool = False) -> SummaryTuple:
bins, counts = zip(*sorted((m / self.n * 2**e, c) for (m, e), c in self._bins.items()))
if len(self) <= n:
# if there are fewer than n buckets, return the buckets as is
return SummaryTuple(counts=[int(c) for c in counts], bins=[float(b) for b in bins])
# computing the weighted histograms
new_counts, new_values = np.histogram(a=bins, bins=n, weights=counts, density=density)
# return lists instead of numpy arrays
return SummaryTuple(counts=new_counts.tolist(), bins=new_values.tolist())
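# Illustrative example of FixedBucketsValTracker (made-up values): with only
# three distinct bins and n=5 requested, the buckets are returned as-is.
# >>> tracker = FixedBucketsValTracker(n=2)
# >>> tracker.add_many([0.1, 0.5, 0.5, 0.9], counts=[1, 2, 1, 4])
# >>> tracker.summarize(n=5)
# SummaryTuple(counts=[1, 3, 4], bins=[0.1, 0.5, 0.9])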
|
dolma-main
|
python/dolma/core/binning.py
|
import glob
import re
from functools import partial
from itertools import chain
from pathlib import Path
from typing import Any, Dict, Iterable, Iterator, List, Tuple, Union
from urllib.parse import urlparse
from fsspec import AbstractFileSystem, get_filesystem_class
__all__ = [
"glob_path",
"sub_prefix",
"add_suffix",
"sub_suffix",
"make_relative",
"mkdir_p",
"split_path",
"join_path",
"is_glob",
"split_glob",
"partition_path",
]
FS_KWARGS: Dict[str, Dict[str, Any]] = {
"": {"auto_mkdir": True},
}
RE_ANY_ESCAPE = re.compile(r"(?<!\\)(\*\?\[\])")
RE_GLOB_STAR_ESCAPE = re.compile(r"(?<!\\)\*")
RE_GLOB_ONE_ESCAPE = re.compile(r"(?<!\\)\?")
RE_GLOB_OPEN_ESCAPE = re.compile(r"(?<!\\)\[")
RE_GLOB_CLOSE_ESCAPE = re.compile(r"(?<!\\)\]")
ESCAPE_SYMBOLS_MAP = {"*": "\u2581", "?": "\u2582", "[": "\u2583", "]": "\u2584"}
REVERSE_ESCAPE_SYMBOLS_MAP = {v: k for k, v in ESCAPE_SYMBOLS_MAP.items()}
def _get_fs(path: Union[Path, str]) -> AbstractFileSystem:
"""
Get the filesystem class for a given path.
"""
path = str(path)
protocol = urlparse(path).scheme
fs = get_filesystem_class(protocol)(**FS_KWARGS.get(protocol, {}))
# patch glob method to support recursive globbing
if protocol == "":
fs.glob = partial(glob.glob, recursive=True)
return fs
def _escape_glob(s: Union[str, Path]) -> str:
"""
Escape glob characters in a string.
"""
s = str(s)
s = RE_GLOB_STAR_ESCAPE.sub(ESCAPE_SYMBOLS_MAP["*"], s)
s = RE_GLOB_ONE_ESCAPE.sub(ESCAPE_SYMBOLS_MAP["?"], s)
s = RE_GLOB_OPEN_ESCAPE.sub(ESCAPE_SYMBOLS_MAP["["], s)
s = RE_GLOB_CLOSE_ESCAPE.sub(ESCAPE_SYMBOLS_MAP["]"], s)
return s
def _unescape_glob(s: Union[str, Path]) -> str:
"""
Unescape glob characters in a string.
"""
s = str(s)
for k, v in REVERSE_ESCAPE_SYMBOLS_MAP.items():
s = s.replace(k, v)
return s
def _pathify(path: Union[Path, str]) -> Tuple[str, Path]:
"""
Return the protocol and path of a given path.
"""
path = _escape_glob(str(path))
parsed = urlparse(path)
path = Path(f"{parsed.netloc}/{parsed.path}") if parsed.netloc else Path(parsed.path)
return parsed.scheme, path
def _unpathify(protocol: str, path: Path) -> str:
"""
Return a path from its protocol and path components.
"""
path_str = _unescape_glob(str(path))
if protocol:
path_str = f"{protocol}://{path_str.lstrip('/')}"
return path_str
def partition_path(path: str) -> Tuple[str, Tuple[str, ...], Tuple[str, ...]]:
"""Partition a path into its protocol, symbols before a glob, and symbols after a glob."""
# split the path into its protocol and path components
prot, path_obj = _pathify(path)
# we need to first figure out if this path has a glob by checking if any of the escaped symbols for
# globs are in the path.
glob_locs = [i for i, p in enumerate(path_obj.parts) if any(c in p for c in REVERSE_ESCAPE_SYMBOLS_MAP)]
# make the path components before the glob
pre_glob_path = path_obj.parts[: glob_locs[0]] if glob_locs else path_obj.parts
pre_glob_path = tuple(_unescape_glob(p) for p in pre_glob_path)
# make the path components after the glob
post_glob_path = path_obj.parts[glob_locs[0] + 1 :] if glob_locs else ()
post_glob_path = tuple(_unescape_glob(p) for p in post_glob_path)
return prot, pre_glob_path, post_glob_path
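# Illustrative example (hypothetical path): everything before the first glob
# component goes in the second element, everything after it in the third.
# >>> partition_path("s3://bucket/data/*/train/*.json")
# ('s3', ('bucket', 'data'), ('train', '*.json'))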
def split_path(path: str) -> Tuple[str, Tuple[str, ...]]:
"""
Split a path into its protocol and path components.
"""
protocol, _path = _pathify(path)
return protocol, tuple(_unescape_glob(p) for p in _path.parts)
def join_path(protocol: Union[str, None], *parts: Union[str, Iterable[str]]) -> str:
"""
Join a path from its protocol and path components.
"""
all_parts = (_escape_glob(p) for p in chain.from_iterable([p] if isinstance(p, str) else p for p in parts))
path = str(Path(*all_parts)).rstrip("/")
if protocol:
path = f"{protocol}://{path.lstrip('/')}"
return _unescape_glob(path)
def glob_path(path: Union[Path, str], hidden_files: bool = False, autoglob_dirs: bool = True) -> Iterator[str]:
"""
Expand a glob path into a list of paths.
"""
path = str(path)
protocol = urlparse(path).scheme
fs = _get_fs(path)
if fs.isdir(path) and autoglob_dirs:
path = join_path(None, path, "*")
for gl in fs.glob(path):
gl = str(gl)
if not hidden_files and Path(gl).name.startswith("."):
continue
yield join_path(protocol, gl)
def sub_prefix(a: str, b: str) -> str:
"""
Return the relative path of b from a.
"""
prot_a, path_a = _pathify(a)
prot_b, path_b = _pathify(b)
if prot_a != prot_b:
raise ValueError(f"Protocols of {a} and {b} do not match")
try:
diff = str(path_a.relative_to(path_b))
except ValueError:
diff = join_path(prot_a, path_a.parts)
return _unescape_glob(diff)
def sub_suffix(a: str, b: str) -> str:
"""
Remove b from the end of a.
"""
prot_a, path_a = _pathify(a)
prot_b, path_b = _pathify(b)
if prot_b:
raise ValueError(f"{b} is not a relative path")
sub_path = re.sub(f"{path_b}$", "", str(path_a))
sub_prot = f"{prot_a}://" if prot_a else ""
# need to trim '/' from the end if (a) '/' is not the only symbol in the path or
# (b) there is a protocol so absolute paths don't make sense
if sub_path != "/" or sub_prot:
sub_path = sub_path.rstrip("/")
return _unescape_glob(sub_prot + sub_path)
def add_suffix(a: str, b: str) -> str:
"""
Return the path of a joined with b.
"""
prot_a, path_a = _pathify(a)
prot_b, path_b = _pathify(b)
if prot_b:
raise ValueError(f"{b} is not a relative path")
return join_path(prot_a, str(path_a / path_b))
def mkdir_p(path: str) -> None:
"""
Create a directory if it does not exist.
"""
if is_glob(path):
raise ValueError(f"Cannot create directory with glob pattern: {path}")
fs = _get_fs(path)
fs.makedirs(path, exist_ok=True)
def make_relative(paths: List[str]) -> Tuple[str, List[str]]:
"""Find minimum longest root shared among all paths"""
if len(paths) == 0:
raise ValueError("Cannot make relative path of empty list")
common_prot, common_parts, _ = partition_path(paths[0])
for path in paths:
current_prot, current_parts, _ = partition_path(path)
if current_prot != common_prot:
raise ValueError(f"Protocols of {path} and {paths[0]} do not match")
for i in range(min(len(common_parts), len(current_parts))):
if common_parts[i] != current_parts[i]:
common_parts = common_parts[:i]
break
if len(common_parts) > 0:
common_path = (f"{common_prot}://" if common_prot else "") + str(Path(*common_parts))
relative_paths = [sub_prefix(path, common_path) for path in paths]
else:
common_path = f"{common_prot}://" if common_prot else ""
relative_paths = [_unpathify("", _pathify(path)[1]) for path in paths]
return common_path, relative_paths
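# Illustrative example (hypothetical paths):
# >>> make_relative(["s3://bucket/data/a/1.json", "s3://bucket/data/b/2.json"])
# ('s3://bucket/data', ['a/1.json', 'b/2.json'])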
def is_glob(path: str) -> bool:
"""
Check if a path contains a glob wildcard.
"""
return bool(re.search(r"(?<!\\)[*?[\]]", path))
def split_glob(path: str) -> Tuple[str, str]:
"""
Partition a path on the first wildcard.
"""
if not is_glob(path):
return path, ""
protocol, parts = split_path(path)
i = min(i for i, c in enumerate(parts) if is_glob(c))
path = join_path(protocol, *parts[:i])
rest = join_path("", *parts[i:])
return path, rest
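# Illustrative example (hypothetical path):
# >>> split_glob("s3://bucket/data/*/file.json")
# ('s3://bucket/data', '*/file.json')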
|
dolma-main
|
python/dolma/core/paths.py
|
# flake8: noqa
# type: ignore
import argparse
import json
import os
from contextlib import ExitStack
from typing import Dict, List, Optional
import msgspec
import yaml
# NOTE: the helpers below (colored, decompress_stream, recursively_list_files,
# stream_file_for_read) are used in this module but were not imported in the
# original snippet; these imports are an assumption about where they come from.
from termcolor import colored
from smashed.utils.io_utils import (
decompress_stream,
recursively_list_files,
stream_file_for_read,
)
from .data_types import DocResult, InputSpec, OutputSpec
class Visualizer:
BASE_S3_PREFIX = "s3://ai2-llm/pretraining-data/sources"
def __init__(
self,
dataset: str,
experiment: Optional[str] = None,
tagger: Optional[str] = None,
type: Optional[str] = None,
):
self.dataset = dataset
self.experiment = experiment
self.tagger = tagger
self.type = type
def list_tags(self, path: str):
prefix, doc_path = path.split("/documents/")
attrs_decoder = msgspec.json.Decoder(OutputSpec)
doc_decoder = msgspec.json.Decoder(InputSpec)
with ExitStack() as stack:
doc_file = stack.enter_context(stream_file_for_read(path, "rb"))
doc_stream = stack.enter_context(decompress_stream(doc_file, "rt"))
exp_path = f"{prefix}/attributes/{self.experiment}/{doc_path}"
exp_file = stack.enter_context(stream_file_for_read(exp_path, "rb"))
exp_stream = stack.enter_context(decompress_stream(exp_file, "rt"))
tags: Dict[str, List[str]] = {}
for doc_line, exp_line in zip(doc_stream, exp_stream):
# parse out data from the line
input_doc = doc_decoder.decode(doc_line)
input_exp = attrs_decoder.decode(exp_line)
doc_result = DocResult.from_spec(input_doc, input_exp)
for span in doc_result.spans:
tags.setdefault(str(span.tagger), []).append(span.type)
break
print(colored(f"from {self.short_path(path)}:", color="yellow"))
for tagger, types in sorted(tags.items()):
print(colored(f"{tagger}:", color="magenta"))
for type in sorted(set(types)):
print(colored(f" {type}", color="cyan"))
print()
def short_path(self, path: str, slack: int = 20) -> str:
return f"...{path[-s:]}" if (s := round(os.get_terminal_size().columns - slack)) < len(path) else path
def visualize_single(self, path: str):
prefix, doc_path = path.split("/documents/")
attrs_decoder = msgspec.json.Decoder(OutputSpec)
doc_decoder = msgspec.json.Decoder(InputSpec)
with ExitStack() as stack:
doc_file = stack.enter_context(stream_file_for_read(path, "rb"))
doc_stream = stack.enter_context(decompress_stream(doc_file, "rt"))
exp_path = f"{prefix}/attributes/{self.experiment}/{doc_path}"
exp_file = stack.enter_context(stream_file_for_read(exp_path, "rb"))
exp_stream = stack.enter_context(decompress_stream(exp_file, "rt"))
i = 0
file_header = colored(f"file: {self.short_path(path)}\n", color="magenta")
for doc_line, exp_line in zip(doc_stream, exp_stream):
# parse out data from the line
input_doc = doc_decoder.decode(doc_line)
input_exp = attrs_decoder.decode(exp_line)
doc_result = DocResult.from_spec(input_doc, input_exp)
example_header = colored(f"example: {i:,}\n", color="yellow")
dt = doc_result.doc.text
spans = sorted(
(s for s in doc_result.spans if s.tagger == self.tagger and s.type == self.type),
key=lambda s: s.start,
)
if not spans:
continue
prev_start = 0
text_fragments = []
for span in spans:
text_fragments.append(colored(dt[prev_start : span.start].replace("\n", "\\n"), color="black"))
text_fragments.append(colored(dt[span.start : span.end].replace("\n", "\\n"), color="green"))
text_fragments.append(colored(f"{{{span.type}: {span.score}}}", color="red"))
prev_start = span.end
text_fragments.append(colored(dt[prev_start:].replace("\n", "\\n"), color="black"))
tagger_header = colored(f"tagger: {self.tagger}\n", color="cyan")
print("".join(text_fragments) + "\n" + file_header + example_header + tagger_header + "\n")
while True:
out = input("next? [l/f] ").lower().strip()
if out == "l":
i += 1
break
elif out == "f":
return
else:
print(f"invalid input: {out}; choose between next (l)ine or next (f)ile.")
def __call__(self):
try:
source_prefix = f"{self.BASE_S3_PREFIX}/{self.dataset}/documents"
for path in recursively_list_files(source_prefix):
# just list tags if no tagger or type is specified
if self.tagger is None or self.type is None:
self.list_tags(path)
return
# visualize the specified tagger and type
self.visualize_single(path)
except KeyboardInterrupt:
print("\nExiting... bye!")
@classmethod
def main(cls):
ap = argparse.ArgumentParser()
ap.add_argument(
"-d",
"--dataset",
required=True,
help="Dataset to visualize",
)
ap.add_argument(
"-e",
"--experiment-name",
required=True,
help="Experiment name to visualize",
)
ap.add_argument(
"-t",
"--tagger",
type=str,
default=None,
help="Tagger to visualize",
)
ap.add_argument(
"-y",
"--type",
type=str,
default=None,
help="Type to visualize",
)
opts = ap.parse_args()
cls(dataset=opts.dataset, experiment=opts.experiment_name, tagger=opts.tagger, type=opts.type)()
class RawPreviewer:
BASE_S3_PREFIX = "s3://ai2-llm/pretraining-data/sources"
def __init__(self, dataset: str, type: str, file: str, pretty: bool = False, experiment: Optional[str] = None):
self.dataset = dataset
self.experiment = experiment
self.type = type
self.file = file
self.pretty = pretty
assert type == "documents" or experiment is not None, "Must specify experiment for attributes"
def preview_file(self):
if self.type == "documents":
path = f"{self.BASE_S3_PREFIX}/{self.dataset}/documents/{self.file}"
else:
path = f"{self.BASE_S3_PREFIX}/{self.dataset}/attributes/{self.experiment}/{self.file}"
with ExitStack() as stack:
file = stack.enter_context(stream_file_for_read(path, "rb"))
stream = stack.enter_context(decompress_stream(file, "rt"))
list_colors = ["red", "green", "blue", "magenta", "cyan"]
for line in stream:
row = json.loads(line)
if self.pretty:
out = yaml.dump(row, width=float("inf"))
for ln in out.split("\n"):
if not ln.startswith(" "):
key, *rest = ln.split(":")
rest = (":" + ":".join(rest) if rest else "").strip()
print(colored(key, color=list_colors[0]) + rest)
list_colors = list_colors[1:] + list_colors[:1]
else:
print(ln)
else:
print(row)
input(colored("\n[press enter for next line]", color="yellow"))
def list_files(self):
prefix = f"{self.BASE_S3_PREFIX}/{self.dataset}/documents"
for path in recursively_list_files(prefix):
print(path[len(prefix) + 1 :])
def __call__(self):
try:
if self.file is not None:
self.preview_file()
else:
self.list_files()
except KeyboardInterrupt:
print("\nExiting... bye!")
@classmethod
def main(cls):
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="Dataset to preview, e.g. `wikipedia/v0`")
ap.add_argument(
"-t",
"--type",
choices=["documents", "attributes"],
required=True,
help="Type of data to preview; it can be either `documents` or `attributes`.",
)
ap.add_argument(
"-e",
"--experiment",
default=None,
help="Experiment to preview; this is only used for previewing `attributes` assigned by tagger.",
)
ap.add_argument(
"-f",
"--file",
default=None,
type=str,
help="File to preview; if not sure which file to preview, skip this argument to list all files.",
)
ap.add_argument(
"-p", "--pretty", action="store_true", help="Whether to use pretty print for previewing JSON lines."
)
opts = ap.parse_args()
cls(dataset=opts.dataset, type=opts.type, file=opts.file, experiment=opts.experiment, pretty=opts.pretty)()
|
dolma-main
|
python/dolma/core/vizualizer.py
|
from typing import Callable, Dict, Generator, Tuple, Type, TypeVar
from .taggers import BaseTagger
T = TypeVar("T", bound=BaseTagger)
class TaggerRegistry:
__taggers: Dict[str, Type[BaseTagger]] = {}
@classmethod
def taggers(cls) -> Generator[Tuple[str, Type[BaseTagger]], None, None]:
yield from cls.__taggers.items()
@classmethod
def add(cls, name: str) -> Callable[[Type[T]], Type[T]]:
def _add(
tagger_cls: Type[T],
tagger_name: str = name,
taggers_dict: Dict[str, Type[BaseTagger]] = cls.__taggers,
) -> Type[T]:
if tagger_name in taggers_dict and taggers_dict[tagger_name] != tagger_cls:
if tagger_cls.__module__ == "__main__":
return tagger_cls
raise ValueError(f"Tagger {tagger_name} already exists")
taggers_dict[tagger_name] = tagger_cls
return tagger_cls
return _add
@classmethod
def get(cls, name: str) -> Type[BaseTagger]:
if name not in cls.__taggers:
raise ValueError(
f"Unknown tagger {name}; available taggers: " + ", ".join([tn for tn, _ in cls.taggers()])
)
return cls.__taggers[name]
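# Illustrative usage (hypothetical tagger class, shown only to demonstrate the
# registry API defined above):
# @TaggerRegistry.add("example_tagger")
# class ExampleTagger(BaseTagger):
#     ...  # implement the abstract methods of BaseTagger
#
# TaggerRegistry.get("example_tagger")  # returns ExampleTagger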
|
dolma-main
|
python/dolma/core/registry.py
|
import logging
def get_logger(name: str) -> logging.Logger:
name = f"dolma.{name}"
logger = logging.getLogger(name)
logger.setLevel(logging.WARN)
if not logger.handlers:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s"))
logger.addHandler(handler)
return logger
|
dolma-main
|
python/dolma/core/loggers.py
|
import multiprocessing
import re
import shutil
from contextlib import ExitStack
from tempfile import TemporaryDirectory
from typing import Dict, List, Optional
import msgspec
import smart_open
import tqdm
from msgspec.json import Decoder
from rich.console import Console
from rich.table import Table
from .binning import BaseBucketApi, FixedBucketsValTracker, InferBucketsValTracker
from .data_types import OutputSpec
from .errors import DolmaError
from .parallel import BaseParallelProcessor, QueueType
from .paths import glob_path, mkdir_p
NUM_BINS = 100_000
BUFF_SIZE = 1_000
def _make_tracker(type_: str = "fixed", **kwargs: int) -> BaseBucketApi:
"""Make a tracker of given type. Choose between `infer` or `fixed`"""
if type_ == "infer":
return InferBucketsValTracker(**{"n": NUM_BINS, "b": BUFF_SIZE, **kwargs})
elif type_ == "fixed":
return FixedBucketsValTracker(**{"n": NUM_BINS, **kwargs})
else:
raise ValueError(f"Unknown tracker type {type_}")
class SummarySpec(msgspec.Struct):
name: str
counts: List[int]
bins: List[float]
@classmethod
def from_tracker(cls, name: str, tracker: "BaseBucketApi", n: int) -> "SummarySpec":
counts, bins = tracker.summarize(n=n)
return SummarySpec(name=name, counts=counts, bins=bins)
def to_tracker(self) -> "BaseBucketApi":
tracker = _make_tracker()
tracker.add_many(values=self.bins, counts=self.counts)
return tracker
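# Illustrative round-trip (made-up values): SummarySpec(name="len", counts=[2, 5],
# bins=[1.0, 8.0]).to_tracker() rebuilds a tracker holding 2 observations of 1.0
# and 5 observations of 8.0, since add_many() pairs each bin value with its count.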
class AnalyzerProcessor(BaseParallelProcessor):
@classmethod
def increment_progressbar( # type: ignore
cls,
queue: QueueType, # queue must be the first argument, and it should be a positional-only argument
/,
files: int = 0,
documents: int = 0,
) -> Dict[str, int]:
"""We override this method to specify which units we want to keep track of in a progress bar.
Specifically, we keep track of files and documents in this example. Their default value must be zero."""
# we call the super method to increment the progress bar
return super().increment_progressbar(queue, files=files, documents=documents)
@classmethod
def process_single(
cls,
source_path: str,
destination_path: str,
queue: QueueType,
**kwargs,
):
# instantiate a decoder for faster decoding
decoder = Decoder(OutputSpec)
# number of bins to use
num_bins = kwargs.get("num_bins", 1000)
# regex to filter attribute names
name_regex = re.compile(r) if (r := kwargs.get("name_regex", None)) else None
# keep track of the length and score of each attribute
trackers: Dict[str, BaseBucketApi] = {}
# interval at which to update the progress bar; will double if queue is too full
update_interval = 1
# running document count; gets reset every time we update the progress bar
docs_cnt = 0
with smart_open.open(source_path) as f:
for ln in f:
try:
row = decoder.decode(ln)
except Exception as e:
raise DolmaError(
f"Failed to decode line {ln} in {source_path}; "
f"are you sure {source_path} is an attributes file?"
) from e
# update the length and score trackers for each attribute
for attr_name, attr_values in row.attributes.items():
# if a regex is provided, skip attributes that don't match it
if name_regex and not name_regex.search(attr_name):
continue
# empty attributes count as zero
attr_values = attr_values or [(0, 0, 0.0)]
for start, end, score in attr_values:
if "__label__" in attr_name:
# annoying fix for fasttext: fasttext sometimes emits probabilities that are slightly
# above 1.0, which causes issues with histograms. Therefore, we shift values that are
# greater than 1.0 down to 1.0
#
# fasttext labels are of the form __label__<label>, so we can just check if the
# attribute name contains __label__
score = min(score, 1.0)
trackers.setdefault(f"{attr_name}/score", _make_tracker()).add(score)
trackers.setdefault(f"{attr_name}/length", _make_tracker()).add(end - start)
# increment the number of documents processed so far
docs_cnt += 1
if docs_cnt % update_interval == 0:
# update the progress bar every `update_interval` documents to avoid
# flooding the queue
cls.increment_progressbar(queue, documents=docs_cnt)
docs_cnt = 0
if queue.qsize() >= multiprocessing.cpu_count():
# double the update interval if the queue is full
update_interval *= 2
with smart_open.open(destination_path, "w") as f:
for attr_name, tracker in trackers.items():
summary = SummarySpec.from_tracker(name=attr_name, tracker=tracker, n=num_bins)
f.write(msgspec.json.encode(summary).decode("utf-8") + "\n")
# update the progress bar one last time
cls.increment_progressbar(queue, files=1, documents=docs_cnt)
def aggregate_summaries(summaries_path: str, num_bins: int = 1000) -> List[SummarySpec]:
# keep track of the length and score of each attribute
trackers: Dict[str, BaseBucketApi] = {}
# instantiate a decoder for faster decoding
decoder = Decoder(SummarySpec)
# iterator with nice progress bar
it = tqdm.tqdm(list(glob_path(summaries_path)), desc="Aggregating summaries", unit=" files", unit_scale=True)
# load partial summaries and aggregate them
for path in it:
with smart_open.open(path, "rt") as f:
for ln in f:
summary = decoder.decode(ln)
trackers.setdefault(summary.name, _make_tracker()).add_many(summary.bins, summary.counts)
# convert trackers to summaries
summaries = [
SummarySpec.from_tracker(name=attr_name, tracker=attr_tracker, n=num_bins)
for attr_name, attr_tracker in trackers.items()
]
return summaries
def visualize_summaries(summaries: List[SummarySpec], digits: int = 4, num_viz_bins: int = 10):
console = Console()
console.print()
def round_all(values: List[float], opt_sci: bool = False) -> List[str]:
"""Logic to round values depending on their range"""
if values == [0, 1]:
# just 0 and 1; no need to round or add decimal points
return ["0", "1"]
elif all(-1 <= val <= 1 for val in values):
# all values are in the range [-1, 1]; let's attempt rounding with {digits} decimal points
# unless some values are identical after rounding.
attempt_rounding = [round(val, digits) for val in values]
if len(set(attempt_rounding)) != len(values) and opt_sci:
# oops, some values collide after rounding; let's use scientific notation instead
# with one decimal point (note that we do this only if `opt_sci` is True)
return [f"{val:.1e}" for val in values]
else:
# phew, all good; let's use {digits} decimal points for all values
return [f"{round(val, digits):.{digits}f}" for val in values]
else:
# all values are outside the range [-1, 1]; let's round them to the nearest integer
return [f"{int(round(val, 0)):d}" for val in values]
for summary in summaries:
# we use fewer bins for visualization
summary = SummarySpec(
name=summary.name,
counts=(re_binned := summary.to_tracker().summarize(n=num_viz_bins)).counts,
bins=re_binned.bins,
)
# build the table here
table = Table(title=summary.name, style="bold", min_width=len(summary.name))
table.add_column("value", justify="left", style="cyan")
table.add_column("dist", justify="left", style="magenta")
table.add_column("count", justify="left", style="green")
rounded_bins = round_all(summary.bins)
ranges = (
[f"[{lo}, {hi})" for lo, hi in zip(rounded_bins, rounded_bins[1:])]
if len(summary.bins) > len(summary.counts)
else rounded_bins
)
counts_sum = sum(summary.counts)
counts_normed = round_all([(count / counts_sum) for count in summary.counts], opt_sci=False)
for value, dist, count in zip(ranges, counts_normed, summary.counts):
table.add_row(value, dist, f"{count:,}")
console.print(table)
console.print()
def write_output(summaries: List[SummarySpec], report: Optional[str] = None):
if report is None:
return
mkdir_p(report)
with smart_open.open(f"{report}/summaries.jsonl.gz", "w") as f:
for summary in summaries:
f.write(msgspec.json.encode(summary).decode("utf-8") + "\n")
def create_and_run_analyzer(
attributes: List[str],
summaries_path: Optional[str] = None,
metadata_path: Optional[str] = None,
report: Optional[str] = None,
debug: bool = False,
seed: int = 0,
num_bins: int = 1000,
num_processes: int = 1,
name_regex: Optional[str] = None,
):
""" """
# create the report directory if it doesn't exist
if report:
mkdir_p(report)
with ExitStack() as stack:
# use temporary directories if no paths are provided
summaries_path = summaries_path or stack.enter_context(TemporaryDirectory())
metadata_path = metadata_path or stack.enter_context(TemporaryDirectory())
# make sure these locations exist
mkdir_p(summaries_path)
mkdir_p(metadata_path)
try:
analyzer = AnalyzerProcessor(
source_prefix=attributes,
destination_prefix=summaries_path,
metadata_prefix=metadata_path,
debug=debug,
seed=seed,
ignore_existing=True,
retries_on_error=0,
num_processes=num_processes,
)
analyzer(num_bins=num_bins, name_regex=name_regex)
summaries = aggregate_summaries(summaries_path=summaries_path, num_bins=num_bins)
visualize_summaries(summaries=summaries)
write_output(summaries=summaries, report=report)
finally:
shutil.rmtree(summaries_path)
shutil.rmtree(metadata_path)
|
dolma-main
|
python/dolma/core/analyzer.py
|
from .data_types import DocResult, Document, Span
from .registry import TaggerRegistry
from .taggers import BaseTagger
__all__ = [
"BaseTagger",
"DocResult",
"Document",
"Span",
"TaggerRegistry",
]
|
dolma-main
|
python/dolma/core/__init__.py
|
"""
Data types assumed by Filters.
@kylel, @soldni
"""
from typing import Any, Dict, List, Optional, Tuple
from msgspec import Struct
from typing_extensions import TypeAlias
TaggerOutputValueType: TypeAlias = Tuple[int, int, float]
TaggerOutputType: TypeAlias = List[TaggerOutputValueType]
TaggerOutputDictType: TypeAlias = Dict[str, TaggerOutputType]
class InputSpec(Struct):
id: str
text: str
source: str
version: Optional[str] = None
# ignoring metadata for now; taggers run on text only
# metadata: Optional[Dict[str, Any]] = None
class OutputSpec(Struct):
id: str
attributes: Dict[str, List[Tuple[int, int, float]]]
source: Optional[str] = None
class Document:
__slots__ = "source", "version", "id", "text"
def __init__(self, source: str, id: str, text: str, version: Optional[str] = None) -> None:
self.source = source
self.version = version
self.id = id
self.text = text
@classmethod
def from_spec(cls, spec: InputSpec) -> "Document":
return Document(source=spec.source, version=spec.version, id=spec.id, text=spec.text)
def to_spec(self) -> InputSpec:
return InputSpec(source=self.source, version=self.version, id=self.id, text=self.text)
@classmethod
def from_json(cls, d: Dict) -> "Document":
return Document(source=d["source"], version=d["version"], id=d["id"], text=d["text"])
def to_json(self) -> Dict:
return {"source": self.source, "version": self.version, "id": self.id, "text": self.text}
def __str__(self) -> str:
attributes_string = ",".join([f"{k}:{repr(v)}" for k, v in self.to_json()])
return f"{self.__class__.__name__}({attributes_string})"
class Span:
__slots__ = "start", "end", "type", "score", "experiment", "tagger"
def __init__(
self,
start: int,
end: int,
type: str,
score: float = 1.0,
experiment: Optional[str] = None,
tagger: Optional[str] = None,
):
self.start = start
self.end = end
self.type = type
self.score = float(score)
self.experiment = experiment
self.tagger = tagger
def mention(self, text: str, window: int = 0) -> str:
return text[max(0, self.start - window) : min(len(text), self.end + window)]
def select(self, doc: Document) -> str:
return doc.text[self.start : self.end]
@classmethod
def from_spec(cls, attribute_name: str, attribute_value: TaggerOutputValueType) -> "Span":
if "__" in attribute_name:
# bff tagger has different name
exp_name, tgr_name, attr_type = attribute_name.split("__", 2)
else:
exp_name = tgr_name = attr_type = attribute_name
start, end, score = attribute_value
return Span(
start=int(start),
end=int(end),
type=attr_type,
score=float(score),
experiment=exp_name,
tagger=tgr_name,
)
def to_spec(self) -> Tuple[str, TaggerOutputValueType]:
assert self.experiment is not None, "Experiment name must be set to convert to spec"
assert self.tagger is not None, "Tagger name must be set to convert to spec"
return (
f"{self.experiment}__{self.tagger}__{self.type}",
(self.start, self.end, self.score),
)
def __len__(self) -> int:
return self.end - self.start
@classmethod
def from_json(cls, di: Dict) -> "Span":
return Span(start=di["start"], end=di["end"], type=di["type"], score=di["score"])
def to_json(self, text: Optional[str] = None, window: int = 0) -> dict:
span_repr = {"start": self.start, "end": self.end, "type": self.type, "score": self.score}
if text is not None:
span_repr["mention"] = self.mention(text=text, window=window)
return span_repr
def __str__(self) -> str:
cls_name = self.__class__.__name__
return f"{cls_name}(start={self.start},end={self.end},type={repr(self.type)},score={self.score:.5f})"
class DocResult:
__slots__ = "doc", "spans"
def __init__(self, doc: Document, spans: List[Span]) -> None:
self.doc = doc
self.spans = spans
@classmethod
def from_spec(cls, doc: InputSpec, *attrs_groups: OutputSpec) -> "DocResult":
spans: List[Span] = []
for attrs in attrs_groups:
assert doc.id == attrs.id, f"doc.id={doc.id} != attrs.id={attrs.id}"
spans.extend(
[
Span.from_spec(attribute_name=attr_name, attribute_value=attr_value)
for attr_name, attr_values in attrs.attributes.items()
for attr_value in attr_values
]
)
return DocResult(doc=Document.from_spec(doc), spans=spans)
def to_spec(self) -> Tuple[InputSpec, OutputSpec]:
doc_spec = self.doc.to_spec()
attributes: Dict[str, List[TaggerOutputValueType]] = {}
for span in self.spans:
attr_name, attr_value = span.to_spec()
attributes.setdefault(attr_name, []).append(attr_value)
return doc_spec, OutputSpec(source=self.doc.source, id=self.doc.id, attributes=attributes)
@classmethod
def from_json(cls, d: Dict[str, Any]) -> "DocResult":
return DocResult(
doc=Document.from_json(d["doc"]),
spans=[Span.from_json(span) for span in d["spans"]],
)
def to_json(self, with_doc: bool = False, window: int = 0) -> Dict[str, Any]:
d: Dict[str, Any] = {"spans": [span.to_json(text=self.doc.text, window=window) for span in self.spans]}
if with_doc:
d["doc"] = self.doc.to_json()
return d
def __str__(self) -> str:
return f"{self.__class__.__name__}(doc={self.doc},spans=[{','.join(str(s) for s in self.spans)}])"
class TextSlice:
"""A slice of text from a document."""
__slots__ = "doc", "start", "end"
def __init__(self, doc: str, start: int, end: int):
self.doc = doc
self.start = start
self.end = end
@property
def text(self) -> str:
return self.doc[self.start : self.end]
def __str__(self) -> str:
return f"{self.__class__.__name__}(text={repr(self.text)},start={self.start},end={self.end})"
|
dolma-main
|
python/dolma/core/data_types.py
|
import logging
import multiprocessing
import tempfile
from contextlib import ExitStack, contextmanager
from typing import (
IO,
Any,
Dict,
Generator,
Iterable,
List,
NamedTuple,
Optional,
Set,
Union,
)
import msgspec
import smart_open
from .data_types import InputSpec, OutputSpec, TaggerOutputDictType
from .errors import DolmaFatalError, DolmaRetryableFailure, DolmaShardError
from .loggers import get_logger
from .parallel import BaseParallelProcessor, QueueType
from .paths import join_path, make_relative, mkdir_p, split_glob, split_path
from .registry import TaggerRegistry
from .utils import make_variable_name
# this placeholder gets used when a user has provided no experiment name, and we want to use taggers'
# names as experiment names.
EXPERIMENT_PLACEHOLDER_NAME = "_______EXPERIMENT_PLACEHOLDER_NAME_______"
def _make_paths_from_substitution(paths: List[str], find: str, replace: str) -> List[str]:
"""
Utility function to make paths using a find/replace substitution. This is useful if you want to
create a destination path from a source path by replacing part of the source path with something else.
For example, if you have a source path of `current_paths = ["s3://bucket/data/documents/**.json.gz"]` and
you want to replace `documents` with `attributes`, you can use this function to do that by calling
`_make_paths_from_substitution(current_paths, "documents", "attributes")`. This will return the following
list `["s3://bucket/data/attributes"]`. Note how glob patterns are removed from the paths.
"""
new_paths: List[str] = []
for curr in paths:
curr_pre_glob, _ = split_glob(curr)
curr_prot, curr_parts = split_path(curr_pre_glob)
if not curr_pre_glob.strip():
raise RuntimeError(f"Path '{curr}' contains a wildcard at the beginning.")
elif find not in curr_parts:
raise RuntimeError(f"Path '{curr}' does not contain a '{find}' component.")
find_dir_index = curr_parts.index(find)
dst_parts = [p if i != find_dir_index else replace for i, p in enumerate(curr_parts)]
new_paths.append(join_path(curr_prot, dst_parts))
return new_paths
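# The example from the docstring, written out (hypothetical paths):
# >>> _make_paths_from_substitution(
# ...     ["s3://bucket/data/documents/**.json.gz"], "documents", "attributes"
# ... )
# ['s3://bucket/data/attributes']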
def _make_paths_from_prefix(paths: List[str], prefix: str) -> List[str]:
"""
Utility function to make paths using a prefix. This is useful if you want to create a destination path
from a source path by prepending a prefix to the source path.
To create destination paths, we first find the longest common prefix among all source paths. Then, we
remove that prefix from each source path and prepend the new prefix. For example,
if you have source paths of
```
current_paths = [
"s3://bucket/data/documentsA/**.json.gz",
"s3://bucket/data/documentsB/**.json.gz",
]
```
and you want to replace `s3://bucket/data/` with `s3://bucket/attributes/`, you can use this function
to do that by calling `_make_paths_from_prefix(current_paths, "s3://bucket/attributes/")`. This will
return the following list
```
[
"s3://bucket/attributes/documentsA/",
"s3://bucket/attributes/documentsB/",
]
```
Note how glob patterns are removed from the paths.
"""
new_paths: List[str] = []
prefix_prot, prefix_path = split_path(prefix)
_, relative_paths = make_relative(paths)
for curr_path in relative_paths:
base_curr_path, _ = split_glob(curr_path)
new_paths.append(join_path(prefix_prot, prefix_path, base_curr_path))
return new_paths
class TaggerOutputLocation(NamedTuple):
exp: str
name: str
path: str
class TaggerOutputIO(NamedTuple):
exp: str
taggers: Set[str]
path: str
io: IO
encoder: msgspec.json.Encoder
def write(self, d: OutputSpec) -> None:
enc = self.encoder.encode(d)
self.io.write(enc.decode("utf-8") + "\n")
def _determine_output_paths_for_taggers(
experiment_name: str, destination: str, taggers: Iterable[str]
) -> Dict[str, TaggerOutputLocation]:
"""Utility function to derive the paths to which taggers output should be written.
If experiment_name is the placeholder name, then the name of each tagger will be used as part of the
destination path. Otherwise, the destination path will be used for all taggers."""
if experiment_name == EXPERIMENT_PLACEHOLDER_NAME:
return {
tagger_name: TaggerOutputLocation(
exp=make_variable_name(tagger_name),
name=make_variable_name(tagger_name),
path=destination.replace(EXPERIMENT_PLACEHOLDER_NAME, tagger_name),
)
for tagger_name in taggers
}
else:
return {
tagger_name: TaggerOutputLocation(
exp=make_variable_name(experiment_name), name=make_variable_name(tagger_name), path=destination
)
for tagger_name in taggers
}
@contextmanager
def _make_output_streams(
taggers_paths: Dict[str, TaggerOutputLocation], **open_kwargs: Any
) -> Generator[Dict[str, TaggerOutputIO], None, None]:
"""Utility function to open paths for taggers.
It is designed NOT to open duplicate paths if multiple taggers are writing to the same file.
"""
# keep track of the paths that have been opened
opened: Dict[str, TaggerOutputIO] = {}
with ExitStack() as stack:
for key, loc in taggers_paths.items():
if loc.path not in opened:
# make sure the parent directory exists
prot, path = split_path(loc.path)
parent = join_path(prot, path[:-1])
mkdir_p(parent)
# open a new file and create a new encoder
io = stack.enter_context(smart_open.open(loc.path, **open_kwargs))
encoder = msgspec.json.Encoder()
opened[loc.path] = TaggerOutputIO(
exp=loc.exp, taggers=set(), path=loc.path, io=io, encoder=encoder
)
# keep track of which taggers are writing to this path
opened[loc.path].taggers.add(key)
yield opened
@contextmanager
def _write_sample_to_streams(
taggers_paths: Dict[str, TaggerOutputLocation],
output_streams: Dict[str, TaggerOutputIO],
row: InputSpec,
) -> Generator[Dict[str, TaggerOutputDictType], None, None]:
"""Utility function to write a sample to the output streams; yields a dictionary that should be used
to collect the output of each tagger."""
samples_collectors: Dict[str, TaggerOutputDictType] = {}
yield samples_collectors
attributes_by_stream: Dict[str, TaggerOutputDictType] = {}
for tagger_name, tagger_data in samples_collectors.items():
tagger_output = taggers_paths[tagger_name]
for tagger_key, tagger_value in tagger_data.items():
tagger_key = f"{tagger_output.exp}__{tagger_output.name}__{make_variable_name(tagger_key)}"
attributes_by_stream.setdefault(tagger_output.path, {})[tagger_key] = tagger_value
for stream_path, attributes in attributes_by_stream.items():
output = OutputSpec(source=row.source, id=row.id, attributes=attributes)
output_streams[stream_path].write(output)
class TaggerProcessor(BaseParallelProcessor):
@classmethod
def get_logger(cls) -> logging.Logger:
return get_logger(cls.__name__)
@classmethod
def increment_progressbar( # type: ignore
cls,
queue: QueueType, # queue must be the first argument, and it should be a positional-only argument
/,
files: int = 0,
documents: int = 0,
) -> Dict[str, int]:
"""We override this method to specify which units we want to keep track of in a progress bar.
Specifically, we keep track of files and documents in this example. Their default value must be zero."""
# we call the super method to increment the progress bar
return super().increment_progressbar(queue, files=files, documents=documents)
@classmethod
def process_single(
cls,
source_path: str,
destination_path: str,
queue: QueueType,
**kwargs,
):
"""Lets count run the taggers! We will use the destination path to save each tagger output."""
# get names of taggers
taggers_names = kwargs.get("taggers_names", None)
if taggers_names is None:
raise RuntimeError("Taggers not in kwargs, this is a bug! Please report it.")
elif not isinstance(taggers_names, list) or not all(isinstance(t, str) for t in taggers_names):
raise RuntimeError("Taggers are in the wrong format, this is a bug! Please report it.")
taggers = {make_variable_name(t): TaggerRegistry.get(t)() for t in taggers_names}
# get name of experiment
if (experiment_name := kwargs.get("experiment_name", None)) is None:
raise RuntimeError("Experiment name not in kwargs, this is a bug! Please report it.")
# this is the dictionary that will hold the output of each tagger
taggers_paths = _determine_output_paths_for_taggers(
experiment_name=experiment_name, destination=destination_path, taggers=taggers
)
# skip on failure
skip_on_failure = kwargs.get("skip_on_failure", False)
# interval at which to update the progress bar; will double if the queue gets too full
update_interval = 1
# running document count; gets reset every time we update the progress
# bar
docs_cnt = 0
# creating dedicated decoder speeds up the process
decoder = msgspec.json.Decoder(InputSpec)
with smart_open.open(source_path, "rt", encoding="utf-8") as in_stream, _make_output_streams(
taggers_paths=taggers_paths, mode="wt", encoding="utf-8"
) as output_streams:
try:
for raw in in_stream:
row = decoder.decode(raw)
with _write_sample_to_streams(
taggers_paths=taggers_paths,
output_streams=output_streams,
row=row,
) as samples_collectors:
# we run the taggers; the context manager will write the output to the output streams
for tagger_name, tagger in taggers.items():
samples_collectors[tagger_name] = tagger.tag(row)
# increment the number of documents processed so far
docs_cnt += 1
if docs_cnt % update_interval == 0:
# update the progress bar every `update_interval` documents to avoid
# flooding the queue
cls.increment_progressbar(queue, documents=docs_cnt)
docs_cnt = 0
if queue.qsize() >= multiprocessing.cpu_count():
# double the update interval if the queue is full
update_interval *= 2
except Exception as e:
# handle any exception that might have occurred
msg = f"Failed to process {source_path} due to {e.__class__.__name__}: {' '.join(e.args)}"
if e.__class__.__name__ == "IncompleteReadError":
# Intermittent error that occurs when reading from S3
raise DolmaRetryableFailure(msg) from e
else:
if skip_on_failure:
raise DolmaShardError(msg) from e
else:
raise DolmaFatalError(msg) from e
# increment the files progress bar
cls.increment_progressbar(queue, files=1, documents=docs_cnt)
def create_and_run_tagger(
documents: List[str],
taggers: List[str],
experiment: Optional[str] = None,
destination: Union[None, str, List[str]] = None,
metadata: Union[None, str, List[str]] = None,
debug: bool = False,
seed: int = 0,
ignore_existing: bool = False,
skip_on_failure: bool = False,
retries_on_error: int = 0,
num_processes: int = 1,
):
"""This function creates a tagger and runs it on a list of documents.
Args:
documents (List[str]): List of documents to run the taggers on. Each element of the list is a path to
a file containing documents in json lines format, or a glob pattern that matches such files.
taggers (List[str]): List of taggers to run. Each element of the list is the name of a tagger.
experiment (str, optional): The name of the experiment. This will be used to prefix the names of the
attributes, as well as the name of the directory where the outputs will be saved in `destination`.
If not provided, the name of each tagger will be used as the experiment name.
destination (Union[None, str, List[str]], optional): The path where the outputs will be saved. If
`None`, the outputs will be saved in a directory parallel to the directory containing the
documents, with the same name as `experiment`. If a string, paths corresponding to each element
of `documents` will be created by determining a relative path from the directory containing the
documents.
metadata (Union[None, str, List[str]], optional): Location where to save metadata that keeps track of
which documents have been processed. If `None`, the metadata will be saved in a temporary directory.
debug (bool, optional): Whether to run in debug mode. Defaults to False.
seed (int, optional): The seed to use for the random number generator. Defaults to 0.
ignore_existing (bool, optional): Whether to ignore existing outputs and re-run the taggers.
Defaults to False.
skip_on_failure (bool, optional): Whether to skip a document if it fails to process. Defaults to False.
retries_on_error (int, optional): Number of times to retry processing a document if it fails.
Defaults to 0 (fail immediately)
num_processes (int, optional): Number of processes to use. Defaults to 1.
"""
# use placeholder experiment name if none is provided; raise an error if the placeholder name is used
if experiment == EXPERIMENT_PLACEHOLDER_NAME:
raise RuntimeError(f"Experiment name cannot be {EXPERIMENT_PLACEHOLDER_NAME}; reserved for internal use.")
elif experiment is None:
experiment = EXPERIMENT_PLACEHOLDER_NAME
if destination is None:
try:
destination = _make_paths_from_substitution(documents, "documents", f"attributes/{experiment}")
except Exception as e:
raise RuntimeError("Could not make destination paths from documents paths") from e
elif isinstance(destination, str):
try:
destination = _make_paths_from_prefix(documents, join_path(None, destination, experiment))
except Exception as e:
raise RuntimeError(f"Could not make destination paths from prefix {destination}") from e
metadata = metadata or tempfile.mkdtemp()
if isinstance(metadata, str):
try:
metadata = _make_paths_from_prefix(documents, metadata)
except Exception as e:
raise RuntimeError(f"Could not make metadata paths from prefix {metadata}") from e
tagger = TaggerProcessor(
source_prefix=documents,
destination_prefix=destination,
metadata_prefix=metadata,
debug=debug,
seed=seed,
ignore_existing=ignore_existing,
retries_on_error=retries_on_error,
num_processes=num_processes,
)
tagger(
experiment_name=experiment,
taggers_names=taggers,
skip_on_failure=skip_on_failure,
)
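# Minimal usage sketch, for illustration only: invoking `create_and_run_tagger` programmatically
# with the registered `c4_v1` tagger. The document glob is a placeholder path; it contains a
# `documents/` segment so that default destination paths can be derived from it.
def _example_tagger_run() -> None:
    create_and_run_tagger(
        documents=["s3://example-bucket/my-dataset/documents/*.jsonl.gz"],  # placeholder path
        taggers=["c4_v1"],
        experiment="c4_attributes",
        num_processes=4,
    )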
|
dolma-main
|
python/dolma/core/runtime.py
|
"""
Base implementation for a fasttext tagger; all fasttext taggers should inherit from this class.
@kylel, @soldni
"""
import os
from tempfile import NamedTemporaryFile
from typing import Iterable, Literal, NamedTuple, Optional
import smart_open
from cached_path import cached_path
from fasttext import train_supervised
from fasttext.FastText import _FastText
from .data_types import DocResult, Document, Span, TextSlice
from .taggers import BaseTagger
from .utils import split_paragraphs, split_sentences
class Prediction(NamedTuple):
label: str
score: float
class BaseFastTextTagger(BaseTagger):
SENTENCE_LEVEL_TAGGER = "sentence"
PARAGRAPH_LEVEL_TAGGER = "paragraph"
DOCUMENT_LEVEL_TAGGER = "document"
def __init__(self, model_path: str, model_mode: str) -> None:
# we load the model through the private `_FastText` class to avoid a warning from the fasttext library. See this comment:
# https://github.com/facebookresearch/fastText/issues/1056#issuecomment-1278058705
self.classifier = _FastText(str(cached_path(model_path)))
self.mode = model_mode
@classmethod
def train(
cls,
train_file: str,
save_path: str,
learning_rate: float = 0.1,
word_vectors_dim: int = 100,
context_window_size: int = 5,
max_epochs: int = 100, # non-default
min_word_count: int = 1,
min_label_count: int = 1,
min_char_ngram: int = 0,
max_char_ngram: int = 0,
num_negative_samples: int = 5,
max_word_ngram: int = 2, # non-default
loss_function: Literal["ns", "hs", "softmax", "ova"] = "softmax",
num_buckets: int = 2_000_000,
num_threads: int = 0,
learning_rate_update_rate: int = 100,
sampling_threshold: float = 0.0001,
label_prefix: str = "__label__",
verbose: int = 2,
pretrained_vectors: Optional[str] = None,
) -> _FastText:
# download potentially remote files
local_train_file = cached_path(train_file)
local_pretrained_vectors = cached_path(pretrained_vectors) if pretrained_vectors else None
# base checks on file format
with open(local_train_file, "r") as f:
# check a few lines to see if the file is in the right format
i = 0
for ln in f:
if label_prefix not in ln:
raise ValueError(f"{train_file} not the fasttext format, no labels found!")
if (i := i + 1) > 5:
break
if i == 0:
raise ValueError(f"{train_file} is empty!")
# train the fasttext model
classifier = train_supervised(
input=local_train_file,
lr=learning_rate,
dim=word_vectors_dim,
ws=context_window_size,
epoch=max_epochs,
minCount=min_word_count,
minCountLabel=min_label_count,
minn=min_char_ngram,
maxn=max_char_ngram,
neg=num_negative_samples,
wordNgrams=max_word_ngram,
loss=loss_function,
bucket=num_buckets,
thread=num_threads,
lrUpdateRate=learning_rate_update_rate,
t=sampling_threshold,
label=label_prefix,
verbose=verbose,
pretrainedVectors=local_pretrained_vectors,
)
local_save_path = None
try:
# create a local temp file where we save the model
with NamedTemporaryFile("w", delete=False) as f:
local_save_path = f.name
# save the model
classifier.save_model(local_save_path)
# upload to remote if save_path is s3 path
with smart_open.open(save_path, "wb") as fo, smart_open.open(local_save_path, "rb") as fi:
fo.write(fi.read())
finally:
# regardless to what happened, remove the local temp file if it
# exists
if local_save_path is not None and os.path.exists(local_save_path):
os.remove(local_save_path)
return classifier
@classmethod
def test(
cls,
test_file: str,
model_path: Optional[str] = None,
classifier: Optional[_FastText] = None,
):
# load the model if one is not provided
if classifier is None:
assert model_path is not None, "Please provide either a model path or a model"
classifier = _FastText(str(cached_path(model_path)))
local_test_file = cached_path(test_file)
model_performance = classifier.test(local_test_file)
print(model_performance)
def predict(self, doc: Document) -> DocResult:
if self.mode == self.SENTENCE_LEVEL_TAGGER:
units = split_sentences(doc.text)
elif self.mode == self.PARAGRAPH_LEVEL_TAGGER:
units = split_paragraphs(doc.text)
elif self.mode == self.DOCUMENT_LEVEL_TAGGER:
units = [TextSlice(doc=doc.text, start=0, end=len(doc.text))]
else:
raise ValueError(f"Unknown mode {self.mode}")
spans = []
for unit in units:
for prediction in self.predict_slice(unit):
spans.append(Span(start=unit.start, end=unit.end, type=prediction.label, score=prediction.score))
return DocResult(doc=doc, spans=spans)
def predict_slice(self, text_slice: TextSlice) -> Iterable[Prediction]:
raise NotImplementedError("Please implement the predict slice method")
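# Minimal subclass sketch, for illustration only: classify each paragraph with the wrapped
# fasttext model. The model path is a placeholder, and the call to `classifier.predict` assumes
# the standard fasttext API (a tuple of labels plus a parallel array of probabilities).
class ExampleFastTextQualityTagger(BaseFastTextTagger):
    def __init__(self, model_path: str = "s3://example-bucket/quality.bin") -> None:  # placeholder path
        super().__init__(model_path=model_path, model_mode=self.PARAGRAPH_LEVEL_TAGGER)
    def predict_slice(self, text_slice: TextSlice) -> Iterable[Prediction]:
        # fasttext cannot handle newlines in its input, so replace them before predicting
        labels, scores = self.classifier.predict(text_slice.text.replace("\n", " "))
        for label, score in zip(labels, scores):
            yield Prediction(label=label, score=float(score))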
|
dolma-main
|
python/dolma/core/ft_tagger.py
|
"""
Builds a dataset for training a FastText model from 2 or more pretraining datasets.
@rauthur
"""
import argparse
import gzip
import json
import os
import random
from dataclasses import dataclass
from functools import partial
from multiprocessing import Manager, Pool, Process, Queue
from threading import Event
from typing import Generator, List, Optional
import smart_open
from .data_types import TextSlice
from .ft_tagger import BaseFastTextTagger
from .paths import glob_path
from .utils import split_paragraphs, split_sentences
_WRITER_EXIT_MSG = "__WRITE__EXIT__"
@dataclass
class Config:
target_path: str
sample_paths: List[str]
out_path: str
mode: str
newlines: str
n_proc: int
n_segments: Optional[int]
pos_label: str
neg_label: str
def gzip_open(file, mode, **open_kwargs):
return gzip.open(filename=file, mode=mode, **open_kwargs)
def _split(text: str, config: Config) -> Generator[TextSlice, None, None]:
if config.mode == BaseFastTextTagger.SENTENCE_LEVEL_TAGGER:
for sentence in split_sentences(text):
yield sentence
elif config.mode == BaseFastTextTagger.PARAGRAPH_LEVEL_TAGGER:
for paragraph in split_paragraphs(text):
yield paragraph
elif config.mode == BaseFastTextTagger.DOCUMENT_LEVEL_TAGGER:
yield TextSlice(doc=text, start=0, end=len(text))
else:
raise RuntimeError(f"Unknown data split mode: {config.mode}")
@dataclass
class ReadResult:
examples: List[str]
def process_file(config: Config, q: "Queue[str]", flag: Event, label: str, fn):
# Check a global exit flag and stop processing file
if flag.is_set():
return
print(f"Processing {fn}")
with smart_open.open(fn, "rt") as f:
for line in f:
# Abort part way through processing this file is flag set
if flag.is_set():
return
# Expected JSONL format following OLMo data spec
data = json.loads(line)
line_text = data["text"]
if len(line_text) == 0:
continue
for slice in _split(line_text, config):
final_text = slice.text
if "\n" in final_text:
if config.newlines == "replace":
final_text = final_text.replace("\n", " ")
elif config.newlines == "skip":
continue
q.put(f"__label__{label} {final_text}")
def write_results(config: Config, q: "Queue[str]", flag: Event):
written = 0
# the queue carries strings, so the output file must be opened in text mode
with smart_open.open(config.out_path, "wt") as o:
while True:
msg = q.get()
if msg == _WRITER_EXIT_MSG:
break
if not flag.is_set():
# write the message that was just dequeued (a second q.get() would silently drop it)
o.write(msg)
o.write("\n")
written += 1
if config.n_segments is not None and written >= config.n_segments:
flag.set()
written = 0
def process_paths(paths: List[str], config: Config, q: "Queue[str]", flag: Event, label: str):
fns = [fn for p in paths for fn in glob_path(p)]
random.shuffle(fns)
work_fn = partial(process_file, config, q, flag, label)
with Pool(processes=max(1, config.n_proc - 1)) as pool:
pool.map(work_fn, fns)
pool.close()
pool.join()
def main(config: Config):
random.seed(117)
with Manager() as manager:
q: "Queue[str]" = manager.Queue() # type: ignore
flag = manager.Event()
writer = Process(target=write_results, args=(config, q, flag))
writer.start()
# Generate expected number of positive examples
process_paths([config.target_path], config, q, flag, config.pos_label)
# Reset early exit flag as all positive examples processed
flag.clear()
# Generate expected number of negative examples
process_paths(config.sample_paths, config, q, flag, config.neg_label)
q.put(_WRITER_EXIT_MSG)
writer.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-t",
"--target",
required=True,
type=str,
help="Local or remote path including OLMo formatted TARGET dataset",
)
parser.add_argument(
"-s",
"--sample",
required=True,
type=str,
nargs="+",
help="Sample these paths to create negative examples",
)
parser.add_argument(
"-p",
"--processes",
type=int,
required=False,
default=os.cpu_count(),
help="Number of processes to launch",
)
parser.add_argument(
"-m",
"--mode",
type=str,
required=True,
choices=[
BaseFastTextTagger.SENTENCE_LEVEL_TAGGER,
BaseFastTextTagger.PARAGRAPH_LEVEL_TAGGER,
BaseFastTextTagger.DOCUMENT_LEVEL_TAGGER,
],
help="Output examples at this level",
)
parser.add_argument(
"-o",
"--output-filename",
type=str,
required=True,
help="Path to write the processed result (can be on S3)",
)
parser.add_argument(
"--n-segments",
type=int,
required=False,
help="Stop after generating this many segments (e.g., sentences)",
)
parser.add_argument(
"--newlines",
type=str,
required=False,
choices=["skip", "keep", "replace"],
default="skip",
help="Skip, keep or replace with ' ' examples with newlines after splitting",
)
parser.add_argument(
"--pos-label",
type=str,
required=False,
default="pos",
help="Use this class label for positive instances",
)
parser.add_argument(
"--neg-label",
type=str,
required=False,
default="neg",
help="Use this class label for negative instances",
)
args = parser.parse_args()
config = Config(
target_path=args.target,
sample_paths=args.sample,
out_path=args.output_filename,
mode=args.mode,
newlines=args.newlines,
n_proc=args.processes,
n_segments=args.n_segments,
pos_label=args.pos_label,
neg_label=args.neg_label,
)
main(config)
|
dolma-main
|
python/dolma/core/ft_dataset.py
|
"""
Filters.
@kylel, @soldni
"""
from abc import abstractmethod
from typing import List
from .data_types import DocResult, Document, InputSpec, TaggerOutputDictType
# digits after the decimal point
TAGGER_SCORE_PRECISION = 5
class BaseTagger:
FIELDS: List[str] = ["text"]
@classmethod
def train(cls, *args, **kwargs):
raise RuntimeError("This tagger does not support training")
@classmethod
def test(cls, *args, **kwargs):
raise RuntimeError("This tagger does not support testing")
@abstractmethod
def predict(self, doc: Document) -> DocResult:
raise NotImplementedError
def tag(self, row: InputSpec) -> TaggerOutputDictType:
"""Internal function that is used by the tagger to get data"""
doc = Document(source=row.source, version=row.version, id=row.id, text=row.text)
doc_result = self.predict(doc)
tagger_output: TaggerOutputDictType = {}
for span in doc_result.spans:
output = (span.start, span.end, round(float(span.score), TAGGER_SCORE_PRECISION))
tagger_output.setdefault(span.type, []).append(output)
return tagger_output
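# Minimal subclass sketch, for illustration only: the smallest possible tagger emits a single
# document-level span whose score is the character count. Real taggers also register themselves
# with `TaggerRegistry.add(...)` from the registry module, as the taggers package does.
from .data_types import Span  # imported here only for the example below
class ExampleCharCountTagger(BaseTagger):
    def predict(self, doc: Document) -> DocResult:
        return DocResult(doc=doc, spans=[Span(start=0, end=len(doc.text), type="char_count", score=len(doc.text))])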
|
dolma-main
|
python/dolma/core/taggers.py
|
import re
import string
from typing import List
try:
import blingfire
BLINGFIRE_AVAILABLE = True
except Exception:
BLINGFIRE_AVAILABLE = False
import nltk
from nltk.tokenize.punkt import PunktSentenceTokenizer
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
from .data_types import TextSlice
sent_tokenizer = PunktSentenceTokenizer()
def make_variable_name(name: str, remove_multiple_underscores: bool = False) -> str:
# use underscores for any non-valid characters in variable name
name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
if remove_multiple_underscores:
# replace multiple underscores with a single underscore
name = re.sub(r"__+", "_", name)
if name[0] in string.digits:
raise ValueError(f"Invalid variable name {name}")
return name
def split_paragraphs(text: str, remove_empty: bool = True) -> List[TextSlice]:
"""
Split a string into paragraphs. A paragraph is defined as a sequence of zero or more characters, followed
by a newline character, or a sequence of one or more characters, followed by the end of the string.
"""
text_slices = [
TextSlice(doc=text, start=match.start(), end=match.end())
for match in re.finditer(r"([^\n]*\n|[^\n]+$)", text)
]
if remove_empty is True:
text_slices = [text_slice for text_slice in text_slices if text_slice.text.strip()]
return text_slices
def split_sentences(text: str, remove_empty: bool = True) -> List[TextSlice]:
"""
Split a string into sentences.
"""
if text and BLINGFIRE_AVAILABLE:
_, offsets = blingfire.text_to_sentences_and_offsets(text)
elif text:
offsets = [(start, end) for start, end in sent_tokenizer.span_tokenize(text)]
else:
offsets = []
if remove_empty is True:
return [TextSlice(doc=text, start=start, end=end) for (start, end) in offsets]
else:
raise NotImplementedError("remove_empty=False is not implemented yet")
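# Usage sketch, for illustration only: both splitters return TextSlice objects that keep offsets
# into the original string, so `text[s.start:s.end]` and `s.text` recover the same span.
def _example_split_usage() -> None:
    text = "First paragraph, first sentence. Second sentence.\nSecond paragraph.\n"
    for p in split_paragraphs(text):
        print("paragraph:", repr(text[p.start:p.end]))
    for s in split_sentences(text):
        print("sentence:", repr(s.text))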
|
dolma-main
|
python/dolma/core/utils.py
|
class DolmaError(Exception):
"""Base class for all errors"""
class DolmaFatalError(DolmaError):
"""Fatal error. Abort the entire process"""
class DolmaShardError(DolmaError):
"""Fail the shard and continue"""
class DolmaRetryableFailure(DolmaError):
"""Retry if a shard throws this error"""
class DolmaRustPipelineError(DolmaError):
"""Error raised by the rust pipeline"""
class DolmaConfigError(DolmaError):
"""Error raised while parsing config"""
|
dolma-main
|
python/dolma/core/errors.py
|
"""
Code to train a Filter.
@kylel
"""
|
dolma-main
|
python/dolma/core/trainer.py
|
import inspect
import itertools
import multiprocessing
import os
import pickle
import random
import re
import time
from contextlib import ExitStack
from datetime import datetime
from functools import partial
from queue import Queue
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union
import smart_open
import tqdm
from typing_extensions import TypeAlias
from .errors import DolmaError, DolmaRetryableFailure
from .paths import (
add_suffix,
glob_path,
join_path,
make_relative,
mkdir_p,
split_path,
sub_prefix,
)
METADATA_SUFFIX = ".done.txt"
# we need to quote the type alias because we want to support Python 3.8
QueueType: TypeAlias = "Queue[Union[None, Tuple[int, ...]]]"
class BaseParallelProcessor:
"""A base parallel processor that supports applying the same process_single method to a list of files.
This class is meant to be subclassed. The subclass must implement:
- `process_single` method, which takes a source path file to transform, and a destination path where
to save the transformed file.
- `increment_progressbar` method, which defines which units to keep track of in the progress bar.
See documentation of both methods for more details on how to implement them correctly.
"""
def __init__(
self,
source_prefix: Union[str, List[str]],
destination_prefix: Union[str, List[str]],
metadata_prefix: Union[str, List[str]],
num_processes: int = 1,
debug: bool = False,
seed: int = 0,
pbar_timeout: float = 1e-3,
ignore_existing: bool = False,
include_paths: Optional[List[str]] = None,
exclude_paths: Optional[List[str]] = None,
files_regex_pattern: Optional[str] = None,
retries_on_error: int = 0,
):
"""Initialize the parallel processor.
Args:
source_prefix (str): The location where source files are stored. This can be a local directory or a
prefix to an S3 location.
destination_prefix (str): The location where to save the transformed files. This can be a local
directory or a prefix to an S3 location. Local directories will be created if they do not exist.
The directory structure from the source prefix will be replicated in the destination prefix;
file names will also be the same.
metadata_prefix (str): The prefix of the metadata files to save. This can be a local path or an
S3 path. Metadata output will be created for each file after it is processed. Filenames are
checked to verify if a file has been processed and can be skipped unless `ignore_existing` is
set to true.
num_processes (int, optional): The number of processes to use. Defaults to 1.
debug (bool, optional): Whether to run in debug mode; if true, no multiprocessing will be used.
Defaults to False.
seed (int, optional): The random seed to use when shuffling input files. Defaults to 0.
pbar_timeout (float, optional): How often to update progress bars in seconds.
Defaults to 0.001 seconds.
ignore_existing (bool, optional): Whether to ignore files that have been already processed and
re-run the processor on all files from scratch. Defaults to False.
include_paths (Optional[List[str]], optional): A list of paths to include. If provided, only files
that match one of the paths will be processed. Defaults to None.
exclude_paths (Optional[List[str]], optional): A list of paths to exclude. If provided, files that
match one of the paths will be skipped. Defaults to None.
"""
self.src_prefixes = [source_prefix] if isinstance(source_prefix, str) else source_prefix
self.dst_prefixes = [destination_prefix] if isinstance(destination_prefix, str) else destination_prefix
self.meta_prefixes = [metadata_prefix] if isinstance(metadata_prefix, str) else metadata_prefix
self.num_processes = num_processes
self.debug = debug
self.seed = seed
self.pbar_timeout = pbar_timeout
self.ignore_existing = ignore_existing
self.include_paths = set(include_paths) if include_paths is not None else None
self.exclude_paths = set(exclude_paths) if exclude_paths is not None else None
self.files_regex_pattern = re.compile(files_regex_pattern) if files_regex_pattern else None
self.retries_on_error = retries_on_error
# checking that the increment_progressbar method is subclassed correctly
sig = inspect.signature(self.increment_progressbar)
if "queue" not in sig.parameters or sig.parameters["queue"].kind != inspect.Parameter.POSITIONAL_ONLY:
raise AttributeError(
"increment_progressbar must have a positional-only argument named 'queue'; "
"Check that you have subclassed BaseParallelProcessor correctly!"
)
if "kwargs" in sig.parameters and sig.parameters["kwargs"].kind == inspect.Parameter.VAR_KEYWORD:
raise AttributeError(
"increment_progressbar must not have a **kwargs argument; "
"Check that you have subclassed BaseParallelProcessor correctly!"
)
if any(p.name != "queue" and p.default != 0 for p in sig.parameters.values()):
raise AttributeError(
"increment_progressbar must have a default value of 0 for all arguments except 'queue'; "
"Check that you have subclassed BaseParallelProcessor correctly!"
)
if len(self.src_prefixes) != len(self.dst_prefixes) or len(self.src_prefixes) != len(self.meta_prefixes):
raise ValueError("The number of source, destination and metadata prefixes must be the same.")
if len(self.src_prefixes) == 0:
raise ValueError("At least one source prefix must be provided.")
if any("*" in p for p in itertools.chain(self.dst_prefixes, self.meta_prefixes)):
raise ValueError("Destination and metadata prefixes cannot contain wildcards.")
@classmethod
def process_single(
cls,
source_path: str,
destination_path: str,
queue: QueueType,
**kwargs: Any,
):
"""Process a single file.
This method must be implemented by the subclass. It takes a source path file to transform, and a
destination path where to save the transformed file. It also takes a queue to increment the progress
bars. The queue should be passed to the `increment_progressbar` method.
Args:
source_path (str): The path to the source file to transform. Can be an S3 path or a local path.
destination_path (str): The path to the destination file to save. Can be an S3 path or a local path.
queue (QueueType): The queue to increment the progress bars.
"""
raise NotImplementedError()
@classmethod
def _process_single_and_save_status(
cls,
source_path: str,
destination_path: str,
metadata_path: str,
queue: QueueType,
serialized_kwargs: bytes,
):
"""A wrapper around process single that saves a metadata file if processing is successful."""
kwargs = pickle.loads(serialized_kwargs)
retries_on_error = kwargs.get("retries_on_error", 0) + 1
while True:
try:
cls.process_single(
source_path=source_path, destination_path=destination_path, queue=queue, **kwargs
)
break
except DolmaRetryableFailure as e:
retries_on_error -= 1
if retries_on_error == 0:
raise DolmaError from e
with smart_open.open(metadata_path, "wt") as f:
f.write(datetime.now().isoformat())
@classmethod
def increment_progressbar(cls, queue: QueueType, /, **kwargs: int) -> Dict[str, int]:
"""Increment the progress bar by putting a tuple in the queue.
When subclassing, we recommend defining which units to keep track of in the progress bar by
defining keyword arguments. Then you can call the base class via `super()` and pass the keyword.
Example:
```python
class MyProcessor(BaseParallelProcessor):
def increment_progressbar(self, queue, /, files = 0, documents = 0): # we use two progress bars
return super().increment_progressbar(queue, files=files, documents=documents)
```
"""
queue.put(tuple(kwargs.get(k, 0) for k in kwargs))
return kwargs
@classmethod
def _run_threaded_progressbar(
cls,
queue: QueueType,
timeout: float,
):
"""Run a progress bar in a separate thread.
Args:
queue (QueueType): The queue to increment the progress bars.
timeout (float): How often to update the progress bars in seconds.
"""
sample_queue_output = cls.increment_progressbar(queue)
with ExitStack() as stack:
pbars = [
stack.enter_context(
tqdm.tqdm(desc=str(k), unit=str(k)[:1], position=i, unit_scale=True) # pyright: ignore
)
for i, k in enumerate(sample_queue_output)
]
while True:
item = queue.get()
if item is None:
break
for pbar, value in zip(pbars, item):
pbar.update(value)
time.sleep(timeout)
def _debug_run_all(
self,
all_source_paths: List[str],
all_destination_paths: List[str],
all_metadata_paths: List[str],
**process_single_kwargs: Any,
):
"""Run files one by one on the main process
Args:
all_source_paths (List[str]): The list of source paths to process.
all_destination_paths (List[str]): The list of destination paths where transformed files are saved.
all_metadata_paths (List[str]): The locations where to save metadata.
"""
it = zip(all_source_paths, all_destination_paths, all_metadata_paths)
pbar_queue: QueueType = Queue()
thread = Thread(target=self._run_threaded_progressbar, args=(pbar_queue, self.pbar_timeout), daemon=True)
thread.start()
for source_prefix, destination_prefix, metadata_prefix in it:
self._process_single_and_save_status(
source_path=source_prefix,
destination_path=destination_prefix,
metadata_path=metadata_prefix,
queue=pbar_queue,
serialized_kwargs=pickle.dumps(process_single_kwargs),
)
pbar_queue.put(None)
thread.join()
def _multiprocessing_run_all(
self,
all_source_paths: List[str],
all_destination_paths: List[str],
all_metadata_paths: List[str],
**process_single_kwargs: Any,
):
"""Run files in parallel using multiprocessing.
Args:
all_source_paths (List[str]): The list of source paths to process.
all_destination_paths (List[str]): The list of destination paths where transformed files are saved.
all_metadata_paths (List[str]): The locations where to save metadata.
"""
try:
multiprocessing.set_start_method("spawn")
except RuntimeError:
assert multiprocessing.get_start_method() == "spawn", "Multiprocessing start method must be spawn"
with multiprocessing.Pool(processes=self.num_processes) as pool:
pbar_queue: QueueType = (manager := multiprocessing.Manager()).Queue()
thread = Thread(
target=self._run_threaded_progressbar, args=(pbar_queue, self.pbar_timeout), daemon=True
)
thread.start()
process_single_fn = partial(self.process_single, queue=pbar_queue)
results = []
for s, d, m in zip(all_source_paths, all_destination_paths, all_metadata_paths):
process_single_fn = partial(
self._process_single_and_save_status,
queue=pbar_queue,
source_path=s,
destination_path=d,
metadata_path=m,
serialized_kwargs=pickle.dumps(process_single_kwargs),
)
result = pool.apply_async(process_single_fn)
results.append(result)
for result in results:
result.get()
pool.close()
pool.join()
pbar_queue.put(None)
thread.join()
manager.shutdown()
def _valid_path(self, path: str) -> bool:
if self.include_paths is not None and path not in self.include_paths:
return False
if self.exclude_paths is not None and path in self.exclude_paths:
return False
if self.files_regex_pattern is not None and not self.files_regex_pattern.search(path):
return False
return True
def _get_all_paths(self) -> Tuple[List[str], List[str], List[str]]:
"""Get all paths to process using prefixes provided"""
all_source_paths, all_destination_paths, all_metadata_paths = [], [], []
for src_prefix, dst_prefix, meta_prefix in zip(self.src_prefixes, self.dst_prefixes, self.meta_prefixes):
current_source_prefixes = sorted(glob_path(src_prefix))
if len(current_source_prefixes) > 1:
# make relative only makes sense if there is more than one path; otherwise, it's unclear
# what a relative path would be.
prefix, rel_paths = make_relative(current_source_prefixes)
elif len(current_source_prefixes) == 1:
# in case we have a single path, we can just use the path minus the file as the shared prefix,
# and the file as the relative path
prot, parts = split_path(current_source_prefixes[0])
prefix, rel_paths = join_path(prot, *parts[:-1]), [parts[-1]]
else:
raise ValueError(f"Could not find any files matching {src_prefix}")
# shuffle the order of the files so time estimation in progress bars is more accurate
random.shuffle(rel_paths)
# get a list of which metadata files already exist
existing_metadata_names = set(
# slice off the metadata suffix (str.strip would remove matching characters, not the suffix)
sub_prefix(path, meta_prefix)[: -len(METADATA_SUFFIX)]
for path in glob_path(meta_prefix)
)
for path in rel_paths:
if not self.ignore_existing and path in existing_metadata_names:
continue
if not self._valid_path(path):
continue
# get relative path from source prefix
rel_dir, _ = os.path.split(path)
# make sure destination/metadata directories exists
mkdir_p(os.path.join(dst_prefix, rel_dir))
mkdir_p(os.path.join(meta_prefix, rel_dir))
# create new paths to pass to taggers
all_source_paths.append(add_suffix(prefix, path))
all_destination_paths.append(add_suffix(dst_prefix, path))
all_metadata_paths.append(add_suffix(meta_prefix, path) + METADATA_SUFFIX)
return all_source_paths, all_destination_paths, all_metadata_paths
def __call__(self, **process_single_kwargs: Any):
"""Run the processor."""
random.seed(self.seed)
# in case the user wants to override the default kwargs for retries
process_single_kwargs.setdefault("retries_on_error", self.retries_on_error)
all_source_paths, all_destination_paths, all_metadata_paths = self._get_all_paths()
print(f"Found {len(all_source_paths):,} files to process")
fn = self._debug_run_all if self.debug else self._multiprocessing_run_all
fn(
all_source_paths=all_source_paths,
all_destination_paths=all_destination_paths,
all_metadata_paths=all_metadata_paths,
**process_single_kwargs,
)
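# Minimal subclass sketch, for illustration of the contract described in the class docstring:
# `process_single` copies each JSONL file from source to destination while counting documents,
# and `increment_progressbar` declares the two units tracked in the progress bars. The update
# cadence of 1,000 documents is an arbitrary choice for the example.
class ExampleCopyProcessor(BaseParallelProcessor):
    @classmethod
    def increment_progressbar(cls, queue: QueueType, /, files: int = 0, documents: int = 0) -> Dict[str, int]:
        return super().increment_progressbar(queue, files=files, documents=documents)
    @classmethod
    def process_single(cls, source_path: str, destination_path: str, queue: QueueType, **kwargs: Any):
        docs_cnt = 0
        with smart_open.open(source_path, "rt") as fi, smart_open.open(destination_path, "wt") as fo:
            for line in fi:
                fo.write(line)
                docs_cnt += 1
                if docs_cnt % 1_000 == 0:
                    # report periodically to avoid flooding the progress bar queue
                    cls.increment_progressbar(queue, documents=docs_cnt)
                    docs_cnt = 0
        cls.increment_progressbar(queue, files=1, documents=docs_cnt)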
|
dolma-main
|
python/dolma/core/parallel.py
|
import multiprocessing
from typing import List, TypeVar
from cached_path import cached_path
from omegaconf.omegaconf import OmegaConf as om
from omegaconf.omegaconf import Resolver
from ..core.paths import glob_path
__all__ = ["cache", "glob", "processes"]
C = TypeVar("C", bound=Resolver)
def resolver(resolver: C) -> C:
resolver_name = f"d.{resolver.__name__}"
om.register_new_resolver(resolver_name, resolver, replace=True)
return resolver
@resolver
def cache(path: str) -> str:
return str(cached_path(path))
@resolver
def glob(path: str) -> List[str]:
globbed = list(glob_path(path))
assert len(globbed) > 0, f"Path {path} does not match any files"
return globbed
@resolver
def processes(n: int = 0) -> int:
return max(1, multiprocessing.cpu_count() - n)
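# Usage sketch, for illustration only: once registered, the resolvers are available under the
# `d.` prefix in any OmegaConf config (YAML files included). The glob pattern is a placeholder.
def _example_resolver_usage() -> None:
    cfg = om.create(
        {
            "processes": "${d.processes:1}",  # cpu_count() - 1
            "documents": "${d.glob:/data/documents/*.jsonl.gz}",  # placeholder pattern
        }
    )
    print(om.to_container(cfg, resolve=True))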
|
dolma-main
|
python/dolma/cli/resolvers.py
|
from dataclasses import dataclass
from typing import List, Optional
from dolma.cli import BaseCli, field, print_config
from dolma.cli.shared import WorkDirConfig, make_workdirs
from dolma.core.analyzer import create_and_run_analyzer
from dolma.core.errors import DolmaConfigError
from dolma.core.loggers import get_logger
from dolma.core.paths import glob_path
@dataclass
class AnalyzerConfig:
attributes: List[str] = field(
default=[],
help="One or more attributes paths to process; Can be either local or S3 paths. Globs are supported.",
)
report: Optional[str] = field(
default=None,
help=(
"Path where to save the report. Can be either local or S3 path. "
"If not provided, the report will be printed to stdout."
),
)
bins: int = field(
default=1_000,
help="Number of bins to use for the histograms.",
)
processes: int = field(
default=1,
help="Number of parallel processes to use.",
)
seed: int = field(
default=0,
help="Seed to use for reproducibility.",
)
debug: bool = field(
default=False,
help="Whether to run in debug mode.",
)
work_dir: WorkDirConfig = field(default=WorkDirConfig(), help="Configuration for temporary work directories.")
regex: Optional[str] = field(
default=None,
help="Regex to use for filtering the attributes by name.",
)
class AnalyzerCli(BaseCli):
CONFIG = AnalyzerConfig
DESCRIPTION = "Analyze the distribution of attributes values in a dataset."
@classmethod
def run(cls, parsed_config: AnalyzerConfig):
logger = get_logger("analyzer")
# perform some path validation to make sure we don't call the mixer with invalid config
total_matching_documents = 0
for document in parsed_config.attributes:
current_matching_documents = sum(1 for _ in glob_path(document))
if current_matching_documents == 0:
# only raise a warning if no documents are found for a single path
logger.warn(f"No documents found for path {document}")
total_matching_documents += current_matching_documents
if total_matching_documents == 0:
# but raise an error if no documents are found for all paths
raise DolmaConfigError(f"No documents found for paths {parsed_config.attributes}.")
print_config(parsed_config)
with make_workdirs(parsed_config.work_dir) as work_dirs:
create_and_run_analyzer(
attributes=parsed_config.attributes,
report=parsed_config.report,
summaries_path=work_dirs.output,
metadata_path=work_dirs.input,
debug=parsed_config.debug,
seed=parsed_config.seed,
num_bins=parsed_config.bins,
num_processes=parsed_config.processes,
name_regex=parsed_config.regex,
)
|
dolma-main
|
python/dolma/cli/analyzer.py
|
"""
Utilities to work with a OmegaConf structured config object
Author: Luca Soldaini (@soldni)
"""
from argparse import ArgumentParser, Namespace
from collections.abc import Iterable
from copy import deepcopy
from dataclasses import Field
from dataclasses import field as dataclass_field
from dataclasses import is_dataclass
from logging import warn
from typing import Any, Dict, Generic, Literal, Optional, Protocol, Type, TypeVar, Union
from omegaconf import MISSING, DictConfig, ListConfig
from omegaconf import OmegaConf as om
from rich.console import Console
from rich.syntax import Syntax
__all__ = [
"BaseCli",
"field",
"make_parser",
"namespace_to_nested_omegaconf",
"print_config",
]
T = TypeVar("T", bound=Any)
D = TypeVar("D", bound="DataClass")
A = TypeVar("A", bound="ArgumentParser")
def _field_nargs(default: Any) -> Union[Literal["?"], Literal["*"]]:
# return '*' if the default is an iterable (but not a string/bytes), else '?'
if isinstance(default, str) or isinstance(default, bytes):
return "?"
elif isinstance(default, Iterable):
return "*"
else:
return "?"
def field(default: T = MISSING, help: Optional[str] = None, **extra: Any) -> T:
metadata = {"help": help, "type": type(default), "default": default, "nargs": _field_nargs(default), **extra}
return dataclass_field(default_factory=lambda: deepcopy(default), metadata=metadata)
class DataClass(Protocol):
__dataclass_fields__: Dict[str, Field]
def make_parser(parser: A, config: Type[DataClass], prefix: Optional[str] = None) -> A:
for field_name, field in config.__dataclass_fields__.items():
# get type from annotations or metadata
typ_ = config.__annotations__.get(field_name, field.metadata.get("type", MISSING))
if typ_ is MISSING:
warn(f"No type annotation for field {field_name} in {config.__name__}")
continue
if is_dataclass(typ_):
# recursively add subparsers
make_parser(parser, typ_, prefix=field_name)
continue
field_name = f"{prefix}.{field_name}" if prefix else field_name
parser.add_argument(
f"--{field_name}",
help=field.metadata.get("help"),
nargs=field.metadata.get("nargs", "?"),
default=MISSING,
)
return parser
def _make_nested_dict(key: str, value: Any, d: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
d = d or {}
if "." in key:
key, rest = key.split(".", 1)
value = _make_nested_dict(rest, value, d.get(key))
if value is not MISSING:
d[key] = value
return d
def namespace_to_nested_omegaconf(args: Namespace, structured: Type[T], config: Optional[dict] = None) -> T:
nested_config_dict: Dict[str, Any] = {}
for key, value in vars(args).items():
nested_config_dict = _make_nested_dict(key, value, nested_config_dict)
untyped_config: DictConfig = om.merge(
om.create(config or {}), om.create(nested_config_dict)
) # pyright: ignore (pylance is confused because om.create might return a DictConfig or a ListConfig)
base_structured_config: DictConfig = om.structured(structured)
merged_config = om.merge(base_structured_config, untyped_config)
assert isinstance(merged_config, DictConfig)
return merged_config # pyright: ignore
def print_config(config: Any, console: Optional[Console] = None) -> None:
if not isinstance(config, (DictConfig, ListConfig)):
config = om.create(config)
console = console or Console()
syntax = Syntax(code=om.to_yaml(config).strip(), lexer="yaml", theme="ansi_dark")
console.print(syntax)
class BaseCli(Generic[D]):
CONFIG: Type[D]
DESCRIPTION: Optional[str] = None
@classmethod
def make_parser(cls, parser: A) -> A:
assert hasattr(cls, "CONFIG"), f"{cls.__name__} must have a CONFIG attribute"
return make_parser(parser, cls.CONFIG)
@classmethod
def run_from_args(cls, args: Namespace, config: Optional[dict] = None):
assert hasattr(cls, "CONFIG"), f"{cls.__name__} must have a CONFIG attribute"
parsed_config = namespace_to_nested_omegaconf(args=args, structured=cls.CONFIG, config=config)
return cls.run(parsed_config)
@classmethod
def run(cls, parsed_config: D):
raise NotImplementedError("Abstract method; must be implemented in subclass")
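# Declaration sketch, for illustration only: a command pairs a structured config (a dataclass
# whose attributes are declared with `field`) with a `BaseCli` subclass exposing `CONFIG`, an
# optional `DESCRIPTION`, and a `run` classmethod. All names below are placeholders.
from dataclasses import dataclass
@dataclass
class _ExampleConfig:
    name: str = field(default="example", help="Name of the run.")
    processes: int = field(default=1, help="Number of processes to use.")
class _ExampleCli(BaseCli):
    CONFIG = _ExampleConfig
    DESCRIPTION = "An example command."
    @classmethod
    def run(cls, parsed_config: _ExampleConfig):
        print_config(parsed_config)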
|
dolma-main
|
python/dolma/cli/__init__.py
|
import copy
import tempfile
from contextlib import ExitStack, contextmanager
from dataclasses import dataclass
from typing import Generator, Optional
from dolma.cli import field
@dataclass
class WorkDirConfig:
input: Optional[str] = field(default=None, help="Path to the input directory.")
output: Optional[str] = field(default=None, help="Path to the output directory.")
@contextmanager
def make_workdirs(config: WorkDirConfig) -> Generator[WorkDirConfig, None, None]:
"""Create temporary work directories and update the config with their paths."""
# make a copy of the configuration
config = copy.deepcopy(config)
with ExitStack() as stack:
if config.input is None:
config.input = stack.enter_context(tempfile.TemporaryDirectory())
if config.output is None:
config.output = stack.enter_context(tempfile.TemporaryDirectory())
yield config
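# Usage sketch, for illustration only: when no paths are set, both work directories are created
# as temporaries and removed when the context exits; explicitly provided paths are left untouched.
def _example_workdirs_usage() -> None:
    with make_workdirs(WorkDirConfig()) as dirs:
        print("input work dir:", dirs.input)
        print("output work dir:", dirs.output)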
|
dolma-main
|
python/dolma/cli/shared.py
|
from dataclasses import dataclass
from typing import List, Optional
from rich.console import Console
from rich.table import Table
from dolma.cli import BaseCli, field, print_config
from dolma.cli.shared import WorkDirConfig, make_workdirs
from dolma.core.errors import DolmaConfigError
from dolma.core.loggers import get_logger
from dolma.core.paths import glob_path
from dolma.core.registry import TaggerRegistry
from dolma.core.runtime import create_and_run_tagger
@dataclass
class TaggerConfig:
documents: List[str] = field(
default=[],
help="One or more document paths to process; Can be either local or S3 paths. Globs are supported.",
)
destination: Optional[List[str]] = field(
default=None,
nargs="*",
help=(
"Destination paths to save the outputs; should match the number of document paths. "
"If not provided, destination will be derived from the document path."
),
)
taggers: List[str] = field(
default=[],
help="List of taggers to run.",
)
experiment: Optional[str] = field(
default=None,
help="Name of the experiment.",
)
processes: int = field(
default=1,
help="Number of parallel processes to use.",
)
ignore_existing: bool = field(
default=False,
help="Whether to ignore existing outputs and re-run the taggers.",
)
debug: bool = field(
default=False,
help="Whether to run in debug mode.",
)
work_dir: WorkDirConfig = field(default=WorkDirConfig(), help="Configuration for temporary work directories.")
class TaggerCli(BaseCli):
CONFIG = TaggerConfig
DESCRIPTION = (
"Tag documents or spans of documents using one or more taggers. "
"For a list of available taggers, run `dolma list`."
)
@classmethod
def run(cls, parsed_config: TaggerConfig):
logger = get_logger("tagger")
with make_workdirs(parsed_config.work_dir) as work_dirs:
documents = [str(p) for p in parsed_config.documents]
taggers = [str(p) for p in parsed_config.taggers]
# perform some path validation to make sure we don't call the mixer with invalid config
total_matching_documents = 0
for document in documents:
current_matching_documents = sum(1 for _ in glob_path(document))
if current_matching_documents == 0:
# only raise a warning if no documents are found for a single path
logger.warn(f"No documents found for path {document}")
total_matching_documents += current_matching_documents
if total_matching_documents == 0:
# but raise an error if no documents are found for all paths
raise DolmaConfigError(f"No documents found for paths {documents}.")
print_config(parsed_config)
create_and_run_tagger(
documents=documents,
destination=parsed_config.destination,
metadata=work_dirs.output,
taggers=taggers,
ignore_existing=parsed_config.ignore_existing,
num_processes=parsed_config.processes,
experiment=parsed_config.experiment,
debug=parsed_config.debug,
)
@dataclass
class ListTaggerConfig:
...
class ListTaggerCli(BaseCli):
CONFIG = ListTaggerConfig
DESCRIPTION = "List available taggers."
@classmethod
def run(cls, parsed_config: ListTaggerConfig):
table = Table(title="dolma taggers", style="bold")
table.add_column("name", justify="left", style="cyan")
table.add_column("class", justify="left", style="magenta")
for tagger_name, tagger_cls in sorted(TaggerRegistry.taggers()):
tagger_repr = f"{tagger_cls.__module__}.{tagger_cls.__name__}"
table.add_row(tagger_name, tagger_repr)
console = Console()
console.print(table)
|
dolma-main
|
python/dolma/cli/tagger.py
|
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from omegaconf import OmegaConf as om
from dolma import deduper
from dolma.cli import BaseCli, field, print_config
from dolma.cli.shared import WorkDirConfig, make_workdirs
from dolma.core.errors import DolmaConfigError
from dolma.core.loggers import get_logger
from dolma.core.paths import glob_path
@dataclass
class ParagraphDedupeConfig:
attribute_name: str = field(help="Name of the output field in the tagger")
@dataclass
class DocumentDedupeConfig:
attribute_name: str = field(help="Name of the output field in the tagger")
key: str = field(help="Name of the input field to use for deduplication, e.g. `$.metadata.url`")
@dataclass
class BloomFilterConfig:
file: str = field(help="Path where to read/write the bloom filter file to/from. Required.")
size_in_bytes: int = field(
default=-1,
help=(
"Size of the bloom filter in bytes. Either this value is provided, or both estimated_doc_count "
"and desired_false_positive_rate."
),
)
read_only: bool = field(help="If true, the bloom filter will be read from the file and not updated. Required.")
estimated_doc_count: int = field(
default=-1,
help=(
"Estimated number of documents to be added to the bloom filter. Either this value is provided, "
"or both size_in_bytes and desired_false_positive_rate."
),
)
desired_false_positive_rate: float = field(
default=-1.0,
help=(
"Desired false positive rate. Either this value is provided, or both size_in_bytes and "
"estimated_doc_count."
),
)
@dataclass
class DedupeConfig:
name: str = field(help="Name of the deduper. Required.")
documents: Optional[DocumentDedupeConfig] = field(
default=None, help="Configuration for document deduplication"
)
paragraphs: Optional[ParagraphDedupeConfig] = field(
default=None, help="Configuration for paragraph deduplication"
)
skip_empty: Optional[bool] = field(default=False, help="If true, empty documents/paragraphs will be skipped")
@dataclass
class DeduperConfig:
documents: List[str] = field(default=[], help="Paths to the documents to be deduplicated. Required.")
work_dir: WorkDirConfig = field(default=WorkDirConfig(), help="Configuration for temporary work directories.")
dedupe: DedupeConfig = field(help="Deduplication configuration. Required.")
bloom_filter: BloomFilterConfig = field(help="Bloom filter configuration. Required.")
processes: int = field(
default=1, help="Number of processes to use for deduplication. If 1, no multiprocessing will be used."
)
class DeduperCli(BaseCli):
CONFIG = DeduperConfig
DESCRIPTION = "Deduplicate documents or paragraphs using a bloom filter."
@classmethod
def run(cls, parsed_config: DeduperConfig):
logger = get_logger("tagger")
dict_config: Dict[str, Any] = {}
with make_workdirs(parsed_config.work_dir) as work_dirs:
dict_config["dedupe"] = {
"name": parsed_config.dedupe.name,
"skip_empty": parsed_config.dedupe.skip_empty,
}
if parsed_config.dedupe.documents is not None:
dict_config["dedupe"]["documents"] = om.to_container(parsed_config.dedupe.documents)
elif parsed_config.dedupe.paragraphs is not None:
dict_config["dedupe"]["paragraphs"] = om.to_container(parsed_config.dedupe.paragraphs)
else:
raise ValueError("Either dedupe.documents or dedupe.paragraphs must be specified")
# perform some path validation to make sure we don't call the mixer with invalid config
total_matching_documents = 0
for document in parsed_config.documents:
if document.count("*") > 1:
raise DolmaConfigError("Only one wildcard is allowed in the document path")
current_matching_documents = sum(1 for _ in glob_path(document))
if current_matching_documents == 0:
# only raise a warning if no documents are found for a single path
logger.warn(f"No documents found for path {document}")
total_matching_documents += current_matching_documents
if total_matching_documents == 0:
# but raise an error if no documents are found for all paths
raise DolmaConfigError(f"No documents found for the paths {parsed_config.documents}.")
dict_config["bloom_filter"] = {
"file": parsed_config.bloom_filter.file,
"read_only": parsed_config.bloom_filter.read_only,
"size_in_bytes": getattr(parsed_config.bloom_filter, "size_in_bytes", 0),
"estimated_doc_count": getattr(parsed_config.bloom_filter, "estimated_doc_count", 0),
"desired_false_positive_rate": getattr(
parsed_config.bloom_filter, "desired_false_positive_rate", 0
),
}
if dict_config["bloom_filter"]["size_in_bytes"] <= 0 and (
dict_config["bloom_filter"]["estimated_doc_count"] <= 0
or dict_config["bloom_filter"]["desired_false_positive_rate"] <= 0
):
raise ValueError(
"Either bloom_filter.size_in_bytes or bloom_filter.estimated_doc_count and "
"bloom_filter.desired_false_positive_rate must be specified"
)
dict_config["work_dir"] = {"input": work_dirs.input, "output": work_dirs.output}
dict_config["processes"] = parsed_config.processes
dict_config["documents"] = list(om.to_container(parsed_config.documents)) # pyright: ignore
if len(dict_config["documents"]) == 0:
raise ValueError("At least one document must be specified")
print_config(dict_config)
return deduper(dict_config)
|
dolma-main
|
python/dolma/cli/deduper.py
|
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from dolma import mixer
from dolma.cli import BaseCli, field, print_config
from dolma.cli.shared import WorkDirConfig, make_workdirs
from dolma.core.errors import DolmaConfigError
from dolma.core.loggers import get_logger
from dolma.core.paths import glob_path
@dataclass
class StreamOutputConfig:
path: str = field(help="Path where to write the mixed documents to. Required.")
max_size_in_bytes: int = field(
default=2 * 2**30, help="Maximum size of the output file in bytes. Defaults to 2GB."
)
discard_fields: List[str] = field(default=[], help="List of fields to discard from the output documents.")
@dataclass
class FilterConfig:
include: List[str] = field(default=[], help="JSONPath expressions to include documents")
exclude: List[str] = field(default=[], help="JSONPath expressions to exclude documents")
@dataclass
class SpanReplacementConfig:
span: str = field(help="JSONPath expression for the span to replace")
min_score: float = field(default=0.5, help="Minimum score for the span to be replaced")
replacement: str = field(default="", help="Replacement for the span")
@dataclass
class StreamConfig:
name: str = field(help="Name of the stream. Required.")
documents: List[str] = field(default=[], help="Paths to the documents to be mixed. Required.")
output: StreamOutputConfig = field(
default=StreamOutputConfig(), help="Configuration for the output of the stream."
)
attributes: List[str] = field(default=[], help="List of attributes files to used for mixing.")
filter: Optional[FilterConfig] = field( # pyright: ignore
default=None, help="Configuration for filtering documents."
)
span_replacement: List[SpanReplacementConfig] = field(default=[], help="Configuration for replacing spans.")
@dataclass
class MixerConfig:
streams: List[StreamConfig] = field(default=[], help="List configurations of streams to be mixed")
work_dir: WorkDirConfig = field(default=WorkDirConfig(), help="Configuration for temporary work directories.")
processes: int = field(default=1, help="Number of processes to use for mixing. By default 1 process is used.")
class MixerCli(BaseCli):
CONFIG = MixerConfig
DESCRIPTION = "Mix documents from multiple streams."
@classmethod
def run(cls, parsed_config: MixerConfig):
logger = get_logger("mixer")
with make_workdirs(parsed_config.work_dir) as work_dirs:
dict_config: Dict[str, Any] = {
"work_dir": {"input": work_dirs.input, "output": work_dirs.output},
"processes": parsed_config.processes,
"streams": [],
}
for stream_config in parsed_config.streams:
stream_config_dict: Dict[str, Any] = {}
if stream_config.filter is not None:
if not stream_config.filter.include and not stream_config.filter.exclude:
raise DolmaConfigError("Either `include` or `exclude` must be specified for filter")
stream_config_dict["filter"] = {
"include": list(stream_config.filter.include),
"exclude": list(stream_config.filter.exclude),
}
for span_replacement in stream_config.span_replacement:
stream_config_dict.setdefault("span_replacement", []).append(
{
"span": span_replacement.span,
"min_score": span_replacement.min_score,
"replacement": span_replacement.replacement,
}
)
if "span_replacement" not in stream_config_dict and "filter" not in stream_config_dict:
raise DolmaConfigError("Either `filter` or `span_replacement` must be specified")
# perform some path validation to make sure we don't call the mixer with invalid config
total_matching_documents = 0
for document in stream_config.documents:
if document.count("*") > 1:
raise DolmaConfigError("Only one wildcard is allowed in the document path")
current_matching_documents = sum(1 for _ in glob_path(document))
if current_matching_documents == 0:
# only raise a warning if no documents are found for a single path
logger.warn(f"No documents found for path {document}")
total_matching_documents += current_matching_documents
if total_matching_documents == 0:
# but raise an error if no documents are found for all paths
raise DolmaConfigError(f"No documents found for the paths for {stream_config.name} config.")
# populate the stream config dict
stream_config_dict["name"] = stream_config.name
stream_config_dict["documents"] = list(stream_config.documents)
stream_config_dict["attributes"] = list(stream_config.attributes)
stream_config_dict["output"] = {
"path": stream_config.output.path,
"max_size_in_bytes": stream_config.output.max_size_in_bytes,
}
if stream_config.output.discard_fields:
stream_config_dict["output"]["discard_fields"] = list(stream_config.output.discard_fields)
if len(stream_config_dict["documents"]) == 0:
raise ValueError("No documents to mix")
dict_config["streams"].append(stream_config_dict)
if len(dict_config["streams"]) == 0:
raise DolmaConfigError("No streams to mix")
print_config(dict_config)
return mixer(dict_config)
|
dolma-main
|
python/dolma/cli/mixer.py
|
from argparse import ArgumentParser
from pathlib import Path
from typing import List, Optional
from yaml import safe_load
from .analyzer import AnalyzerCli
from .deduper import DeduperCli
from .mixer import MixerCli
# must import these to register the resolvers
from .resolvers import * # noqa: F401,F403
from .tagger import ListTaggerCli, TaggerCli
AVAILABLE_COMMANDS = {
"dedupe": DeduperCli,
"mix": MixerCli,
"tag": TaggerCli,
"list": ListTaggerCli,
"stat": AnalyzerCli,
# following functionality is not yet implemented
# "train-ft": None,
# "train-lm": None,
}
def main(argv: Optional[List[str]] = None):
parser = ArgumentParser(
prog="dolma",
usage="dolma [command] [options]",
description="Command line interface for the DOLMa dataset processing toolkit",
)
parser.add_argument(
"-c",
"--config",
help="Path to configuration optional file",
type=Path,
default=None,
)
subparsers = parser.add_subparsers(dest="command")
subparsers.required = True
subparsers.choices = AVAILABLE_COMMANDS.keys() # type: ignore
for command, cli in AVAILABLE_COMMANDS.items():
cli.make_parser(subparsers.add_parser(command, help=cli.DESCRIPTION))
args = parser.parse_args(argv)
# try parsing the config file
config: Optional[dict] = None
if config_path := args.__dict__.pop("config"):
assert config_path.exists(), f"Config file {config_path} does not exist"
with open(config_path) as f:
config = dict(safe_load(f))
AVAILABLE_COMMANDS[args.__dict__.pop("command")].run_from_args(args=args, config=config)
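# Usage sketch, for illustration only: the entry point can also be driven programmatically by
# passing argv explicitly, which is handy in tests.
def _example_cli_usage() -> None:
    main(["list"])  # prints the table of available taggers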
|
dolma-main
|
python/dolma/cli/__main__.py
|
import logging
import os
from dataclasses import dataclass
from typing import List, Set
from ..core.data_types import DocResult, Document, Span
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
MIN_WORDS_PER_LINE = 3
naughty_lines = (
open(os.path.join(os.path.dirname(os.path.dirname(__file__)), "data", "naughty_words_en.txt"))
.read()
.splitlines()
)
NAUGHTY_WORDS: Set[str] = set(w for w in naughty_lines if " " not in w)
NAUGHTY_PHRASES: Set[str] = set(w for w in naughty_lines if " " in w)
@dataclass
class C4Attributes:
lines_with_no_ending_punctuation: List[Span]
lines_with_too_few_words: List[Span]
has_naughty_word: bool = False
has_javascript: bool = False
has_lorem_ipsum: bool = False
has_curly_brace: bool = False
line_count: int = 0
character_count: int = 0
def as_spans(self) -> List[Span]:
spans = []
spans.extend(self.lines_with_no_ending_punctuation)
spans.extend(self.lines_with_too_few_words)
if self.has_naughty_word:
spans.append(Span(0, self.character_count, type="has_naughty_word"))
if self.has_javascript:
spans.append(Span(0, self.character_count, type="has_javascript"))
if self.has_lorem_ipsum:
spans.append(Span(0, self.character_count, type="has_lorem_ipsum"))
if self.has_curly_brace:
spans.append(Span(0, self.character_count, type="has_curly_brace"))
spans.append(Span(0, self.character_count, type="line_count", score=self.line_count))
return spans
def get_attributes(text: str) -> C4Attributes:
attrs = C4Attributes([], [])
attrs.character_count = len(text)
try:
lines = text.split("\n")
attrs.line_count = len(lines)
offset = 0
for line_no in range(0, len(lines)):
original_line = lines[line_no]
end_offset = offset + len(original_line)
if line_no < len(lines) - 1:
end_offset += 1
line = original_line.lower().strip()
if not line.endswith((".", "?", "!", '"')):
attrs.lines_with_no_ending_punctuation.append(
Span(offset, end_offset, type="lines_with_no_ending_punctuation")
)
words = line.split()
if len(words) < MIN_WORDS_PER_LINE:
attrs.lines_with_too_few_words.append(Span(offset, end_offset, type="lines_with_too_few_words"))
if any(word in NAUGHTY_WORDS for word in words) or any(phrase in line for phrase in NAUGHTY_PHRASES):
attrs.has_naughty_word = True
if any(word == "javascript" for word in words):
attrs.has_javascript = True
if "lorem ipsum" in line:
attrs.has_lorem_ipsum = True
if "{" in line:
attrs.has_curly_brace = True
offset = end_offset
except Exception:
logging.exception(f"Error parsing text: {text[:200]}")
return attrs
@TaggerRegistry.add("c4_v1")
class C4Tagger(BaseTagger):
def predict(self, doc: Document) -> DocResult:
attrs = get_attributes(doc.text)
result = DocResult(doc=doc, spans=attrs.as_spans())
return result
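# Usage sketch, for illustration only: running the tagger on an in-memory document. The source,
# version, and id values are placeholders; in the pipeline they come from the input JSONL rows.
def _example_c4_usage() -> None:
    doc = Document(source="example", version="v0", id="0", text="lorem ipsum dolor sit amet\nok")
    result = C4Tagger().predict(doc)
    for span in result.spans:
        print(span.type, span.start, span.end, span.score)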
|
dolma-main
|
python/dolma/taggers/c4.py
|
import logging
from collections import Counter
from dataclasses import dataclass
from statistics import median
from typing import Counter as CounterType
from typing import List, Tuple
from ..core.data_types import DocResult, Document, Span
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
REQUIRED_ENGLISH_WORDS = {"the", "be", "to", "of", "and", "that", "have", "with"}
SYMBOLS = {"#", "\u2026"}
BULLET_POINTS = {"*", "-"}
@dataclass
class GopherAttributes:
fraction_of_characters_in_most_common_ngram: List[Tuple[int, float]]
fraction_of_characters_in_duplicate_ngrams: List[Tuple[int, float]]
character_count: int = 0
word_count: int = 0
median_word_length: float = 0.0
symbol_to_word_ratio: float = 0.0
fraction_of_words_with_alpha_character: float = 0.0
required_word_count: int = 0
fraction_of_lines_starting_with_bullet_point: float = 0.0
fraction_of_lines_ending_with_ellipsis: float = 0.0
fraction_of_duplicate_lines: float = 0.0
fraction_of_characters_in_duplicate_lines: float = 0.0
def as_spans(self) -> List[Span]:
spans = []
spans.extend(
[
Span(0, self.character_count, f"fraction_of_characters_in_most_common_{n}grams", v)
for n, v in self.fraction_of_characters_in_most_common_ngram
]
)
spans.extend(
[
Span(0, self.character_count, f"fraction_of_characters_in_duplicate_{n}grams", v)
for n, v in self.fraction_of_characters_in_duplicate_ngrams
]
)
spans.append(Span(0, self.character_count, type="character_count", score=self.character_count))
spans.append(Span(0, self.character_count, type="word_count", score=self.word_count))
spans.append(Span(0, self.character_count, type="median_word_length", score=self.median_word_length))
spans.append(Span(0, self.character_count, type="symbol_to_word_ratio", score=self.symbol_to_word_ratio))
spans.append(
Span(
0,
self.character_count,
type="fraction_of_words_with_alpha_character",
score=self.fraction_of_words_with_alpha_character,
)
)
spans.append(Span(0, self.character_count, type="required_word_count", score=self.required_word_count))
spans.append(
Span(
0,
self.character_count,
type="fraction_of_lines_starting_with_bullet_point",
score=self.fraction_of_lines_starting_with_bullet_point,
)
)
spans.append(
Span(
0,
self.character_count,
type="fraction_of_lines_ending_with_ellipsis",
score=self.fraction_of_lines_ending_with_ellipsis,
)
)
spans.append(
Span(
0, self.character_count, type="fraction_of_duplicate_lines", score=self.fraction_of_duplicate_lines
)
)
spans.append(
Span(
0,
self.character_count,
type="fraction_of_characters_in_duplicate_lines",
score=self.fraction_of_characters_in_duplicate_lines,
)
)
return spans
def get_attributes(text: str) -> GopherAttributes:
attrs = GopherAttributes([], [])
attrs.character_count = len(text)
if attrs.character_count == 0:
return attrs
try:
words = text.split()
word_count = len(words)
character_count = sum(len(word) for word in words)
attrs.word_count = word_count
attrs.median_word_length = median([len(word) for word in words])
attrs.symbol_to_word_ratio = sum(1 for word in words if any(s in word for s in SYMBOLS)) / word_count
attrs.fraction_of_words_with_alpha_character = (
sum(1 for word in words if any(c.isalpha() for c in word)) / word_count
)
attrs.required_word_count = sum(1 for word in words if word in REQUIRED_ENGLISH_WORDS)
all_counts = all_ngram_counts(words)
count_most_common_ngrams = {2, 3, 4}
for n, ngram_counts in all_counts:
if not ngram_counts:
continue
if n in count_most_common_ngrams:
most_common_ngram, count = ngram_counts.most_common(1)[0]
value = count * sum(len(w) for w in most_common_ngram) / character_count
attrs.fraction_of_characters_in_most_common_ngram.append((n, value))
else:
ng_char_count = sum(count * sum(len(w) for w in ng) for ng, count in ngram_counts.items())
value = (
sum(count * sum(len(w) for w in ng) for ng, count in ngram_counts.items() if count > 1)
/ ng_char_count
)
attrs.fraction_of_characters_in_duplicate_ngrams.append((n, value))
lines = text.split("\n")
line_count = len(lines)
for line in lines:
if any(line.startswith(s) for s in BULLET_POINTS):
attrs.fraction_of_lines_starting_with_bullet_point += 1
if line.endswith("\u2026"):
attrs.fraction_of_lines_ending_with_ellipsis += 1
attrs.fraction_of_lines_starting_with_bullet_point /= line_count
attrs.fraction_of_lines_ending_with_ellipsis /= line_count
line_counts = Counter(lines)
attrs.fraction_of_duplicate_lines = (
sum(count for line, count in line_counts.items() if count > 1) / line_count
)
attrs.fraction_of_characters_in_duplicate_lines = (
sum(len(line) * count for line, count in line_counts.items() if count > 1) / character_count
)
except Exception as e:
logging.exception(f"Error processing text {e}: {text[:200]}")
return attrs
def all_ngram_counts(words) -> List[Tuple[int, CounterType[Tuple[str, ...]]]]:
return [(n, Counter(list(zip(*[words[i:] for i in range(n)])))) for n in range(2, 11)]
def all_ngram_counts_alt(words: List[str]) -> List[Tuple[int, CounterType[Tuple[str, ...]]]]:
"""Seems like it should be faster, but isn't"""
ngram: List[Tuple[str, ...]] = list(zip(words, words[1:]))
all_counts: List[Tuple[int, CounterType[Tuple[str, ...]]]] = [(2, Counter(ngram))]
for n in range(3, 11):
ngram = list(a + (b,) for a, b in zip(ngram, words[n - 1 :]))
all_counts.append((n, Counter(ngram)))
return all_counts
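# --- Editor's illustrative sketch (not part of the original dolma module) ---
# Shows the shape of all_ngram_counts output on a tiny, made-up word list: one Counter of
# n-gram tuples per n in 2..10, which get_attributes (above) turns into character fractions.
def _sketch_ngram_counts():
    """Hypothetical helper (editor addition) that prints the most common bigram."""
    words = "the cat and the cat sat".split()
    for n, counts in all_ngram_counts(words):
        if n == 2 and counts:
            print(counts.most_common(1))  # e.g. [(('the', 'cat'), 2)]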
@TaggerRegistry.add("gopher_v1")
class GopherTagger(BaseTagger):
def predict(self, doc: Document) -> DocResult:
attrs = get_attributes(doc.text)
result = DocResult(doc=doc, spans=attrs.as_spans())
return result
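# --- Editor's illustrative sketch (not part of the original dolma module) ---
# Minimal usage example: GopherTagger turns a document into one span per Gopher-style
# quality attribute. The Document field values below are made up for illustration, and the
# module's imports above are assumed to be available.
if __name__ == "__main__":
    example = Document(
        source="example",
        version="v0",
        id="0",
        text="the cat and the cat sat on the mat\n- a bullet item\nto be continued\u2026\n",
    )
    for span in GopherTagger().predict(example).spans:
        print(f"{span.type}: {span.score}")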
|
dolma-main
|
python/dolma/taggers/gopher.py
|
"""
Filters.
@kylel, @soldni
"""
import regex
import uniseg.wordbreak
from tokenizers import Regex, pre_tokenizers
from ..core.data_types import DocResult, Document, Span
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
from ..core.utils import split_paragraphs
@TaggerRegistry.add("char_length_v1")
class CharLengthV1(BaseTagger):
def predict(self, doc: Document) -> DocResult:
score = len(doc.text)
return DocResult(doc=doc, spans=[Span(start=0, end=len(doc.text), type="length", score=score)])
@TaggerRegistry.add("char_length_with_paragraphs_v1")
class CharLengthWithParagraphsV1(BaseTagger):
def predict(self, doc: Document) -> DocResult:
spans = [
Span(start=p.start, end=p.end, type="paragraph", score=len(p.text)) for p in split_paragraphs(doc.text)
]
spans.append(Span(start=0, end=len(doc.text), type="document", score=len(doc.text)))
return DocResult(doc=doc, spans=spans)
@TaggerRegistry.add("whitespace_tokenizer_v1")
class WhitespaceLengthV1(BaseTagger):
WHITESPACE_REGEX = regex.compile(r"\w+|[^\w\s]+")
def predict(self, doc: Document) -> DocResult:
score = len(self.WHITESPACE_REGEX.split(doc.text))
return DocResult(doc=doc, spans=[Span(start=0, end=len(doc.text), type="length", score=score)])
@TaggerRegistry.add("whitespace_tokenizer_with_paragraphs_v1")
class WhitespaceLengthParagraphsV1(WhitespaceLengthV1):
def predict(self, doc: Document) -> DocResult:
spans = [
Span(start=p.start, end=p.end, type="paragraph", score=len(self.WHITESPACE_REGEX.split(p.text)))
for p in split_paragraphs(doc.text)
]
spans.append(Span(start=0, end=len(doc.text), type="document", score=sum(s.score for s in spans)))
return DocResult(doc=doc, spans=spans)
@TaggerRegistry.add("uniseg_length_paragraphs_v1")
class UnisegParagraphsV1(BaseTagger):
def predict(self, doc: Document) -> DocResult:
spans = []
for para in split_paragraphs(doc.text):
# we ignore whitespace-only tokens when counting words
para_length = sum(1 for w in uniseg.wordbreak.words(para.text.strip()) if w.strip())
spans.append(Span(start=para.start, end=para.end, type="paragraph", score=para_length))
            # we also record the negative paragraph length: the mixer can only filter with
            # greater-than comparisons, so dropping paragraphs shorter than n words is
            # expressed as keeping spans with negative_paragraph > -n (sketched after this class)
spans.append(Span(start=para.start, end=para.end, type="negative_paragraph", score=-para_length))
return DocResult(doc=doc, spans=spans)
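# --- Editor's illustrative sketch (not part of the original dolma module) ---
# Demonstrates the negative_paragraph design above: because downstream filtering only
# supports "greater than" comparisons, keeping paragraphs shorter than `n` words is
# expressed as keeping spans whose score satisfies negative_paragraph > -n.
def _sketch_paragraphs_shorter_than(doc: Document, n: int = 5):
    """Hypothetical helper (editor addition): spans for paragraphs with fewer than ``n`` words."""
    result = UnisegParagraphsV1().predict(doc)
    return [s for s in result.spans if s.type == "negative_paragraph" and s.score > -n]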
@TaggerRegistry.add("uniseg_length_paragraphs_with_doc_length_v1")
class UnisegParagraphsWithDocLengthV1(UnisegParagraphsV1):
def predict(self, doc: Document) -> DocResult:
doc_results = super().predict(doc)
pos_len = sum(s.score for s in doc_results.spans if s.type == "paragraph")
neg_len = sum(s.score for s in doc_results.spans if s.type == "negative_paragraph")
doc_results.spans.append(Span(start=0, end=len(doc.text), type="document", score=pos_len))
doc_results.spans.append(Span(start=0, end=len(doc.text), type="negative_document", score=neg_len))
return doc_results
@TaggerRegistry.add("olmo_pretokenizer_v1")
class OlmoPreTokenizerV1(BaseTagger):
def __init__(self) -> None:
self.pre_tokenizer = pre_tokenizers.Sequence(
[
# Split on all punctuation.
pre_tokenizers.Split(
pattern=Regex(" ?[[:punct:]]"),
behavior="isolated",
invert=False,
),
# Split up digits.
pre_tokenizers.Split(
pattern=Regex(" ?\\d"),
behavior="isolated",
invert=False,
),
pre_tokenizers.ByteLevel(add_prefix_space=False, use_regex=True),
]
)
def predict(self, doc: Document) -> DocResult:
score = len(self.pre_tokenizer.pre_tokenize_str(doc.text))
return DocResult(doc=doc, spans=[Span(start=0, end=len(doc.text), type="length", score=score)])
@TaggerRegistry.add("olmo_pretokenizer_with_paragraphs_v1")
class OlmoPreTokenizerParagraphsV1(OlmoPreTokenizerV1):
def predict(self, doc: Document) -> DocResult:
spans = [
Span(
start=p.start, end=p.end, type="paragraph", score=len(self.pre_tokenizer.pre_tokenize_str(p.text))
)
for p in split_paragraphs(doc.text)
]
spans.append(Span(start=0, end=len(doc.text), type="document", score=sum(s.score for s in spans)))
return DocResult(doc=doc, spans=spans)
|
dolma-main
|
python/dolma/taggers/length.py
|
"""
Code-related taggers.
@akshitab
"""
import logging
import re
from typing import Generator, List
import numpy as np
import regex
from detect_secrets import SecretsCollection
from detect_secrets.core.scan import (
PotentialSecret,
_process_line_based_plugins,
get_plugins,
)
from detect_secrets.settings import default_settings
from ..core.data_types import DocResult, Document, Span
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
logger = logging.getLogger(__name__)
def scan_code(code: str) -> Generator[PotentialSecret, None, None]:
if not get_plugins():
logger.error("No plugins to scan with!")
return
has_secret = False
for lines in [code.splitlines()]:
for secret in _process_line_based_plugins(
lines=list(enumerate(lines, start=1)),
filename="code_str.yml",
):
has_secret = True
yield secret
if has_secret:
break
class SecretsCollectionForStringInput(SecretsCollection):
def scan_str(self, code_str: str):
for secret in scan_code(code_str):
self["code_str.yml"].add(secret)
def get_secrets(code: str):
secrets = SecretsCollectionForStringInput()
with default_settings():
secrets.scan_str(code)
return secrets
@TaggerRegistry.add("code_secrets_v1")
class CodeSecretsTagger(BaseTagger):
@classmethod
def _extract_code_secrets(cls, text: str) -> List[Span]:
secrets_spans: List[Span] = []
text_lines = text.splitlines()
secrets = get_secrets(text)
for _, secret in secrets:
line_number = secret.line_number - 1
span = secret.secret_value
span_line = text_lines[line_number]
line_start = text.find(span_line)
start = line_start + span_line.find(span)
end = start + len(span)
assert text[start:end] == span
secret_type = secret.type.replace(" ", "_")
secrets_spans.append(Span(start=start, end=end, type=f"SECRET_{secret_type}")) # , span])
return secrets_spans
def predict(self, doc: Document) -> DocResult:
"""Main runner."""
spans = self._extract_code_secrets(doc.text)
# document-level score
score = self._score(text=doc.text, secrets_spans=spans)
spans.append(Span(start=0, end=len(doc.text), type="doc", score=score))
return DocResult(doc=doc, spans=spans)
def _score(self, text: str, secrets_spans: List[Span]) -> float:
try:
score = len(secrets_spans) * 1.0 / len(text.split())
except ZeroDivisionError:
score = -1.0
return score
@TaggerRegistry.add("code_copyright_comments_v1")
class CodeCopyrightTagger(BaseTagger):
"""
Based on RedPajama code filtering.
"""
def __init__(self):
self.cpat = re.compile("copyright", re.IGNORECASE)
self.pat = re.compile("/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/")
def _extract_copyright_spans(self, text: str) -> List[Span]:
copyright_spans: List[Span] = []
reg = self.pat.search(text)
if reg:
# found one, now see if it contains "copyright", if so strip it
span = reg.span()
sub = text[span[0] : span[1]]
if self.cpat.search(sub):
copyright_spans.append(Span(start=span[0], end=span[1], type="copyright_notice", score=1.0))
return copyright_spans
lines = text.split("\n")
skip = 0
        # Greedily strip a leading block of line comments; in most files
        # this is a copyright header
end = 0
for k in range(len(lines)):
if lines[k].startswith("//") or lines[k].startswith("#") or lines[k].startswith("--") or not lines[k]:
skip = skip + 1
if not lines[k]:
end += 1
else:
end += len(lines[k])
else:
break
if skip:
copyright_spans.append(Span(start=0, end=end, type="comment_block", score=1.0))
return copyright_spans
def predict(self, doc: Document) -> DocResult:
"""Main runner."""
spans = self._extract_copyright_spans(doc.text)
# document-level score
score = self._score(text=doc.text, copyright_spans=spans)
spans.append(Span(start=0, end=len(doc.text), type="doc", score=score))
return DocResult(doc=doc, spans=spans)
def _score(self, text: str, copyright_spans: List[Span]) -> float:
try:
if len(copyright_spans) == 0:
score = 0.0
else:
span = copyright_spans[0]
# percentage of content affected
score = (span.end - span.start + 1) * 1.0 / len(text)
except ZeroDivisionError:
score = -1.0
return score
@TaggerRegistry.add("code_redpajama_taggers_v1")
class CodeRedPajamaTaggers(BaseTagger):
"""
Based on RedPajama code filtering.
"""
WHITESPACE_REGEX = regex.compile(r"\w+|[^\w\s]+")
def _get_num_tokens(self, text: str):
return len(self.WHITESPACE_REGEX.split(text))
def predict(self, doc: Document) -> DocResult:
"""Main runner."""
spans: List[Span] = []
doc_length = len(doc.text)
line_lengths = list(map(len, doc.text.splitlines()))
max_line_length = max(line_lengths, default=0.0)
avg_line_length = np.mean(line_lengths) if len(line_lengths) > 0 else 0.0
alnum_count = sum(map(lambda char: 1 if char.isalnum() else 0, doc.text))
alnum_prop = (alnum_count / doc_length) if doc_length > 0 else 0.0
num_tokens = self._get_num_tokens(doc.text)
num_alpha = len([c for c in doc.text if c.isalpha()])
alpha_token_prop = (num_alpha / num_tokens) if num_tokens > 0 else 0.0
# document-level scores
spans.append(Span(start=0, end=doc_length, type="max_line_length_doc", score=max_line_length))
spans.append(Span(start=0, end=doc_length, type="avg_line_length_doc", score=avg_line_length))
spans.append(Span(start=0, end=doc_length, type="alnum_prop_doc", score=alnum_prop))
spans.append(Span(start=0, end=doc_length, type="alpha_token_prop_doc", score=alpha_token_prop))
return DocResult(doc=doc, spans=spans)
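# --- Editor's illustrative sketch (not part of the original dolma module) ---
# Shows CodeCopyrightTagger on a made-up source snippet: the detected comment/copyright
# header is reported as a span, and the doc-level score is the fraction of characters it covers.
if __name__ == "__main__":
    example_code = "/* Copyright (c) 2023 Example Corp. */\nint main(void) { return 0; }\n"
    example_doc = Document(source="example", version="v0", id="0", text=example_code)
    for span in CodeCopyrightTagger().predict(example_doc).spans:
        print(f"{span.type}: [{span.start}, {span.end}) score={span.score}")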
|
dolma-main
|
python/dolma/taggers/code.py
|
from . import c4, code, gopher, jigsaw, language, length, pii, sampling
|
dolma-main
|
python/dolma/taggers/__init__.py
|
import random
from multiprocessing import current_process
from ..core.data_types import DocResult, Document, Span
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
@TaggerRegistry.add("random_number_v1")
class RandomNumberTagger(BaseTagger):
def __init__(self, seed: int = 1) -> None:
assert seed > 0
# we multiply the seed by the current process id to ensure that each
# process has a different seed
self.seed = ((current_process().pid or 0) + 1) * seed
random.seed(self.seed)
def predict(self, doc: Document) -> DocResult:
score = random.random()
return DocResult(doc=doc, spans=[Span(start=0, end=len(doc.text), type="random", score=score)])
|
dolma-main
|
python/dolma/taggers/sampling.py
|
"""
Filters.
@kylel, @soldni
"""
from typing import Iterable
from ..core.data_types import TextSlice
from ..core.ft_tagger import BaseFastTextTagger, Prediction
from ..core.registry import TaggerRegistry
@TaggerRegistry.add("jigsaw_hatespeech_document_v2")
class FastTextJigsawHatespeechDocumentTagger(BaseFastTextTagger):
MODEL_PATH = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/aakankshan/olmo-data-filters/jigsaw_fasttext_bigrams_hatespeech_final.bin" # noqa: E501
def __init__(self):
super().__init__(model_path=self.MODEL_PATH, model_mode=self.DOCUMENT_LEVEL_TAGGER)
def predict_slice(self, text_slice: TextSlice) -> Iterable[Prediction]:
labels, probs = self.classifier.predict(text_slice.text.replace("\n", " ").strip(), k=-1)
label_index = 1 if "non" in labels[0] else 0 # pyright: ignore
return (
Prediction(label=labels[label_index], score=probs[label_index]),
Prediction(label=labels[1 - label_index], score=probs[1 - label_index]),
)
@TaggerRegistry.add("jigsaw_hatespeech_sentence_v2")
class FastTextJigsawHatespeechSentenceTagger(FastTextJigsawHatespeechDocumentTagger):
def __init__(self):
BaseFastTextTagger.__init__(self, model_path=self.MODEL_PATH, model_mode=self.SENTENCE_LEVEL_TAGGER)
@TaggerRegistry.add("jigsaw_nsfw_document_v1")
class FastTextJigsawNsfwDocumentTagger(FastTextJigsawHatespeechDocumentTagger):
MODEL_PATH = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/aakankshan/olmo-data-filters/jigsaw_fasttext_bigrams_nsfw_final.bin" # noqa: E501
@TaggerRegistry.add("jigsaw_nsfw_sencence_v2")
class FastTextJigsawNsfwSentenceTagger(FastTextJigsawHatespeechSentenceTagger):
MODEL_PATH = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/aakankshan/olmo-data-filters/jigsaw_fasttext_bigrams_nsfw_final.bin" # noqa: E501
|
dolma-main
|
python/dolma/taggers/jigsaw.py
|
"""
Filters.
@kylel, @soldni
"""
from typing import Iterable, List, Tuple
try:
import cld3
CLD3_AVAILABLE = True
except ImportError:
CLD3_AVAILABLE = False
import pycld2 as cld2
import regex
from anyascii import anyascii
from ..core.data_types import DocResult, Document, Span, TextSlice
from ..core.ft_tagger import BaseFastTextTagger, Prediction
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
from ..core.utils import split_paragraphs
@TaggerRegistry.add("cld3_en_doc_v2")
class Cld3LanguageTagger(BaseTagger):
def __init__(self) -> None:
if not CLD3_AVAILABLE:
            raise ImportError(f"cld3 is not installed, cannot instantiate {self.__class__.__name__}")
def _predict_text(self, text: str) -> Tuple[str, float]:
pred = cld3.get_language(text) # pyright: ignore
score = pred.probability if pred.language == "en" else 0.0
return "en", score
def predict(self, doc: Document) -> DocResult:
lang, score = self._predict_text(doc.text)
positive_span = Span(start=0, end=len(doc.text), type=lang, score=score)
negative_span = Span(start=0, end=len(doc.text), type=f"not_{lang}", score=1.0 - score)
return DocResult(doc=doc, spans=[positive_span, negative_span])
@TaggerRegistry.add("cld3_en_paragraph_v2")
class Cld3LanguageTaggerParagraph(Cld3LanguageTagger):
def predict(self, doc: Document) -> DocResult:
paragraphs = split_paragraphs(doc.text)
spans: List[Span] = []
for paragraph in paragraphs:
lang, score = self._predict_text(paragraph.text) # pyright: ignore
positive_span = Span(start=paragraph.start, end=paragraph.end, type=lang, score=score)
negative_span = Span(start=paragraph.start, end=paragraph.end, type=f"not_{lang}", score=1.0 - score)
spans.extend((positive_span, negative_span))
return DocResult(doc=doc, spans=spans)
@TaggerRegistry.add("cld2_en_doc_v2")
class Cld2LanguageFilter(BaseTagger):
RE_BAD_CHARS = regex.compile(r"[\p{Cc}\p{Cs}]+")
def _sanitize_input(self, text: str) -> str:
return self.RE_BAD_CHARS.sub("", text)
def _to_ascii_input(self, text: str) -> str:
return anyascii(text)
def _identity_fn(self, text: str) -> str:
return text
def _predict_text(self, text: str) -> Tuple[str, float]:
details = []
is_reliable = False
for fn in (self._identity_fn, self._to_ascii_input, self._sanitize_input):
try:
is_reliable, _, details = cld2.detect(fn(text))
break
except cld2.error:
...
score = max([d[2] for d in details if d[0] == "ENGLISH" and is_reliable] or [0])
return "en", score / 100.0
def predict(self, doc: Document) -> DocResult:
lang, score = self._predict_text(doc.text)
positive_span = Span(start=0, end=len(doc.text), type=lang, score=score)
negative_span = Span(start=0, end=len(doc.text), type=f"not_{lang}", score=1.0 - score)
return DocResult(doc=doc, spans=[positive_span, negative_span])
@TaggerRegistry.add("cld2_en_paragraph_v2")
class Cld2LanguageFilterParagraph(Cld2LanguageFilter):
def predict(self, doc: Document) -> DocResult:
paragraphs = split_paragraphs(doc.text)
spans: List[Span] = []
for paragraph in paragraphs:
lang, score = self._predict_text(paragraph.text) # pyright: ignore
positive_span = Span(start=paragraph.start, end=paragraph.end, type=lang, score=score)
negative_span = Span(start=paragraph.start, end=paragraph.end, type=f"not_{lang}", score=1.0 - score)
spans.extend((positive_span, negative_span))
return DocResult(doc=doc, spans=spans)
@TaggerRegistry.add("ft_lang_id_en_doc_v2")
class FastTextEnglishLanguageDocumentTagger(BaseFastTextTagger):
MODEL_PATH = "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin"
def __init__(self):
super().__init__(model_path=self.MODEL_PATH, model_mode=self.DOCUMENT_LEVEL_TAGGER)
def predict_slice(self, text_slice: TextSlice) -> Iterable[Prediction]:
pred = self.classifier.predict(text_slice.text.lower().replace("\n", " ").strip(), k=-1)
for label, score in zip(*pred):
if label == "__label__en":
return Prediction(label="en", score=score), Prediction(label="not_en", score=1.0 - score)
return Prediction(label="en", score=0.0), Prediction(label="not_en", score=1.0)
@TaggerRegistry.add("ft_lang_id_en_paragraph_v2")
class FastTextEnglishLanguageParagraphTagger(FastTextEnglishLanguageDocumentTagger):
def __init__(self):
BaseFastTextTagger.__init__(self, model_path=self.MODEL_PATH, model_mode=self.PARAGRAPH_LEVEL_TAGGER)
def add_global_language_score_from_slice_score(result: DocResult) -> DocResult:
    # the document-level English score is the sum over "en" spans of (span length in characters
    # times the span's English likelihood), normalized by the total document length
try:
doc_en_score = sum((s.end - s.start) * s.score for s in result.spans if s.type == "en") / len(
result.doc.text
)
doc_not_en_score = 1 - doc_en_score
except ZeroDivisionError:
doc_en_score = doc_not_en_score = 0.0
doc_level = (
Span(start=0, end=len(result.doc.text), type="doc_en", score=doc_en_score),
Span(start=0, end=len(result.doc.text), type="doc_not_en", score=doc_not_en_score),
)
result.spans.extend(doc_level)
return result
@TaggerRegistry.add("cld2_en_paragraph_with_doc_score_v2")
class Cld2LanguageFilterParagraphWithDocScoreTagger(Cld2LanguageFilterParagraph):
def predict(self, doc: Document) -> DocResult:
doc_result = super().predict(doc)
doc_result = add_global_language_score_from_slice_score(doc_result)
return doc_result
@TaggerRegistry.add("cld3_en_paragraph_with_doc_score_v2")
class Cld3LanguageFilterParagraphWithDocScoreTagger(Cld3LanguageTaggerParagraph):
def predict(self, doc: Document) -> DocResult:
doc_result = super().predict(doc)
doc_result = add_global_language_score_from_slice_score(doc_result)
return doc_result
@TaggerRegistry.add("ft_lang_id_en_paragraph_with_doc_score_v2")
class FastTextEnglishLanguageParagraphWithDocScoreTagger(FastTextEnglishLanguageParagraphTagger):
def predict(self, doc: Document) -> DocResult:
doc_result = super().predict(doc)
doc_result = add_global_language_score_from_slice_score(doc_result)
return doc_result
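# --- Editor's illustrative sketch (not part of the original dolma module) ---
# Worked example of add_global_language_score_from_slice_score: two "en" spans covering 6
# and 4 characters of a 10-character document, with scores 1.0 and 0.5, aggregate to
# doc_en = (6 * 1.0 + 4 * 0.5) / 10 = 0.8 and doc_not_en = 0.2. The values are made up.
if __name__ == "__main__":
    _doc = Document(source="example", version="v0", id="0", text="abcdef ghi")
    _result = DocResult(
        doc=_doc,
        spans=[
            Span(start=0, end=6, type="en", score=1.0),
            Span(start=6, end=10, type="en", score=0.5),
        ],
    )
    for _span in add_global_language_score_from_slice_score(_result).spans:
        if _span.type.startswith("doc_"):
            print(f"{_span.type}: {_span.score}")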
|
dolma-main
|
python/dolma/taggers/language.py
|
"""
Filters.
@kylel, @soldni
"""
try:
import re2 as re
except ImportError:
import re
else:
re.set_fallback_notification(re.FALLBACK_WARNING)
from typing import List
from warnings import warn
from presidio_analyzer import AnalyzerEngine
from ..core.data_types import DocResult, Document, Span, TextSlice
from ..core.registry import TaggerRegistry
from ..core.taggers import BaseTagger
from ..core.utils import split_paragraphs
__all__ = ["PiiPresidioV1", "PiiRegexV1", "PiiRegexV2", "FastPiiRegex", "PiiRegexWithCountV2"]
class BasePiiFilter(BaseTagger):
EMAIL = "EMAIL_ADDRESS"
PHONE = "PHONE_NUMBER"
IP = "IP_ADDRESS"
PRESIDIO = "presidio"
REGEX = "regex"
ENGLISH = "en"
WINDOW = 100
def __init__(
self,
method: str,
postprocess: bool,
window: int,
) -> None:
assert method in [
self.PRESIDIO,
self.REGEX,
], f"Please provide a valid method for filtering ({self.PRESIDIO} or {self.REGEX})"
# configs
self.method = method
self.postprocess = postprocess
self.window = window
# Regular expressions for different types of PII
self.pii_type_to_regex = {
self.EMAIL: re.compile("[.\\s@,?!;:)(]*([^\\s@]+@[^\\s@,?!;:)(]+?)[.\\s@,?!;:)(]?[\\s\n\r]"),
self.PHONE: re.compile("\\s+\\(?(\\d{3})\\)?[-\\. ]*(\\d{3})[-. ]?(\\d{4})"),
self.IP: re.compile(
"(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
),
}
self.url_regex = re.compile(
"(?i)\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|"
"(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]"
"{};:'\".,<>?«»“”‘’]))"
)
# presidio
if self.method == self.PRESIDIO:
self.analyzer = AnalyzerEngine()
def predict(self, doc: Document) -> DocResult:
"""Main runner."""
# extract
if self.method == self.PRESIDIO:
pii_spans = self._extract_pii_presidio(text=doc.text)
elif self.method == self.REGEX:
pii_spans = self._extract_pii_regex(text=doc.text)
else:
raise NotImplementedError
# post process
if self.postprocess:
new_pii_spans = self._postprocess(text=doc.text, pii_spans=pii_spans, window=self.window)
else:
new_pii_spans = pii_spans
# document-level score
score = self._score(text=doc.text, pii_spans=new_pii_spans)
new_pii_spans.append(Span(start=0, end=len(doc.text), type="doc", score=score))
return DocResult(doc=doc, spans=new_pii_spans)
def _score(self, text: str, pii_spans: List[Span]) -> float:
return len(pii_spans) * 1.0 / len(text.split())
def _extract_pii_regex(self, text: str) -> List[Span]:
pii_spans: List[Span] = []
for pii_type, regex in self.pii_type_to_regex.items():
for match in regex.finditer(text):
start, end = match.span()
pii_spans.append(Span(start=start, end=end, type=pii_type))
return pii_spans
def _extract_pii_presidio(self, text: str) -> List[Span]:
analyzer_results = self.analyzer.analyze(
text=text,
entities=[self.EMAIL, self.PHONE, self.IP],
language=self.ENGLISH,
)
pii_spans: List[Span] = []
for res in analyzer_results:
pii_spans.append(Span(start=res.start, end=res.end, type=res.entity_type))
return pii_spans
def _postprocess(self, text: str, pii_spans: List[Span], window: int) -> List[Span]:
"""Applies some rules to remove over-prediction of PII types."""
new_pii_spans = []
for pii_span in pii_spans:
if pii_span.type == self.EMAIL:
if self._is_email(text, pii_span):
new_pii_spans.append(pii_span)
else:
pass
elif pii_span.type == self.PHONE or pii_span.type == self.IP:
context = pii_span.mention(text=text, window=window)
                # for both phone numbers & IP addresses, the surrounding context
                # shouldn't contain these strings
if "isbn" in context or "doi" in context or "#" in context:
pass
elif pii_span.type == self.IP:
new_pii_spans.append(pii_span)
elif pii_span.type == self.PHONE:
                    # for phone numbers, the text additionally shouldn't contain a URL
if self._contains_url(text=text):
pass
else:
new_pii_spans.append(pii_span)
else:
raise NotImplementedError(f"Unsupported PII type for Postprocess: {pii_span.type}")
return new_pii_spans
def _contains_url(self, text: str) -> bool:
return len(self.url_regex.findall(text)) > 0
def _is_email(self, text: str, pii_span: Span) -> bool:
"""
Rules:
        (1) The local part of the address (everything before the domain) cannot be just "("
(2) There must be a "." in the domain
"""
mention = pii_span.mention(text=text)
addressee = mention.split("@")[0]
domain = mention.split("@")[1]
if addressee.strip() == "(" or "." not in domain:
return False
return True
@TaggerRegistry.add("pii_presidio_v1")
class PiiPresidioV1(BasePiiFilter):
def __init__(self):
super().__init__(method=self.PRESIDIO, postprocess=True, window=self.WINDOW)
@TaggerRegistry.add("pii_regex_v1")
class PiiRegexV1(BasePiiFilter):
def __init__(self):
super().__init__(method=self.REGEX, postprocess=True, window=self.WINDOW)
@TaggerRegistry.add("pii_regex_v2")
class PiiRegexV2(PiiRegexV1):
def _score(self, text: str, pii_spans: List[Span]) -> float:
try:
score = len(pii_spans) * 1.0 / len(text.split())
except ZeroDivisionError:
score = -1.0
return score
@TaggerRegistry.add("pii_regex_with_counts_fast_v2")
class FastPiiRegex(BaseTagger):
EMAIL_KEY = "EMAIL_ADDRESS"
PHONE_KEY = "PHONE_NUMBER"
IP_KEY = "IP_ADDRESS"
EMAIL_REGEX = "[.\\s@,?!;:)(]*([^\\s@]+@[^\\s@,?!;:)(]+?)[.\\s@,?!;:)(]?[\\s\n\r]"
PHONE_REGEX = "\\s+\\(?(\\d{3})\\)?[-\\. ]*(\\d{3})[-. ]?(\\d{4})"
IP_REGEX = "(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
URL_REGEX = "(?i)\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))" # noqa: E501
def __init__(
self,
email_regex: str = EMAIL_REGEX,
phone_regex: str = PHONE_REGEX,
ip_regex: str = IP_REGEX,
url_regex: str = URL_REGEX,
) -> None:
self.email_regex = re.compile(email_regex)
self.phone_regex = re.compile(phone_regex)
self.ip_regex = re.compile(ip_regex)
self.url_regex = re.compile(url_regex)
self.pre_ip_regex = re.compile(r"\.[^\s]")
self.pre_phone_regex = re.compile(r"\d")
def _false_positive_identifiers(self, text: str) -> bool:
return "isbn" in text or "doi" in text or "#" in text
def _predict_email(self, slice: TextSlice) -> List[Span]:
if "@" not in slice.text:
return []
spans = []
for match in self.email_regex.finditer(slice.text):
addressee, domain = match.group(1).split("@", 1)
if addressee.strip() == "(" or "." not in domain:
continue
start, end = match.span()
spans.append(Span(start=start + slice.start, end=end + slice.start, type=self.EMAIL_KEY))
return spans
def _predict_phone(self, slice: TextSlice) -> List[Span]:
if not self.pre_phone_regex.search(slice.text):
return []
spans = []
for match in self.phone_regex.finditer(slice.text):
start, end = match.span()
spans.append(Span(start=start + slice.start, end=end + slice.start, type=self.PHONE_KEY))
return spans
def _predict_ip(self, slice: TextSlice) -> List[Span]:
if not self.pre_ip_regex.search(slice.text):
return []
spans = []
for match in self.ip_regex.finditer(slice.text):
if self._contains_url(match.group(0)):
continue
start, end = match.span()
spans.append(Span(start=start + slice.start, end=end + slice.start, type=self.IP_KEY))
return spans
def _contains_url(self, text: str) -> bool:
return self.url_regex.search(text) is not None
def predict(self, doc: Document) -> DocResult:
paragraphs = split_paragraphs(doc.text)
spans: List[Span] = []
if doc.text.count("?") > 10_000:
warn("Skipping regex PII detection for doc with >10k question marks")
paragraphs = []
for paragraph in paragraphs:
spans.extend(self._predict_email(paragraph))
spans.extend(self._predict_phone(paragraph))
spans.extend(self._predict_ip(paragraph))
# doc level score is the count of spans matching any of the PII types
score = sum(1.0 for s in spans if s.type != "doc")
spans.append(Span(start=0, end=len(doc.text), type="doc_count", score=score))
try:
            # fraction of characters covered by PII spans
score = sum(len(s) for s in spans) / len(doc.text)
except ZeroDivisionError:
# empty doc
score = -1.0
spans.append(Span(start=0, end=len(doc.text), type="doc_frac", score=score))
return DocResult(doc=doc, spans=spans)
@TaggerRegistry.add("pii_regex_with_counts_v2")
class PiiRegexWithCountV2(BasePiiFilter):
def __init__(self):
super().__init__(method=self.REGEX, postprocess=True, window=self.WINDOW)
def predict(self, doc: Document) -> DocResult:
doc_result = super().predict(doc=doc)
count = sum(1 for s in doc_result.spans if s.type != "doc")
doc_result.spans.append(Span(start=0, end=len(doc.text), type="doc_count", score=count))
return doc_result
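# --- Editor's illustrative sketch (not part of the original dolma module) ---
# Shows FastPiiRegex document-level scores on made-up text: "doc_count" is the number of
# PII spans found, and "doc_frac" divides the summed span lengths by the document length.
if __name__ == "__main__":
    _text = "Contact me at jane.doe@example.com for details.\nOr call (555) 123-4567 today.\n"
    _doc = Document(source="example", version="v0", id="0", text=_text)
    for _span in FastPiiRegex().predict(_doc).spans:
        print(f"{_span.type}: [{_span.start}, {_span.end}) score={_span.score}")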
|
dolma-main
|
python/dolma/taggers/pii.py
|
from argparse import ArgumentParser
from dataclasses import dataclass
from unittest import TestCase
from omegaconf import MissingMandatoryValue
from dolma.cli import field, make_parser, namespace_to_nested_omegaconf
@dataclass
class _1:
a: int = field(help="a")
b: str = field(help="b")
@dataclass
class _2:
a: _1 = field(help="a")
c: float = field(help="c", default=1.0)
class TestOmegaconf(TestCase):
def test_make_parser(self):
ap = ArgumentParser()
parser = make_parser(ap, _1)
args = parser.parse_args(["--a", "1", "--b", "2"])
opts = namespace_to_nested_omegaconf(args=args, structured=_1)
self.assertEqual(opts.a, 1)
self.assertEqual(opts.b, "2")
def test_nested_parser(self):
ap = ArgumentParser()
parser = make_parser(ap, _2)
args = parser.parse_args(["--a.a", "1", "--a.b", "2", "--c", "3"])
opts = namespace_to_nested_omegaconf(args=args, structured=_2)
self.assertEqual(opts.a.a, 1)
self.assertEqual(opts.a.b, "2")
self.assertEqual(opts.c, 3.0)
def test_fail_omegaconf(self):
ap = ArgumentParser()
parser = make_parser(ap, _2)
args = parser.parse_args(["--a.a", "1", "--c", "3"])
conf = namespace_to_nested_omegaconf(args, _2)
with self.assertRaises(MissingMandatoryValue):
conf.a.b
|
dolma-main
|
tests/python/test_omegaconf.py
|
"""
Tests for the utils module.
@kylel
"""
from unittest import TestCase
from dolma.core.data_types import TextSlice
from dolma.core.utils import split_paragraphs, split_sentences
class TestUtils(TestCase):
def test_make_variable_name(self):
pass
def test_split_paragraphs(self):
text = "This is a paragraph.\nThis is another paragraph.\nThis is a third paragraph."
paragraphs = split_paragraphs(text=text)
self.assertIsInstance(paragraphs[0], TextSlice)
self.assertEqual(len(paragraphs), 3)
self.assertEqual(paragraphs[0].text, "This is a paragraph.\n")
self.assertEqual(text[paragraphs[0].start : paragraphs[0].end], paragraphs[0].text)
self.assertEqual(paragraphs[1].text, "This is another paragraph.\n")
self.assertEqual(text[paragraphs[1].start : paragraphs[1].end], paragraphs[1].text)
paragraphs2 = split_paragraphs(text=text, remove_empty=False)
self.assertListEqual([p.text for p in paragraphs], [p.text for p in paragraphs2])
def test_split_paragraphs_empty(self):
text = ""
paragraphs = split_paragraphs(text=text)
self.assertEqual(len(paragraphs), 0)
def test_split_paragraphs_with_newline_and_spaces(self):
text = "This is a sentence. \n \n This is another sentence.\n\n This is a third sentence."
paragraphs = split_paragraphs(text=text)
self.assertEqual(len(paragraphs), 3)
self.assertIsInstance(paragraphs[0], TextSlice)
self.assertEqual(len(paragraphs), 3)
self.assertEqual(paragraphs[0].text, "This is a sentence. \n")
self.assertEqual(text[paragraphs[0].start : paragraphs[0].end], paragraphs[0].text)
self.assertEqual(paragraphs[1].text, " This is another sentence.\n")
self.assertEqual(text[paragraphs[1].start : paragraphs[1].end], paragraphs[1].text)
paragraphs = split_paragraphs(text=text, remove_empty=False)
self.assertEqual(len(paragraphs), 5)
self.assertIsInstance(paragraphs[0], TextSlice)
self.assertEqual(len(paragraphs), 5)
self.assertEqual(paragraphs[0].text, "This is a sentence. \n")
self.assertEqual(text[paragraphs[0].start : paragraphs[0].end], paragraphs[0].text)
self.assertEqual(paragraphs[1].text, " \n")
self.assertEqual(text[paragraphs[1].start : paragraphs[1].end], paragraphs[1].text)
self.assertEqual(paragraphs[2].text, " This is another sentence.\n")
self.assertEqual(text[paragraphs[2].start : paragraphs[2].end], paragraphs[2].text)
def test_split_sentences(self):
text = "This is a sentence. This is another sentence. This is a third sentence."
sentences = split_sentences(text=text)
self.assertIsInstance(sentences[0], TextSlice)
self.assertEqual(len(sentences), 3)
self.assertEqual(sentences[0].text, "This is a sentence.")
self.assertEqual(text[sentences[0].start : sentences[0].end], sentences[0].text)
self.assertEqual(sentences[1].text, "This is another sentence.")
self.assertEqual(text[sentences[1].start : sentences[1].end], sentences[1].text)
def test_split_sentences_empty(self):
text = ""
sentences = split_sentences(text=text)
self.assertEqual(len(sentences), 0)
def test_split_sentences_with_newline_and_spaces(self):
text = "This is a sentence. \n \n This is another sentence.\n\n This is a third sentence."
sentences = split_sentences(text=text)
self.assertEqual(len(sentences), 3)
self.assertIsInstance(sentences[0], TextSlice)
self.assertEqual(len(sentences), 3)
self.assertEqual(sentences[0].text, "This is a sentence.")
self.assertEqual(text[sentences[0].start : sentences[0].end], sentences[0].text)
self.assertEqual(sentences[1].text, "This is another sentence.")
self.assertEqual(text[sentences[1].start : sentences[1].end], sentences[1].text)
|
dolma-main
|
tests/python/test_utils.py
|
import unittest
import numpy as np
from dolma.core.binning import (
FixedBucketsValTracker,
InferBucketsValTracker,
merge_bins,
)
class TestBinning(unittest.TestCase):
def setUp(self) -> None:
np.random.seed(0)
def test_binning(self):
bin_a = np.arange(0, 10_000, 55).astype(np.float64)
bin_b = np.arange(0, 10_000, 100).astype(np.float64)
count_a = np.random.randint(0, 15, len(bin_a))
count_b = np.random.randint(0, 15, len(bin_b))
bin_c, count_c = merge_bins(bin_a, count_a, bin_b, count_b)
self.assertEqual(set(bin_c), set(np.concatenate([bin_a, bin_b])))
self.assertEqual(sum(count_c), sum(count_a) + sum(count_b))
def test_binning_with_repetitions(self):
bin_a = np.random.randint(0, 100, 10_000).astype(np.float64)
bin_b = np.random.randint(0, 100, 1_000).astype(np.float64)
bin_a = np.sort(bin_a)
bin_b = np.sort(bin_b)
count_a = np.ones_like(bin_a, dtype=np.int64)
count_b = np.ones_like(bin_b, dtype=np.int64)
bin_c, count_c = merge_bins(bin_a, count_a, bin_b, count_b)
self.assertEqual(set(bin_c), set(np.concatenate([bin_a, bin_b])))
self.assertEqual(sum(count_c), sum(count_a) + sum(count_b))
def test_binning_no_overlap(self):
bin_a = np.arange(0, 1_000, 3).astype(np.float64)
bin_b = np.arange(17, 1_000, 17).astype(np.float64)
count_a = np.ones_like(bin_a, dtype=np.int64)
count_b = np.ones_like(bin_b, dtype=np.int64)
bin_c, count_c = merge_bins(bin_a, count_a, bin_b, count_b)
self.assertEqual(set(bin_c), set(np.concatenate([bin_a, bin_b])))
self.assertEqual(sum(count_c), sum(count_a) + sum(count_b))
def test_bucket_val_trackers(self):
tracker = InferBucketsValTracker(n=100_000)
values = np.random.randn(1_000_000)
for v in values:
tracker.add(v)
tracker_counts, tracker_bins = tracker.summarize(10)
self.assertEqual(sum(tracker_counts), len(values))
self.assertEqual(sorted(tracker_bins), tracker_bins)
tracker_dist, tracker_bins = tracker.summarize(10, density=True)
hist_dist, hist_bins = np.histogram(values, bins=10, density=True)
for td, hd in zip(tracker_dist, hist_dist):
self.assertAlmostEqual(np.abs(td - hd), 0, delta=0.1)
for tb, hb in zip(tracker_bins, hist_bins):
self.assertAlmostEqual(np.abs(tb - hb), 0, delta=0.5)
class FixedBinning(unittest.TestCase):
def setUp(self) -> None:
np.random.seed(0)
def test_normal_bins(self):
tr = FixedBucketsValTracker()
vals = np.random.randn(2_000_000) * 100
total_count = len(vals)
for v in vals:
tr.add(v)
for (tr_c, tr_b), (hist_c, hist_b) in zip(zip(*tr.summarize(10)), zip(*np.histogram(vals, bins=10))):
count_diff = np.abs(tr_c - hist_c) / total_count
bin_diff = np.abs(tr_b - hist_b)
self.assertLess(count_diff, 0.01)
self.assertLess(bin_diff, 10)
def test_uniform_bins(self):
tr = FixedBucketsValTracker()
vals = np.random.rand(2_000_000)
total_count = len(vals)
for v in vals:
tr.add(v)
for (tr_c, tr_b), (hist_c, hist_b) in zip(zip(*tr.summarize(10)), zip(*np.histogram(vals, bins=10))):
count_diff = np.abs(tr_c - hist_c) / total_count
bin_diff = np.abs(tr_b - hist_b)
self.assertLess(count_diff, 0.01)
self.assertLess(bin_diff, 0.01)
|
dolma-main
|
tests/python/test_binning.py
|
import json
import os
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest import TestCase
import smart_open
from dolma.core.runtime import (
_make_paths_from_prefix,
_make_paths_from_substitution,
create_and_run_tagger,
)
LOCAL_DATA = Path(__file__).parent.parent / "data"
class TestRuntimeUtilities(TestCase):
def test_make_paths_from_substitution(self):
paths = [
"s3://bucket/common-crawl/documents/cc_*/*.json.gz",
"/local/path/to/documents/train/*",
]
new_paths = _make_paths_from_substitution(
paths=paths,
find="documents",
replace="attributes",
)
self.assertEqual(new_paths, ["s3://bucket/common-crawl/attributes", "/local/path/to/attributes/train"])
def test_make_paths_from_prefix(self):
paths = [
"s3://bucket/common-crawl/documents/cc_head/*.json.gz",
"s3://bucket/common-crawl/documents/cc_middle/*.json.gz",
"s3://bucket/common-crawl/documents/cc_tail/*.json.gz",
]
new_paths = _make_paths_from_prefix(
paths=paths,
prefix="s3://bucket/common-crawl/attributes/",
)
self.assertEqual(
new_paths,
[
"s3://bucket/common-crawl/attributes/cc_head",
"s3://bucket/common-crawl/attributes/cc_middle",
"s3://bucket/common-crawl/attributes/cc_tail",
],
)
paths = [
"s3://bucket/common-crawl/documents/*.json.gz",
"s3://bucket2/c4/documents/**/data/*.json.gz",
]
new_paths = _make_paths_from_prefix(
paths=paths,
prefix="/local/path/",
)
self.assertEqual(
new_paths,
[
"/local/path/bucket/common-crawl/documents",
"/local/path/bucket2/c4/documents",
],
)
def test_runtime_e2e(self):
documents_path = f"{LOCAL_DATA}/provided/documents/000.json.gz"
experiment_name = "test"
taggers = ["c4_v1"]
with TemporaryDirectory() as temp_dir:
create_and_run_tagger(
documents=[documents_path],
destination=temp_dir,
taggers=taggers,
experiment=experiment_name,
debug=True,
)
destination_file = os.path.join(temp_dir, experiment_name, "000.json.gz")
self.assertTrue(os.path.exists(destination_file))
with smart_open.open(documents_path, "rt") as f:
document = [json.loads(ln) for ln in f]
with smart_open.open(destination_file, "rt") as f:
attributes = [json.loads(ln) for ln in f]
self.assertEqual(len(document), len(attributes))
for d, a in zip(document, attributes):
self.assertEqual(d["id"], a["id"])
                self.assertEqual(sorted(a.keys()), ["attributes", "id", "source"])
for key, value in a["attributes"].items():
parts = key.split("__")
self.assertEqual(len(parts), 3)
self.assertEqual(parts[0], experiment_name)
self.assertTrue(parts[1] in taggers)
for elem in value:
self.assertEqual(len(elem), 3)
self.assertTrue(isinstance(elem[0], int))
self.assertTrue(isinstance(elem[1], int))
self.assertTrue(isinstance(elem[2], float))
if len(value) == 1:
self.assertEqual(value[0][0], 0)
self.assertEqual(value[0][1], len(d["text"]))
def test_alt_src(self):
taggers = ["c4_v1"]
experiment_name = "test"
with TemporaryDirectory() as temp_dir:
create_and_run_tagger(
documents=[f"{LOCAL_DATA}/provided/documents/000.json.gz"],
destination=temp_dir,
taggers=taggers,
experiment=experiment_name,
debug=True,
)
destination_file = os.path.join(temp_dir, experiment_name, "000.json.gz")
self.assertTrue(os.path.exists(destination_file))
with smart_open.open(destination_file, "rt") as f:
attributes_full_name = [json.loads(ln) for ln in f]
with TemporaryDirectory() as temp_dir:
create_and_run_tagger(
documents=[f"{LOCAL_DATA}/provided/documents/*"],
destination=temp_dir,
taggers=taggers,
experiment=experiment_name,
debug=True,
)
destination_file = os.path.join(temp_dir, experiment_name, "000.json.gz")
self.assertTrue(os.path.exists(destination_file))
with smart_open.open(destination_file, "rt") as f:
attributes_star_in_path = [json.loads(ln) for ln in f]
with TemporaryDirectory() as temp_dir:
create_and_run_tagger(
documents=[f"{LOCAL_DATA}/provided/documents/"],
destination=temp_dir,
taggers=taggers,
experiment=experiment_name,
debug=True,
)
destination_file = os.path.join(temp_dir, experiment_name, "000.json.gz")
self.assertTrue(os.path.exists(destination_file))
with smart_open.open(destination_file, "rt") as f:
attributes_only_dir = [json.loads(ln) for ln in f]
self.assertEqual(attributes_full_name, attributes_star_in_path)
self.assertEqual(attributes_full_name, attributes_only_dir)
def test_alt_exp(self):
documents_path = f"{LOCAL_DATA}/provided/documents/000.json.gz"
taggers = ["c4_v1"]
with TemporaryDirectory() as temp_dir:
create_and_run_tagger(
documents=[documents_path],
destination=temp_dir,
taggers=taggers,
experiment="test",
debug=True,
)
destination_file = os.path.join(temp_dir, "test", "000.json.gz")
self.assertTrue(os.path.exists(destination_file))
with smart_open.open(destination_file, "rt") as f:
attributes_with_exp = [json.loads(ln) for ln in f]
with TemporaryDirectory() as temp_dir:
create_and_run_tagger(
documents=[documents_path],
destination=temp_dir,
taggers=taggers,
experiment=None,
debug=True,
)
destination_file = os.path.join(temp_dir, "c4_v1", "000.json.gz")
self.assertTrue(os.path.exists(destination_file))
with smart_open.open(destination_file, "rt") as f:
attributes_without_exp = [json.loads(ln) for ln in f]
for row_with_exp, row_without_exp in zip(attributes_with_exp, attributes_without_exp):
for key_with_exp, key_without_exp in zip(row_with_exp["attributes"], row_without_exp["attributes"]):
parts_with_exp = key_with_exp.split("__")
parts_without_exp = key_without_exp.split("__")
self.assertNotEqual(parts_with_exp[0], parts_without_exp[0])
self.assertEqual(parts_with_exp[1], parts_without_exp[1])
self.assertEqual(parts_with_exp[2], parts_without_exp[2])
self.assertEqual(parts_with_exp[0], "test")
self.assertEqual(parts_without_exp[0], "c4_v1")
|
dolma-main
|
tests/python/test_runtime.py
|
import json
from pathlib import Path
from tempfile import NamedTemporaryFile
from unittest import TestCase
from dolma.cli.__main__ import main
from .utils import (
clean_test_data,
download_s3_prefix,
get_test_prefix,
load_jsonl,
skip_aws_tests,
upload_s3_prefix,
)
EMAIL_SPANS = Path(__file__).parent.parent / "config/email-spans.json"
FILTER_BY_SPANS = Path(__file__).parent.parent / "config/filter-by-spans.json"
MIXER = Path(__file__).parent.parent / "config/mixer.json"
PARAGRAPH_SPANS = Path(__file__).parent.parent / "config/paragraph-spans.json"
class TestMixer(TestCase):
def setUp(self) -> None:
if skip_aws_tests():
self.remote_test_prefix = None
else:
self.remote_test_prefix = get_test_prefix()
upload_s3_prefix(s3_prefix=f"{self.remote_test_prefix}", local_prefix="tests/data/provided/**/*.gz")
upload_s3_prefix(
s3_prefix=f"{self.remote_test_prefix}", local_prefix="tests/data/provided/attributes/**/*.gz"
)
def tearDown(self) -> None:
if self.remote_test_prefix is not None:
clean_test_data(self.remote_test_prefix)
def test_email_spans(self):
main(argv=["-c", str(EMAIL_SPANS), "mix"])
self.assertEqual(
load_jsonl("tests/data/expected/email-spans.json.gz"),
load_jsonl("tests/work/output/email-spans/email-spans-0000.json.gz"),
)
def test_filter_by_spans(self):
main(argv=["-c", str(FILTER_BY_SPANS), "mix"])
self.assertEqual(
load_jsonl("tests/data/expected/filter-by-spans.json.gz"),
load_jsonl("tests/work/output/filter-by-spans/filter-by-spans-test-0000.json.gz"),
)
def test_mixer(self):
main(argv=["-c", str(MIXER), "mix"])
self.assertEqual(
load_jsonl("tests/data/expected/mixer.json.gz"),
load_jsonl("tests/work/output/mixer/mixer-test-0000.json.gz"),
)
def test_paragraph_spans(self):
main(argv=["-c", str(PARAGRAPH_SPANS), "mix"])
self.assertEqual(
load_jsonl("tests/data/expected/remove-paragraphs.json.gz"),
load_jsonl("tests/work/output/paragraph-spans/paragraph-spans-test-0000.json.gz"),
)
def test_local_input_remote_output(self):
if self.remote_test_prefix is None:
return self.skipTest("Skipping AWS tests")
with open(MIXER, "r") as f:
config = json.load(f)
# keep track of local output path
local_output = config["streams"][0]["output"]["path"]
# replace results path with s3 path
config["streams"][0]["output"]["path"] = f"{self.remote_test_prefix}/{local_output}"
with NamedTemporaryFile("w") as f:
json.dump(config, f)
f.flush()
main(argv=["-c", f.name, "mix"])
download_s3_prefix(f"{self.remote_test_prefix}/tests/work", "tests/work/remote")
self.assertEqual(
load_jsonl("tests/data/expected/mixer.json.gz"),
load_jsonl("tests/work/remote/output/mixer/mixer-test-0000.json.gz"),
)
def test_remote_input_remote_output(self):
if self.remote_test_prefix is None:
return self.skipTest("Skipping AWS tests")
with open(MIXER, "r") as f:
config = json.load(f)
        # keep track of local input and output paths
local_input = config["streams"][0]["documents"][0]
local_output = config["streams"][0]["output"]["path"]
# replace results path with s3 path
config["streams"][0]["output"]["path"] = f"{self.remote_test_prefix}/{local_output}"
# upload local input to s3, replace local input with s3 path
config["streams"][0]["documents"][0] = f"{self.remote_test_prefix}/{local_input}"
with NamedTemporaryFile("w") as f:
json.dump(config, f)
f.flush()
main(argv=["-c", f.name, "mix"])
download_s3_prefix(f"{self.remote_test_prefix}/tests/work", "tests/work/remote")
self.assertEqual(
load_jsonl("tests/data/expected/mixer.json.gz"),
load_jsonl("tests/work/remote/output/mixer/mixer-test-0000.json.gz"),
)
def test_remote_input_local_output(self):
if self.remote_test_prefix is None:
return self.skipTest("Skipping AWS tests")
with open(MIXER, "r") as f:
config = json.load(f)
        # keep track of local input path
local_input = config["streams"][0]["documents"][0]
# upload local input to s3, replace local input with s3 path
config["streams"][0]["documents"][0] = f"{self.remote_test_prefix}/{local_input}"
with NamedTemporaryFile("w") as f:
json.dump(config, f)
f.flush()
main(argv=["-c", f.name, "mix"])
self.assertEqual(
load_jsonl("tests/data/expected/mixer.json.gz"),
load_jsonl("tests/work/output/mixer/mixer-test-0000.json.gz"),
)
|
dolma-main
|
tests/python/test_mixer.py
|
import warnings
# warning raised by pkg_resources used in a lot of google packages
warnings.filterwarnings("ignore", message=r".*declare_namespace\(\'.*google.*", category=DeprecationWarning)
# base warning raised when warning above are raised
warnings.filterwarnings("ignore", message=r".*pkg_resources is deprecated.*", category=DeprecationWarning)
|
dolma-main
|
tests/python/__init__.py
|
import json
import shutil
from contextlib import ExitStack
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest import TestCase
from dolma.cli.__main__ import main
from .utils import (
clean_test_data,
download_s3_prefix,
get_test_prefix,
load_jsonl,
skip_aws_tests,
upload_s3_prefix,
)
DEDUPE_BY_URL = Path(__file__).parent.parent / "config/dedupe-by-url.json"
DEDUPE_PARAGRAPHS = Path(__file__).parent.parent / "config/dedupe-paragraphs.json"
class TestDeduper(TestCase):
def setUp(self) -> None:
self.stack = ExitStack()
self.local_temp_dir = self.stack.enter_context(TemporaryDirectory()).rstrip("/")
if skip_aws_tests():
self.remote_test_prefix = None
else:
self.remote_test_prefix = get_test_prefix()
# upload test data
upload_s3_prefix(
s3_prefix=f"{self.remote_test_prefix}", local_prefix="tests/data/provided/documents/*.gz"
)
        # copy provided documents to local temp dir
shutil.copytree(
"tests/data/provided/documents",
f"{self.local_temp_dir}/tests/data/provided/documents",
dirs_exist_ok=True,
)
def tearDown(self) -> None:
if self.remote_test_prefix is not None:
clean_test_data(self.remote_test_prefix)
self.stack.close()
def test_dedupe_by_url(self):
with open(DEDUPE_BY_URL, "r") as f:
config = json.load(f)
config["documents"][0] = f'{self.local_temp_dir}/{config["documents"][0]}'
config["bloom_filter"]["file"] = f'{self.local_temp_dir}/{config["bloom_filter"]["file"]}'
with NamedTemporaryFile("w") as f:
json.dump(config, f)
f.flush()
main(argv=["-c", f.name, "dedupe"])
expected = load_jsonl("tests/data/expected/dedupe-by-url.json.gz")
computed = load_jsonl(f"{self.local_temp_dir}/tests/data/provided/attributes/dedupe_by_url/000.json.gz")
self.assertEqual(expected, computed)
def test_dedupe_paragraphs(self):
with open(DEDUPE_PARAGRAPHS, "r") as f:
config = json.load(f)
config["documents"][0] = f'{self.local_temp_dir}/{config["documents"][0]}'
config["bloom_filter"]["file"] = f'{self.local_temp_dir}/{config["bloom_filter"]["file"]}'
with NamedTemporaryFile("w") as f:
json.dump(config, f)
f.flush()
main(argv=["-c", f.name, "dedupe"])
expected = load_jsonl("tests/data/expected/dedupe-paragraphs.json.gz")
computed = load_jsonl(
f"{self.local_temp_dir}/tests/data/provided/attributes/dedupe_paragraphs/000.json.gz"
)
self.assertEqual(expected, computed)
def test_dedupe_by_url_remote_input(self):
if self.remote_test_prefix is None:
return self.skipTest("Skipping AWS tests")
with open(DEDUPE_BY_URL, "r") as f:
config = json.load(f)
config["documents"][0] = f'{self.remote_test_prefix}/{config["documents"][0]}'
config["bloom_filter"]["file"] = f'{self.local_temp_dir}/{config["bloom_filter"]["file"]}'
with NamedTemporaryFile("w") as f:
json.dump(config, f)
f.flush()
main(argv=["-c", f.name, "dedupe"])
download_s3_prefix(self.remote_test_prefix, self.local_temp_dir)
expected = load_jsonl("tests/data/expected/dedupe-by-url.json.gz")
computed = load_jsonl(f"{self.local_temp_dir}/tests/data/provided/attributes/dedupe_by_url/000.json.gz")
self.assertEqual(expected, computed)
|
dolma-main
|
tests/python/test_deduper.py
|
# mypy: disable-error-code="unused-ignore"
import os
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any
from unittest import TestCase
import smart_open
from dolma.core.parallel import BaseParallelProcessor, QueueType
LOCAL_DATA = Path(__file__).parent.parent / "data"
class MockProcessor(BaseParallelProcessor):
@classmethod
def increment_progressbar(cls, queue, /, cnt: int = 0): # type: ignore[override]
return super().increment_progressbar(queue, cnt=cnt)
@classmethod
def process_single(
cls,
source_path: str,
destination_path: str,
queue: QueueType,
**kwargs: Any,
):
with smart_open.open(source_path, "rb") as f, smart_open.open(destination_path, "wb") as g:
g.write(f.read())
queue.put((1,))
class TestParallel(TestCase):
def test_base_parallel_processor(self):
with self.assertRaises(ValueError):
MockProcessor(source_prefix=[], destination_prefix=[], metadata_prefix=[])
with TemporaryDirectory() as d:
proc = MockProcessor(
source_prefix=str(LOCAL_DATA / "expected"),
destination_prefix=f"{d}/destination",
metadata_prefix=f"{d}/metadata",
ignore_existing=False,
)
proc()
src = [p for p in os.listdir(LOCAL_DATA / "expected") if not p.startswith(".")]
            meta = [p[: -len(".done.txt")] for p in os.listdir(f"{d}/metadata")]  # strip the suffix, not trailing chars
dest = [p for p in os.listdir(f"{d}/destination") if not p.startswith(".")]
self.assertEqual(sorted(src), sorted(meta))
self.assertEqual(sorted(src), sorted(dest))
with TemporaryDirectory() as d:
proc = MockProcessor(
source_prefix=str(LOCAL_DATA / "expected" / "*-paragraphs.*"),
destination_prefix=f"{d}/destination",
metadata_prefix=f"{d}/metadata",
ignore_existing=False,
)
proc()
src = [p for p in os.listdir(LOCAL_DATA / "expected") if "paragraphs" in p]
            meta = [p[: -len(".done.txt")] for p in os.listdir(f"{d}/metadata")]  # strip the suffix, not trailing chars
dest = [p for p in os.listdir(f"{d}/destination")]
self.assertEqual(sorted(src), sorted(meta))
self.assertEqual(sorted(src), sorted(dest))
|
dolma-main
|
tests/python/test_parallel.py
|
import json
import os
import re
import uuid
from typing import List, Tuple
from urllib.parse import urlparse
import boto3
import smart_open
from smart_open import open
from dolma.core.paths import glob_path, mkdir_p
DOLMA_TESTS_S3_PREFIX_ENV_VAR = "DOLMA_TESTS_S3_PREFIX"
DOLMA_TESTS_SKIP_AWS_ENV_VAR = "DOLMA_TESTS_SKIP_AWS"
DOLMA_TESTS_S3_PREFIX_DEFAULT = "s3://dolma-tests"
def parse_s3_path(s3_path: str) -> Tuple[str, str]:
"""
Parse an s3 path into a bucket and key.
Args:
s3_path: The s3 path to parse.
Returns:
A tuple containing the bucket and key.
"""
if not re.match(r"^s3://[\w-]+", s3_path):
raise RuntimeError(f"Invalid s3 path: {s3_path}")
# use urlparse to parse the s3 path
parsed = urlparse(s3_path)
return parsed.netloc, parsed.path.lstrip("/")
def get_test_prefix() -> str:
# get the test prefix from the environment, or use the default if not set
test_prefix = os.environ.get(DOLMA_TESTS_S3_PREFIX_ENV_VAR, DOLMA_TESTS_S3_PREFIX_DEFAULT)
# this will check if it is a valid path
bucket, _ = parse_s3_path(test_prefix)
# check if the user has access to the test bucket using boto3
s3 = boto3.client("s3")
try:
s3.head_bucket(Bucket=bucket)
except Exception:
if not skip_aws_tests():
raise RuntimeError(
f"Unable to access test bucket '{test_prefix}'. To provide a different bucket, "
f"set the '{DOLMA_TESTS_S3_PREFIX_ENV_VAR}' environment variable before running the tests."
)
# add a uuid to the test prefix to avoid collisions
    return f"{test_prefix.rstrip('/')}/{uuid.uuid4()}"
def skip_aws_tests() -> bool:
dolma_tests_skip = os.environ.get(DOLMA_TESTS_SKIP_AWS_ENV_VAR)
print(f"{DOLMA_TESTS_SKIP_AWS_ENV_VAR}: {dolma_tests_skip}")
return (dolma_tests_skip or "false").lower() == "true"
def upload_test_documents(local_input: str, test_prefix: str) -> Tuple[str, str]:
remote_input = f"{test_prefix}/input/documents"
remote_output = f"{test_prefix}/output/documents"
for i, local_fp in enumerate(glob_path(local_input)):
remote_fp = f"{remote_input}/{i:05d}.json.gz"
with open(local_fp, "rb") as f, open(remote_fp, "wb") as g:
g.write(f.read())
return remote_input, remote_output
def upload_test_attributes(local_attributes: str, test_prefix: str):
remote_attributes = f"{test_prefix}/input/attributes"
for i, local_fp in enumerate(glob_path(local_attributes)):
matched = re.match(r"^(attributes|duplicate)-(\w+)", local_fp)
if not matched:
raise RuntimeError(f"Unexpected filename: {local_fp}")
_, name = matched.groups()
remote_fp = f"{remote_attributes}/{name}/{i:05d}.json.gz"
with open(local_fp, "rb") as f, open(remote_fp, "wb") as g:
g.write(f.read())
def clean_test_data(test_prefix: str):
s3 = boto3.client("s3")
bucket_name, prefix = parse_s3_path(test_prefix)
for obj in s3.list_objects_v2(Bucket=bucket_name, Prefix=prefix).get("Contents", []):
s3.delete_object(Bucket=bucket_name, Key=obj["Key"])
def download_s3_prefix(s3_prefix: str, local_prefix: str):
s3 = boto3.client("s3")
bucket_name, prefix = parse_s3_path(s3_prefix)
for obj in s3.list_objects_v2(Bucket=bucket_name, Prefix=prefix).get("Contents", []):
name = obj["Key"].replace(prefix, "").lstrip("/")
local_fp = os.path.join(local_prefix, name)
mkdir_p(os.path.dirname(local_fp))
s3.download_file(Bucket=bucket_name, Key=obj["Key"], Filename=local_fp)
def upload_s3_prefix(s3_prefix: str, local_prefix: str):
s3 = boto3.client("s3")
bucket_name, prefix = parse_s3_path(s3_prefix)
for local_fp in glob_path(local_prefix):
name = local_fp.replace(local_prefix, "").lstrip("/")
s3.upload_file(Bucket=bucket_name, Key=f"{prefix}/{name}", Filename=local_fp)
def load_jsonl(fp: str) -> List[dict]:
with smart_open.open(fp, "r") as f:
return [json.loads(ln) for ln in f]
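# --- Editor's illustrative sketch (not part of the original test helpers) ---
# Quick demonstration of parse_s3_path: it splits an s3 URL into a (bucket, key) pair.
# The URL below is a made-up example.
if __name__ == "__main__":
    bucket, key = parse_s3_path("s3://my-bucket/some/prefix/file.json.gz")
    print(bucket)  # -> my-bucket
    print(key)     # -> some/prefix/file.json.gz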
|
dolma-main
|
tests/python/utils.py
|
"""
Unit tests for core/data_types.py
@kylel
"""
from unittest import TestCase
from dolma.core.data_types import DocResult, Document, InputSpec, Span, TextSlice
class TestDocument(TestCase):
def test_document_to_from_json(self):
doc = Document(source="source", version="version", id="id", text="text")
doc_json = doc.to_json()
doc_json2 = {
"source": "source",
"version": "version",
"id": "id",
"text": "text",
}
self.assertEqual(doc_json, doc_json2)
doc2 = Document.from_json(doc_json2)
self.assertEqual(doc_json, doc2.to_json())
def test_document_to_from_spec(self):
doc = Document(source="source", version="version", id="id", text="text")
spec = doc.to_spec()
spec2 = InputSpec(source="source", version="version", id="id", text="text")
self.assertEqual(spec, spec2)
doc2 = Document.from_spec(spec2)
self.assertEqual(spec, doc2.to_spec())
class TestSpan(TestCase):
def test_span_to_from_json(self):
span = Span(start=0, end=1, type="type", score=1.0)
span_json = span.to_json()
span_json2 = {"start": 0, "end": 1, "type": "type", "score": 1.0}
self.assertEqual(span_json, span_json2)
span2 = Span.from_json(span_json2)
self.assertEqual(span_json, span2.to_json())
# TODO: add tests for to/from Spec
def test_span_to_from_spec(self):
span = Span(start=0, end=1, type="type", score=1.0)
with self.assertRaises(AssertionError):
span.to_spec()
class TestDocResult(TestCase):
def test_doc_result_to_from_json(self):
doc = Document(source="source", version="version", id="id", text="text")
spans = [
Span(start=0, end=2, type="xxx", score=1.0),
Span(start=2, end=4, type="yyy", score=0.5),
]
doc_result = DocResult(doc=doc, spans=spans)
        # to_json() doesn't return the Document by default
# also, it returns this extra field called `"mention"`
doc_result_json = doc_result.to_json()
doc_result_json2 = {
"spans": [
{"start": 0, "end": 2, "type": "xxx", "score": 1.0, "mention": "te"},
{"start": 2, "end": 4, "type": "yyy", "score": 0.5, "mention": "xt"},
]
}
self.assertEqual(doc_result_json, doc_result_json2)
# from_json() requires also providing the Document
with self.assertRaises(KeyError):
DocResult.from_json(doc_result_json2)
doc_result_json3 = {
"doc": {
"source": "source",
"version": "version",
"id": "id",
"text": "text",
},
"spans": [
{"start": 0, "end": 2, "type": "xxx", "score": 1.0, "mention": "te"},
{"start": 2, "end": 4, "type": "yyy", "score": 0.5, "mention": "xt"},
],
}
doc_result3 = DocResult.from_json(doc_result_json3)
self.assertEqual(doc_result_json, doc_result3.to_json())
class TestTextSlice(TestCase):
def test_text_slice_text(self):
text = "This is a test"
slice = TextSlice(doc=text, start=0, end=4)
self.assertEqual(slice.text, "This")
|
dolma-main
|
tests/python/test_data_types.py
|
import itertools
import os
from pathlib import Path
from unittest import TestCase
from dolma.core.paths import (
_escape_glob,
_pathify,
_unescape_glob,
add_suffix,
glob_path,
is_glob,
join_path,
make_relative,
split_glob,
split_path,
sub_prefix,
sub_suffix,
)
from .utils import clean_test_data, get_test_prefix, skip_aws_tests, upload_s3_prefix
LOCAL_DATA = Path(__file__).parent.parent / "data"
class TestPaths(TestCase):
def setUp(self) -> None:
if skip_aws_tests():
self.remote_test_prefix = None
else:
self.remote_test_prefix = get_test_prefix()
upload_s3_prefix(s3_prefix=f"{self.remote_test_prefix}", local_prefix="tests/data/expected/*")
def tearDown(self) -> None:
if self.remote_test_prefix is not None:
clean_test_data(self.remote_test_prefix)
def test_pathify(self):
path = "s3://path/to/file"
protocol, path = _pathify(path)
self.assertEqual(protocol, "s3")
self.assertEqual(path, Path("path/to/file"))
path = "path/to/file"
protocol, path = _pathify(path)
self.assertEqual(protocol, "")
self.assertEqual(path, Path("path/to/file"))
path = "/path/to/file"
protocol, path = _pathify(path)
self.assertEqual(protocol, "")
self.assertEqual(path, Path("/path/to/file"))
def test_local_glob_path(self):
local_glob = str(LOCAL_DATA / "*.json.gz")
paths = list(glob_path(local_glob))
expected = [str(LOCAL_DATA / fn) for fn in os.listdir(LOCAL_DATA) if fn.endswith(".json.gz")]
self.assertEqual(sorted(paths), sorted(expected))
def test_remote_glob_path(self):
if self.remote_test_prefix is None:
return self.skipTest("Skipping AWS tests")
paths = list(glob_path(f"{self.remote_test_prefix}/**/*.json.gz"))
expected = [
f"{self.remote_test_prefix}/tests/data/expected/{fn}"
for fn in os.listdir(LOCAL_DATA / "expected")
if fn.endswith(".json.gz")
]
self.assertEqual(sorted(paths), sorted(expected))
def test_local_glob_with_recursive(self):
local_glob = str(LOCAL_DATA / "**/*-paragraphs.json.gz")
paths = list(glob_path(local_glob))
expected = list(
itertools.chain.from_iterable(
(str(fp),)
if (fp := LOCAL_DATA / fn).is_file() and "paragraphs" in fn
else ((str(fp / sn) for sn in os.listdir(fp) if "paragraphs" in sn) if fp.is_dir() else ())
for fn in os.listdir(LOCAL_DATA)
)
)
self.assertEqual(sorted(paths), sorted(expected))
def test_sub_prefix(self):
path_a = "s3://path/to/b/and/more"
path_b = "s3://path/to/b"
self.assertEqual(sub_prefix(path_a, path_b), "and/more")
self.assertEqual(sub_prefix(path_b, path_a), path_b)
path_c = "/path/to/c"
path_d = "/path/to/c/and/more"
self.assertEqual(sub_prefix(path_d, path_c), "and/more")
self.assertEqual(sub_prefix(path_c, path_d), path_c)
with self.assertRaises(ValueError):
sub_prefix(path_a, path_c)
def test_sub_suffix(self):
path_a = "s3://path/to/dir/and/more"
path_b = "and/more"
self.assertEqual(sub_suffix(path_a, path_b), "s3://path/to/dir")
path_c = "/path/to/dir/and/more"
path_d = "path/to/dir/and/more"
self.assertEqual(sub_suffix(path_c, path_d), "/")
def test_add_prefix(self):
path_a = "s3://path/to/b"
path_b = "and/more"
self.assertEqual(add_suffix(path_a, path_b), "s3://path/to/b/and/more")
path_c = "/path/to/c"
path_d = "and/more"
self.assertEqual(add_suffix(path_c, path_d), "/path/to/c/and/more")
with self.assertRaises(ValueError):
add_suffix(path_a, path_c)
add_suffix(path_c, path_a)
add_suffix(path_a, path_a)
def test_wildcard_operations(self):
path_a = "s3://path/to/dir"
self.assertEqual(add_suffix(path_a, "*"), "s3://path/to/dir/*")
path_b = "/path/to/dir/**"
self.assertEqual(add_suffix(path_b, "*"), "/path/to/dir/**/*")
path_c = "s3://path/to/dir/**"
self.assertEqual(sub_prefix(path_c, path_a), "**")
def test_make_relative(self):
paths = [
"/path/to/dir/and/more",
"/path/to/dir/and/**.zip",
"/path/to/dir/more/**/stuff",
]
base, relative_paths = make_relative(paths)
self.assertEqual(base, "/path/to/dir")
self.assertEqual(relative_paths, ["and/more", "and/**.zip", "more/**/stuff"])
paths = [
"/foo",
"/bar/**",
"/baz/**/**",
]
base, relative_paths = make_relative(paths)
self.assertEqual(base, "/")
self.assertEqual(relative_paths, ["foo", "bar/**", "baz/**/**"])
paths = [
"s3://path/to/a/and/b",
"s3://path/to/a/and/**.zip",
"s3://path/to/b/more/**/stuff",
]
base, relative_paths = make_relative(paths)
self.assertEqual(base, "s3://path/to")
self.assertEqual(relative_paths, ["a/and/b", "a/and/**.zip", "b/more/**/stuff"])
paths = [
"s3://path_a/to/dir/and/more",
"s3://path_b/to/dir/and/**.zip",
"s3://path_c/to/dir/more/**/stuff",
]
base, relative_paths = make_relative(paths)
self.assertEqual(base, "s3://")
self.assertEqual(
relative_paths,
[
"path_a/to/dir/and/more",
"path_b/to/dir/and/**.zip",
"path_c/to/dir/more/**/stuff",
],
)
def test_split(self):
prot, parts = split_path("s3://path/to/dir")
self.assertEqual(prot, "s3")
self.assertEqual(parts, ("path", "to", "dir"))
prot, parts = split_path("s3://path/to/[abc]/dir")
self.assertEqual(prot, "s3")
self.assertEqual(parts, ("path", "to", "[abc]", "dir"))
prot, parts = split_path("/path/to/dir")
self.assertEqual(prot, "")
self.assertEqual(parts, ("/", "path", "to", "dir"))
prot, parts = split_path("s3://path/to/dir/")
self.assertEqual(prot, "s3")
self.assertEqual(parts, ("path", "to", "dir"))
prot, parts = split_path("path/to/dir/")
self.assertEqual(prot, "")
self.assertEqual(parts, ("path", "to", "dir"))
def test_join(self):
path = join_path("s3", ("path", "to", "dir"))
self.assertEqual(path, "s3://path/to/dir")
path = join_path("", ("/", "path", "to", "dir"))
self.assertEqual(path, "/path/to/dir")
path = join_path("s3", ("/", "path", "to", "dir", ""))
self.assertEqual(path, "s3://path/to/dir")
path = join_path("", ("path", "to", "dir", ""))
self.assertEqual(path, "path/to/dir")
path = join_path("s3", ("path/to", "dir/"))
self.assertEqual(path, "s3://path/to/dir")
path = join_path("", "path", ("to", "a", "very", "hidden"), "dir" "/with_file.txt")
self.assertEqual(path, "path/to/a/very/hidden/dir/with_file.txt")
def test_split_and_join(self):
paths = [
"s3://path/to/a/and/b",
"s3://path/to/a/and/**.zip",
"s3://path/to/b/more/**/stuff",
"/path/to/dir/and/more",
"/path/to/dir/and/**.zip",
"*",
]
for path in paths:
self.assertEqual(join_path(*split_path(path)), path)
def test_is_glob(self):
path = "s3://path/to/a/and/b"
self.assertFalse(is_glob(path))
path = "/user/?/docs/*"
self.assertTrue(is_glob(path))
path = "/user/1/docs/file.txt"
self.assertFalse(is_glob(path))
path = r"/user/\?/docs/*"
self.assertTrue(is_glob(path))
path = r"/user/\?/docs/\*"
self.assertFalse(is_glob(path))
def test_escape_glob(self):
path = "/user/?/docs/*"
self.assertEqual(_escape_glob(path), "/user/\u2582/docs/\u2581")
path = "/user/[abc]/docs/*"
self.assertEqual(_escape_glob(path), "/user/\u2583abc\u2584/docs/\u2581")
path = "/user/[abc]/docs/[a-z]/\\*"
self.assertEqual(_escape_glob(path), "/user/\u2583abc\u2584/docs/\u2583a-z\u2584/\\*")
path = "/no/glob/here"
self.assertEqual(_escape_glob(path), path)
def test_unescape_glob(self):
path = "/user/?/docs/*"
self.assertEqual(_unescape_glob(_escape_glob(path)), path)
path = "/user/[abc]/docs/*"
self.assertEqual(_unescape_glob(_escape_glob(path)), path)
path = "/user/[abc]/docs/[a-z]/\\*"
self.assertEqual(_unescape_glob(_escape_glob(path)), path)
path = "/no/glob/here"
self.assertEqual(_unescape_glob(_escape_glob(path)), path)
def test_split_glob(self):
path = "/user/_/docs/*"
self.assertEqual(split_glob(path), ("/user/_/docs", "*"))
path = "/user/?/docs/*"
self.assertEqual(split_glob(path), ("/user", "?/docs/*"))
path = "/user/[abc]/docs/*"
self.assertEqual(split_glob(path), ("/user", "[abc]/docs/*"))
path = "/user/\\[abc\\]/docs/[a-z]/\\*"
self.assertEqual(split_glob(path), ("/user/\\[abc\\]/docs", "[a-z]/\\*"))
path = "/no/glob/here"
self.assertEqual(split_glob(path), ("/no/glob/here", ""))
|
dolma-main
|
tests/python/test_paths.py
|
"""
Unit tests for taggers/*.py
@kylel
"""
from unittest import TestCase
from dolma.core.data_types import Document
from dolma.taggers.gopher import GopherTagger
class TestGopherTagger(TestCase):
def test_predict_short(self):
tagger = GopherTagger()
doc = Document(source="", version="", id="", text="This is a test.")
doc_result = tagger.predict(doc=doc)
d = doc_result.to_json()
self.assertEqual(len(d["spans"]), 13)
self.assertEqual(
d["spans"][0],
{
"start": 0,
"end": 15,
"type": "fraction_of_characters_in_most_common_2grams",
"score": 0.5,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][1],
{
"start": 0,
"end": 15,
"type": "fraction_of_characters_in_most_common_3grams",
"score": 0.5833333333333334,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][2],
{
"start": 0,
"end": 15,
"type": "fraction_of_characters_in_most_common_4grams",
"score": 1.0,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][3],
{"start": 0, "end": 15, "type": "character_count", "score": 15.0, "mention": "This is a test."},
)
self.assertEqual(
d["spans"][4],
{"start": 0, "end": 15, "type": "word_count", "score": 4.0, "mention": "This is a test."},
)
self.assertEqual(
d["spans"][5],
{"start": 0, "end": 15, "type": "median_word_length", "score": 3.0, "mention": "This is a test."},
)
self.assertEqual(
d["spans"][6],
{"start": 0, "end": 15, "type": "symbol_to_word_ratio", "score": 0.0, "mention": "This is a test."},
)
self.assertEqual(
d["spans"][7],
{
"start": 0,
"end": 15,
"type": "fraction_of_words_with_alpha_character",
"score": 1.0,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][8],
{"start": 0, "end": 15, "type": "required_word_count", "score": 0.0, "mention": "This is a test."},
)
self.assertEqual(
d["spans"][9],
{
"start": 0,
"end": 15,
"type": "fraction_of_lines_starting_with_bullet_point",
"score": 0.0,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][10],
{
"start": 0,
"end": 15,
"type": "fraction_of_lines_ending_with_ellipsis",
"score": 0.0,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][11],
{
"start": 0,
"end": 15,
"type": "fraction_of_duplicate_lines",
"score": 0.0,
"mention": "This is a test.",
},
)
self.assertEqual(
d["spans"][12],
{
"start": 0,
"end": 15,
"type": "fraction_of_characters_in_duplicate_lines",
"score": 0.0,
"mention": "This is a test.",
},
)
def test_predict_multiline(self):
tagger = GopherTagger()
text = "This is a sentence. \n \n This is another sentence.\n\n This is a third sentence."
doc = Document(source="", version="", id="", text=text)
doc_result = tagger.predict(doc=doc)
d = doc_result.to_json()
self.assertEqual(len(d["spans"]), 19)
self.assertEqual(
d["spans"][0],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_most_common_2grams",
"score": 0.3050847457627119,
"mention": text,
},
)
self.assertEqual(
d["spans"][1],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_most_common_3grams",
"score": 0.23728813559322035,
"mention": text,
},
)
self.assertEqual(
d["spans"][2],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_most_common_4grams",
"score": 0.2711864406779661,
"mention": text,
},
)
self.assertEqual(
d["spans"][3],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_5grams",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][4],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_6grams",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][5],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_7grams",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][6],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_8grams",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][7],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_9grams",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][8],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_10grams",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][9], {"start": 0, "end": 79, "type": "character_count", "score": 79.0, "mention": text}
)
self.assertEqual(
d["spans"][10], {"start": 0, "end": 79, "type": "word_count", "score": 13.0, "mention": text}
)
self.assertEqual(
d["spans"][11], {"start": 0, "end": 79, "type": "median_word_length", "score": 4.0, "mention": text}
)
self.assertEqual(
d["spans"][12], {"start": 0, "end": 79, "type": "symbol_to_word_ratio", "score": 0.0, "mention": text}
)
self.assertEqual(
d["spans"][13],
{
"start": 0,
"end": 79,
"type": "fraction_of_words_with_alpha_character",
"score": 1.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][14], {"start": 0, "end": 79, "type": "required_word_count", "score": 0.0, "mention": text}
)
self.assertEqual(
d["spans"][15],
{
"start": 0,
"end": 79,
"type": "fraction_of_lines_starting_with_bullet_point",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][16],
{
"start": 0,
"end": 79,
"type": "fraction_of_lines_ending_with_ellipsis",
"score": 0.0,
"mention": text,
},
)
self.assertEqual(
d["spans"][17],
{"start": 0, "end": 79, "type": "fraction_of_duplicate_lines", "score": 0.0, "mention": text},
)
self.assertEqual(
d["spans"][18],
{
"start": 0,
"end": 79,
"type": "fraction_of_characters_in_duplicate_lines",
"score": 0.0,
"mention": text,
},
)
def test_word_count_is_whitespace_sep(self):
tagger = GopherTagger()
text = "T h i s \n \n\n\n isoneword !!!"
doc = Document(source="", version="", id="", text=text)
doc_result = tagger.predict(doc=doc)
d = doc_result.to_json()
self.assertEqual(d["spans"][6]["type"], "word_count")
self.assertEqual(d["spans"][6]["score"], 6.0)
def test_required_word_count(self):
tagger = GopherTagger()
text = "The.and.that"
doc = Document(source="", version="", id="", text=text)
doc_result = tagger.predict(doc=doc)
d = doc_result.to_json()
self.assertEqual(d["spans"][5]["type"], "required_word_count")
self.assertEqual(d["spans"][5]["score"], 0.0)
text = "The and that"
doc = Document(source="", version="", id="", text=text)
doc_result = tagger.predict(doc=doc)
d = doc_result.to_json()
self.assertEqual(d["spans"][7]["type"], "required_word_count")
self.assertEqual(d["spans"][7]["score"], 2.0)
|
dolma-main
|
tests/python/test_taggers.py
|
import argparse
import bisect
import copy
import hashlib
import json
import multiprocessing
import os
from collections import defaultdict
from contextlib import ExitStack
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from queue import Queue
from tempfile import TemporaryDirectory
from typing import Any, Dict, Generator, List, Tuple, Type, TypeVar, Union
import blingfire
import msgspec
import numpy as np
import smart_open
import tldextract
import tqdm
from dolma.core.data_types import InputSpec, OutputSpec
from dolma.core.parallel import BaseParallelProcessor
from dolma.core.paths import glob_path
T = TypeVar("T", bound=Type["BaseStatsProcessor"])
PRONOUNS = (
("she", "her", "her", "hers", "herself"),
("he", "him", "his", "his", "himself"),
("they", "them", "their", "theirs", "themselves"),
("ze", "hir", "hir", "hirs", "hirself"),
("ze", "zir", "zir", "zirs", "zirself"),
("xey", "xem", "xyr", "xyrs", "xemself"),
("ae", "aer", "aer", "aers", "aerself"),
("e", "em", "eir", "eirs", "emself"),
("ey", "em", "eir", "eirs", "eirself"),
("fae", "faer", "faer", "faers", "faerself"),
("fey", "fem", "feir", "feirs", "feirself"),
("hu", "hum", "hus", "hus", "humself"),
("it", "it", "its", "its", "itself"),
("jee", "jem", "jeir", "jeirs", "jemself"),
("kit", "kit", "kits", "kits", "kitself"),
("ne", "nem", "nir", "nirs", "nemself"),
("peh", "pehm", "peh's", "peh's", "pehself"),
("per", "per", "per", "pers", "perself"),
("sie", "hir", "hir", "hirs", "hirself"),
("se", "sim", "ser", "sers", "serself"),
("shi", "hir", "hir", "hirs", "hirself"),
("si", "hyr", "hyr", "hyrs", "hyrself"),
("they", "them", "their", "theirs", "themself"),
("thon", "thon", "thons", "thons", "thonself"),
("ve", "ver", "vis", "vis", "verself"),
("ve", "vem", "vir", "virs", "vemself"),
("vi", "ver", "ver", "vers", "verself"),
("vi", "vim", "vir", "virs", "vimself"),
("vi", "vim", "vim", "vims", "vimself"),
("xie", "xer", "xer", "xers", "xerself"),
("xe", "xem", "xyr", "xyrs", "xemself"),
("xey", "xem", "xeir", "xeirs", "xemself"),
("yo", "yo", "yos", "yos", "yosself"),
("ze", "zem", "zes", "zes", "zirself"),
("ze", "mer", "zer", "zers", "zemself"),
("zee", "zed", "zeta", "zetas", "zedself"),
("zie", "zir", "zir", "zirs", "zirself"),
("zie", "zem", "zes", "zes", "zirself"),
("zie", "hir", "hir", "hirs", "hirself"),
("zme", "zmyr", "zmyr", "zmyrs", "zmyrself"),
)
@dataclass
class Domains:
pages: Dict[str, int] = field(default_factory=dict)
words: Dict[str, int] = field(default_factory=dict)
_size: int = 100_000
def add(self, domain: str, count_words: int, count_pages: int = 1, no_limit: bool = False) -> bool:
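        # Track a new or existing domain; new domains are rejected (returning False) once
        # more than `_size` distinct domains are already tracked, unless `no_limit` is set
        # (as when merging per-shard counts in `merge` below).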
if domain not in self.pages:
if self._size < len(self.pages) and not no_limit:
return False
self.pages[domain] = 0
self.words[domain] = 0
self.pages[domain] += count_pages
self.words[domain] += count_words
return True
def shrink(self, to_size: bool = False) -> bool:
th = 1
if to_size:
# find the threshold that will keep the top self._size domains
p = max((1 - self._size / len(self.pages)) * 100, 0)
th = max(th, round(np.percentile(sorted(self.pages.values()), p)))
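            # e.g. (illustrative numbers): with _size=100_000 and 400_000 tracked domains,
            # p = (1 - 0.25) * 100 = 75, so the 75th percentile of per-domain page counts
            # becomes the cutoff and roughly the top quarter of domains survives the filter.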
previous_size = len(self.pages)
self.pages = {k: v for k, v in self.pages.items() if v > th}
self.words = {k: v for k, v in self.words.items() if k in self.pages}
current_size = len(self.pages)
        return previous_size > current_size
def to_dict(self) -> Dict[str, Any]:
return {
"pages": {k: v for k, v in sorted(self.pages.items(), key=lambda e: -e[1])},
"words": {k: v for k, v in sorted(self.words.items(), key=lambda e: -e[1])},
}
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "Domains":
return cls(pages=d["pages"], words=d["words"])
def merge(self, other: "Domains", inplace: bool = True, shrink: bool = False) -> "Domains":
self = self if inplace else deepcopy(self)
for page in other.pages:
self.add(domain=page, count_words=other.words[page], count_pages=other.pages[page], no_limit=True)
if shrink:
self.shrink(to_size=True)
return self
@dataclass
class Counts:
documents: int = 0
tokens: int = 0
domains: Domains = field(default_factory=Domains)
pronouns: Dict[str, int] = field(default_factory=lambda: {k: 0 for k in chain.from_iterable(PRONOUNS)})
_flush: int = 250_000
_current: int = 0
def shrink(self) -> bool:
self._current += 1
if self._current >= self._flush:
self._current = 0
self.domains.shrink()
return True
return False
def add(self, text: str, url: str) -> bool:
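        # Tokenize the document with blingfire, tally pronoun occurrences, and attribute
        # the document's word count to the registered domain of `url` (e.g. "example.com").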
if not (text := text.strip()):
return False
words = [w.lower() for w in blingfire.text_to_words(text).split()]
extracted_url = tldextract.extract(url)
domain = ".".join(extracted_url[1:]).lower()
for w in words:
if w in self.pronouns:
self.pronouns[w] += 1
self.documents += 1
self.tokens += len(words)
self.domains.add(domain=domain, count_words=len(words))
self.shrink()
return True
def to_dict(self) -> Dict[str, Any]:
return {
"domains": self.domains.to_dict(),
"pronouns": {k: v for k, v in sorted(self.pronouns.items(), key=lambda e: -e[1])},
"documents": self.documents,
"words": self.tokens,
}
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "Counts":
return cls(
documents=d["documents"],
tokens=d["words"],
domains=Domains.from_dict(d["domains"]),
pronouns=d["pronouns"],
)
def merge(self, other: "Counts", inplace: bool = True, shrink: bool = False) -> "Counts":
self = self if inplace else deepcopy(self)
self.documents += other.documents
self.tokens += other.tokens
self.domains.merge(other.domains, inplace=True, shrink=shrink)
for pronoun, count in other.pronouns.items():
self.pronouns[pronoun] += count
return self
class Registry:
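    # Maps class names to stats processors; `@Registry.add` below makes each processor
    # selectable by name from the command-line entry point at the bottom of this file.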
__registry__: Dict[str, Type["BaseStatsProcessor"]] = {}
@classmethod
def add(cls, obj: T) -> T:
cls.__registry__[obj.__name__] = obj
return obj
@classmethod
def get(cls, name: str) -> Type["BaseStatsProcessor"]:
return cls.__registry__[name]
@classmethod
def all(cls) -> Generator[Tuple[str, Type["BaseStatsProcessor"]], None, None]:
yield from cls.__registry__.items()
class BaseStatsProcessor(BaseParallelProcessor):
@classmethod
def increment_progressbar( # type: ignore[override]
cls,
queue: Queue[Union[Tuple[int, ...], None]],
/,
files: int = 0,
documents: int = 0,
) -> Dict[str, int]:
return super().increment_progressbar(queue, files=files, documents=documents)
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
raise NotImplementedError()
@Registry.add
class common_crawl(BaseStatsProcessor):
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # for the data sheet, what statistics do you think we should include? I could
# do # of docs, # tokens, distribution of URLs, pronouns, s2 FOS, stack
# languages?
decoder = msgspec.json.Decoder(InputSpec)
counts = Counts()
interval = 10_000
with smart_open.open(source_path, "rb") as source_file:
for line in source_file:
document = decoder.decode(line)
counts.add(text=document.text, url=document.id)
if counts.documents % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=counts.documents % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps(counts.to_dict(), indent=2))
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
with TemporaryDirectory() as tempdir:
documents = "s3://ai2-llm/pretraining-data/sources/olmo-mix/v1/documents/common-crawl/cc_en_*/*.gz"
stats = "s3://ai2-llm/stats/olmo-mix/v1/web/common-crawl"
metadata = os.path.join(tempdir, "common-crawl")
processor = cls(
source_prefix=documents,
destination_prefix=stats,
metadata_prefix=metadata,
num_processes=num_workers,
debug=debug,
)
processor(**process_single_kwargs)
@Registry.add
class books(BaseStatsProcessor):
documents: str = "s3://ai2-llm/pretraining-data/sources/olmo-mix/v1/documents/books/*.gz"
stats: str = "s3://ai2-llm/stats/olmo-mix/v1/books/gutenberg/*.gz"
@staticmethod
# read all paths in using threads
    def _read_json(path: str) -> dict:
with smart_open.open(path, "rt") as source_file:
return msgspec.json.decode(source_file.read())
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # for the data sheet, what statistics do you think we should include? I could
# do # of docs, # tokens, distribution of URLs, pronouns, s2 FOS, stack
# languages?
decoder = msgspec.json.Decoder(InputSpec)
documents = words = 0
interval = 10_000
with smart_open.open(source_path, "rb") as source_file:
for line in source_file:
document = decoder.decode(line)
documents += 1
words += len(blingfire.text_to_words(document.text).split())
if documents % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=documents % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps({"documents": documents, "words": words}, indent=2))
@classmethod
def _merge_dicts(cls, d1, d2):
d1 = copy.deepcopy(d1)
for k, v in d2.items():
if isinstance(v, dict):
d1[k] = cls._merge_dicts(d1.get(k, {}), v)
else:
d1[k] = d1.get(k, 0) + v
return d1
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
stats_root = cls.stats.split("*", 1)[0].rstrip("/")
with TemporaryDirectory() as tempdir:
metadata = os.path.join(tempdir, hashlib.md5(cls.documents.encode()).hexdigest())
processor = cls(
source_prefix=cls.documents,
destination_prefix=stats_root,
metadata_prefix=metadata,
num_processes=num_workers,
debug=debug,
)
processor(**process_single_kwargs)
paths = list(glob_path(cls.stats))
counts: dict = {}
with multiprocessing.Pool(num_workers) as pool:
data = (cls._read_json(path) for path in paths) if debug else pool.imap(cls._read_json, paths)
for content in tqdm.tqdm(data, desc=f"Merging {cls.__name__} stats", unit=" files", total=len(paths)):
counts = cls._merge_dicts(counts, content)
summary_dest = f"{stats_root}/summary.json"
with smart_open.open(summary_dest, "wt") as destination_file:
destination_file.write(json.dumps(counts, indent=2, sort_keys=True))
@Registry.add
class wiki(books):
documents: str = "s3://ai2-llm/pretraining-data/sources/olmo-mix/v1/documents/wiki/*.gz"
stats: str = "s3://ai2-llm/stats/olmo-mix/v1/wiki/en_simple/*.gz"
@Registry.add
class cc_v1(books):
documents: str = "s3://ai2-llm/pretraining-data/sources/common-crawl/v1/documents/cc_en_*/*.gz"
stats: str = "s3://ai2-llm/stats/olmo-mix/v1/cc/v1/**/*.gz"
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
attributes = [source_path.replace("/documents/", "/attributes/c4_rules/")]
        # for the data sheet, what statistics do you think we should include? I could
# do # of docs, # tokens, distribution of URLs, pronouns, s2 FOS, stack languages?
doc_decoder = msgspec.json.Decoder(InputSpec)
attr_decoder = msgspec.json.Decoder(OutputSpec)
stats = {
"length": 0,
"count": 0,
"c4_count": 0,
"c4_length": 0,
"c4_matches": 0,
}
documents = 0
interval = 10_000
with ExitStack() as stack:
doc_file = stack.enter_context(smart_open.open(source_path, "rb"))
atts_files = [stack.enter_context(smart_open.open(path, "rb")) for path in attributes]
for doc_line, *attr_lines in zip(doc_file, *atts_files):
doc = doc_decoder.decode(doc_line)
stats["length"] += len(doc.text)
stats["count"] += 1
attrs = {}
for line in attr_lines:
attrs.update(attr_decoder.decode(line).attributes)
# C4 stats
c4_removal = attrs.get("c4_rules__c4_v1__lines_with_no_ending_punctuation", [])
stats["c4_count"] += len(c4_removal)
stats["c4_length"] += sum(s[-1] for s in c4_removal)
stats["c4_matches"] += 1 if c4_removal else 0
documents += 1
if documents % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=documents % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps(stats, indent=2))
@Registry.add
class cc_v1_c4_cleaned(books):
documents: str = "s3://ai2-llm/pretraining-data/sources/common-crawl/v1-c4-cleaned/documents/cc_en_*/*.gz"
stats: str = "s3://ai2-llm/stats/olmo-mix/v1/cc/v1_c4_cleaned/**/*.gz"
@classmethod
def gopher_rules(cls, attrs: Dict[str, List[Tuple[int, int, float]]]) -> List[Tuple[int, int, float]]:
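        # Collect spans whose Gopher-style quality signals fall outside the accepted ranges
        # (word count, median word length, symbol ratio, repetition fractions, ...); each
        # offending span is recorded with weight 1.0 and overlapping spans are merged below.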
matching_spans: List[Tuple[int, int, float]] = []
for span in attrs.get("gopher_rules__gopher_v1__word_count", []):
if span[2] < 50 or span[2] > 100000:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__median_word_length", []):
if span[2] < 3 or span[2] > 10:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__symbol_to_word_ratio", []):
if span[2] > 0.1:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_words_with_alpha_character", []):
if span[2] < 0.8:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__required_word_count", []):
if span[2] < 2:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_lines_starting_with_bullet_point", []):
if span[2] > 0.9:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_lines_ending_with_ellipsis", []):
if span[2] > 0.3:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_duplicate_lines", []):
if span[2] > 0.3:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_lines", []):
if span[2] > 0.3:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_most_common_2gram", []):
if span[2] > 0.2:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_most_common_3gram", []):
if span[2] > 0.18:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_most_common_4gram", []):
if span[2] > 0.16:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_5grams", []):
if span[2] > 0.15:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_6grams", []):
if span[2] > 0.14:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_7grams", []):
if span[2] > 0.13:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_8grams", []):
if span[2] > 0.12:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_9grams", []):
if span[2] > 0.11:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
for span in attrs.get("gopher_rules__gopher_v1__fraction_of_characters_in_duplicate_10grams", []):
if span[2] > 0.10:
bisect.insort(matching_spans, (span[0], span[1], 1.0))
return cls._merge_spans(matching_spans)
@classmethod
def _merge_spans(cls, matching_spans: List[Tuple[int, int, float]]) -> List[Tuple[int, int, float]]:
# merge spans if overlapping
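        # e.g. (illustrative): [(0, 5, 1.0), (3, 9, 1.0), (12, 15, 1.0)]
        #      -> [(0, 9, 1.0), (12, 15, 1.0)]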
merged_spans: List[Tuple[int, int, float]] = []
current_span = None
for span in matching_spans:
if span[1] - span[0] <= 0:
continue
elif current_span is None:
current_span = span
elif span[0] <= current_span[1]: # type: ignore
current_span = (current_span[0], span[1], 1.0)
else:
merged_spans.append(current_span)
current_span = span
if current_span:
merged_spans.append(current_span)
return merged_spans
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
attributes = [
source_path.replace("/documents/", "/attributes/gopher_rules/"),
source_path.replace("/documents/", "/attributes/decontamination/"),
source_path.replace("/documents/", "/attributes/hatespeech_nsfw_cc_v3/"),
source_path.replace("/documents/", "/attributes/pii_detection/"),
]
doc_decoder = msgspec.json.Decoder(InputSpec)
attr_decoder = msgspec.json.Decoder(OutputSpec)
stats = {
"length": 0,
"count": 0,
"gopher_count": 0,
"gopher_length": 0,
"gopher_matches": 0,
"decontamination_count": 0,
"decontamination_length": 0,
"decontamination_matches": 0,
"hatespeech_nsfw_count": 0,
"hatespeech_nsfw_length": 0,
"hatespeech_nsfw_matches": 0,
"pii_count": 0,
"pii_length": 0,
"pii_matches": 0,
"pii_matches_gt_5": 0,
}
documents = 0
interval = 10_000
with ExitStack() as stack:
doc_file = stack.enter_context(smart_open.open(source_path, "rb"))
atts_files = [stack.enter_context(smart_open.open(path, "rb")) for path in attributes]
for doc_line, *attr_lines in zip(doc_file, *atts_files):
doc = doc_decoder.decode(doc_line)
stats["length"] += len(doc.text)
stats["count"] += 1
attrs = {}
for line in attr_lines:
attrs.update(attr_decoder.decode(line).attributes)
# # C4 stats
# c4_removal = attrs.get('c4_rules__c4_v1__lines_with_no_ending_punctuation', [])
# stats["c4_count"] += len(c4_removal)
# stats["c4_length"] += sum(s[-1] for s in c4_removal)
# stats["c4_matches"] += 1 if c4_removal else 0
# Gopher stats
gopher_removal = cls.gopher_rules(attrs)
stats["gopher_count"] += len(gopher_removal)
stats["gopher_length"] += sum(s[1] - s[0] for s in gopher_removal)
stats["gopher_matches"] += 1 if gopher_removal else 0
# Deduplication stats
decontamination_removal = attrs.get("bff_duplicate_paragraph_spans_decontamination", [])
stats["decontamination_count"] += len(decontamination_removal)
stats["decontamination_length"] += sum(s[1] - s[0] for s in decontamination_removal)
stats["decontamination_matches"] += 1 if decontamination_removal else 0
# jigsaw stats
jigsaw_match: List[Tuple[int, int, float]] = []
nsfw = attrs.get("hatespeech_nsfw_cc_v3__jigsaw_nsfw_sencence_v2____label__nsfw", [])
for span in nsfw:
if span[2] > 0.4:
bisect.insort(jigsaw_match, (span[0], span[1], 1.0))
toxic = attrs.get("hatespeech_nsfw_cc_v3__jigsaw_hatespeech_sentence_v2____label__toxic", [])
for span in toxic:
if span[2] > 0.4:
bisect.insort(jigsaw_match, (span[0], span[1], 1.0))
jigsaw_match = cls._merge_spans(jigsaw_match)
stats["hatespeech_nsfw_count"] += len(jigsaw_match)
stats["hatespeech_nsfw_length"] += sum(s[1] - s[0] for s in jigsaw_match)
stats["hatespeech_nsfw_matches"] += 1 if jigsaw_match else 0
# PII stats
pii_removal = (
attrs.get("pii_detection__pii_regex_with_counts_fast_v2__EMAIL_ADDRESS", [])
+ attrs.get("pii_detection__pii_regex_with_counts_fast_v2__PHONE_NUMBER", [])
+ attrs.get("pii_detection__pii_regex_with_counts_fast_v2__IP_ADDRESS", [])
)
stats["pii_count"] += len(pii_removal)
stats["pii_length"] += sum(s[1] - s[0] for s in pii_removal)
stats["pii_matches"] += 1 if pii_removal else 0
stats["pii_matches_gt_5"] += 1 if len(pii_removal) > 5 else 0
documents += 1
if documents % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=documents % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps(stats, indent=2))
class C4InputSpec(InputSpec):
metadata: Dict[str, Any] = msgspec.field(default_factory=dict)
@Registry.add
class c4(BaseStatsProcessor):
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
attrs_path = source_path.replace("/documents/", "/attributes/decontamination/")
documents_decoder = msgspec.json.Decoder(C4InputSpec)
attributes_decoder = msgspec.json.Decoder(OutputSpec)
counts = Counts()
interval = 10_000
with smart_open.open(source_path, "rb") as doc_file, smart_open.open(attrs_path, "rb") as attrs_file:
for source_line, attributes_line in zip(doc_file, attrs_file):
document = documents_decoder.decode(source_line)
attributes = attributes_decoder.decode(attributes_line)
text = document.text
for start, end, _ in sorted(
attributes.attributes.get("bff_duplicate_paragraph_spans_decontamination", []),
key=lambda t: -t[1],
):
# remove duplicate
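                    # spans are applied back-to-front (sorted by descending end offset)
                    # so that earlier character offsets remain valid as text is removed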
text = text[:start] + text[end:]
counts.add(text=text, url=document.metadata["url"])
if counts.documents % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=counts.documents % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps(counts.to_dict(), indent=2))
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
with TemporaryDirectory() as tempdir:
documents = "s3://ai2-llm/pretraining-data/sources/c4/v0/documents/train/*.gz"
stats = "s3://ai2-llm/stats/olmo-mix/v1/web/c4"
metadata = os.path.join(tempdir, "c4")
processor = cls(
source_prefix=documents,
destination_prefix=stats,
metadata_prefix=metadata,
num_processes=num_workers,
debug=debug,
)
processor(**process_single_kwargs)
@Registry.add
class s2(BaseStatsProcessor):
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
attrs_path = source_path.replace("/documents/", "/attributes/decontamination/")
documents_decoder = msgspec.json.Decoder(C4InputSpec)
attributes_decoder = msgspec.json.Decoder(OutputSpec)
interval = 10_000
counts: dict = {
f: {"year": {}, "s2fos": {}, "documents": 0, "tokens": 0} for f in ["full_text", "abstract"]
}
key = "full_text" if "dataset=s2orc" in source_path else "abstract"
cnt = 0
with smart_open.open(source_path, "rb") as doc_file, smart_open.open(attrs_path, "rb") as attrs_file:
for source_line, attributes_line in zip(doc_file, attrs_file):
cnt += 1
document = documents_decoder.decode(source_line)
attributes = attributes_decoder.decode(attributes_line)
text = document.text
for start, end, _ in sorted(
attributes.attributes.get("bff_duplicate_paragraph_spans_decontamination", []),
key=lambda t: -t[1],
):
# remove duplicate
text = text[:start] + text[end:]
if not (text := text.strip()):
continue
counts[key]["documents"] += 1
counts[key]["tokens"] += len(blingfire.text_to_words(text).split())
if document.metadata["year"] not in counts[key]["year"]:
counts[key]["year"][document.metadata["year"]] = 0
counts[key]["year"][document.metadata["year"]] += 1
if len(document.metadata["s2fieldsofstudy"]) == 0:
document.metadata["s2fieldsofstudy"] = ["null"]
for fos in document.metadata["s2fieldsofstudy"]:
if fos not in counts[key]["s2fos"]:
counts[key]["s2fos"][fos] = 0
counts[key]["s2fos"][fos] += 1
if cnt % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=cnt % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps(counts, indent=2))
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
with TemporaryDirectory() as tempdir:
documents = (
"s3://ai2-llm/pretraining-data/sources/s2/v3-fos/documents/dataset=*/split=train/part_id=*/*.gz"
)
stats = "s3://ai2-llm/stats/olmo-mix/v1/papers/peS2o"
metadata = os.path.join(tempdir, "s2")
processor = cls(
source_prefix=documents,
destination_prefix=stats,
metadata_prefix=metadata,
num_processes=num_workers,
debug=debug,
)
processor(**process_single_kwargs)
class StackInputSpec(InputSpec):
metadata: Dict[str, Any] = msgspec.field(default_factory=dict)
attributes: Dict[str, Any] = msgspec.field(default_factory=dict)
@Registry.add
class stack(BaseStatsProcessor):
@classmethod
def process_single(
cls, source_path: str, destination_path: str, queue: Queue[Union[Tuple[int, ...], None]], **kwargs: Any
):
attrs_basic = source_path.replace("/documents/", "/attributes/basic/")
attrs_code_secrets = source_path.replace("/documents/", "/attributes/code_secrets/")
# attrs_dedupe_documents = source_path.replace("/documents/", "/attributes/dedupe_documents/")
attrs_pii = source_path.replace("/documents/", "/attributes/pii/")
documents_decoder = msgspec.json.Decoder(StackInputSpec)
attributes_decoder = msgspec.json.Decoder(OutputSpec)
interval = 10_000
counts: dict = {"extension": defaultdict(int), "license": defaultdict(int), "documents": 0, "tokens": 0}
cnt = 0
with ExitStack() as stack:
doc_file = stack.enter_context(smart_open.open(source_path, "rb"))
attrs_basic_file = stack.enter_context(smart_open.open(attrs_basic, "rb"))
attrs_code_secrets_file = stack.enter_context(smart_open.open(attrs_code_secrets, "rb"))
# attrs_dedupe_documents_file = stack.enter_context(smart_open.open(attrs_dedupe_documents, "rb"))
attrs_pii_file = stack.enter_context(smart_open.open(attrs_pii, "rb"))
# with smart_open.open(source_path, "rb") as doc_file, smart_open.open(attrs_path, "rb") as attrs_file:
for source_line, *attributes_line in zip(
doc_file,
attrs_basic_file,
attrs_code_secrets_file,
# attrs_dedupe_documents_file,
attrs_pii_file,
):
cnt += 1
document = documents_decoder.decode(source_line)
for ln in attributes_line:
attributes = attributes_decoder.decode(ln)
document.attributes.update(attributes.attributes)
if document.attributes["basic__random_number_v1__random"][-1][-1] > 0.996:
# test set; see
# https://github.com/allenai/LLM/blob/642d0fad3fb2efd816af507250c4c65c8678cb44/pretrain_data/the_stack/v2-mixer/ablations/v2-mixer-held-out.json#L15
continue
counts["documents"] += 1
counts["tokens"] += len(blingfire.text_to_words(document.text).split())
counts["extension"][document.metadata["ext"]] += 1
for license in document.metadata["max_forks_repo_licenses"]:
counts["license"][license] += 1
if cnt % interval == 0:
cls.increment_progressbar(queue, documents=interval)
cls.increment_progressbar(queue, files=1, documents=cnt % interval)
with smart_open.open(destination_path, "wt") as destination_file:
destination_file.write(json.dumps(counts, indent=2))
@staticmethod
# read all paths in using threads
def _read_json(path: str) -> dict:
with smart_open.open(path, "rt") as source_file:
content = msgspec.json.decode(source_file.read())
return content
@classmethod
def _merge_dicts(cls, d1, d2):
d1 = copy.deepcopy(d1)
for k, v in d2.items():
if isinstance(v, dict):
d1[k] = cls._merge_dicts(d1.get(k, {}), v)
else:
d1[k] = d1.get(k, 0) + v
return d1
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
with TemporaryDirectory() as tempdir:
documents = "s3://ai2-llm/pretraining-data/sources/stack-dedup/v2-mixer/documents/*.gz"
stats = "s3://ai2-llm/stats/olmo-mix/v1/code/stack"
metadata = os.path.join(tempdir, "stack")
processor = cls(
source_prefix=documents,
destination_prefix=stats,
metadata_prefix=metadata,
num_processes=num_workers,
debug=debug,
)
processor(**process_single_kwargs)
paths = list(glob_path(f"{stats}/*.gz"))
counts: dict = {}
with multiprocessing.Pool(num_workers) as pool:
data = (cls._read_json(path) for path in paths) if debug else pool.imap(cls._read_json, paths)
for content in tqdm.tqdm(data, desc="Merging code stats", unit=" files", total=len(paths)):
counts = cls._merge_dicts(counts, content)
with smart_open.open("s3://ai2-llm/stats/olmo-mix/v1/code/summary.json", "wt") as destination_file:
destination_file.write(json.dumps(counts, indent=2, sort_keys=True))
# # # BELOW HERE: AGGREGATION # # #
@Registry.add
class web(BaseStatsProcessor):
@staticmethod
# read all paths in using threads
def _read_json(path: str) -> Counts:
with smart_open.open(path, "rt") as source_file:
content = msgspec.json.decode(source_file.read())
return Counts.from_dict(content)
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
paths = list(
chain(
glob_path("s3://ai2-llm/stats/olmo-mix/v1/web/c4/*"),
glob_path("s3://ai2-llm/stats/olmo-mix/v1/web/common-crawl/**/*"),
)
)
assert len(paths), "Run c4 and common-crawl first"
with multiprocessing.Pool(num_workers) as pool:
data = (cls._read_json(path) for path in paths) if debug else pool.imap(cls._read_json, paths)
counts = Counts()
for content in tqdm.tqdm(data, desc="Merging web stats", unit=" files", total=len(paths)):
counts.merge(content, shrink=True)
with smart_open.open("s3://ai2-llm/stats/olmo-mix/v1/web/summary.json", "wt") as destination_file:
destination_file.write(json.dumps(counts.to_dict(), indent=2))
@Registry.add
class papers(BaseStatsProcessor):
@staticmethod
# read all paths in using threads
def _read_json(path: str) -> dict:
with smart_open.open(path, "rt") as source_file:
return json.loads(source_file.read())
@classmethod
def cli(cls, num_workers: int = 1, debug: bool = False, **process_single_kwargs: Any) -> None:
paths = list(glob_path("s3://ai2-llm/stats/olmo-mix/v1/papers/peS2o/**/*.gz"))
assert len(paths), "Run s2 first"
with multiprocessing.Pool(num_workers) as pool:
data = (cls._read_json(path) for path in paths) if debug else pool.imap(cls._read_json, paths)
counts: dict = {
f: {"year": {}, "s2fos": {}, "documents": 0, "tokens": 0} for f in ["full_text", "abstract"]
}
for content in tqdm.tqdm(data, desc="Merging web stats", unit=" files", total=len(paths)):
for key, values in content.items():
counts[key]["documents"] += values["documents"]
counts[key]["tokens"] += values["tokens"]
for year, count in values["year"].items():
if year not in counts[key]["year"]:
counts[key]["year"][year] = 0
counts[key]["year"][year] += count
for fos, count in values["s2fos"].items():
if fos not in counts[key]["s2fos"]:
counts[key]["s2fos"][fos] = 0
counts[key]["s2fos"][fos] += count
with smart_open.open("s3://ai2-llm/stats/olmo-mix/v1/papers/summary.json", "wt") as destination_file:
destination_file.write(json.dumps(counts, indent=2))
if __name__ == "__main__":
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["PYTHONBREAKPOINT"] = "ipdb.set_trace"
ap = argparse.ArgumentParser()
ap.add_argument("stat", choices=[name for name, _ in Registry.all()])
ap.add_argument("--debug", action="store_true")
ap.add_argument("--num-workers", type=int, default=multiprocessing.cpu_count())
args = ap.parse_args()
Registry.get(args.stat).cli(num_workers=args.num_workers, debug=args.debug)
|
dolma-main
|
scripts/stats.py
|
blockwise-parallel-transformer-1-main
|
bpt/__init__.py
|
|
# coding=utf-8
# Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Optional, Tuple
import json
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from flax.linen import partitioning as nn_partitioning
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
from transformers.generation.flax_logits_process import FlaxLogitsProcessorList
from transformers import AutoTokenizer
from jax.sharding import PartitionSpec as PS
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from bpt.tools.utils import function_args_to_config, load_pickle, open_file
from bpt.tools.jax_utils import (
with_sharding_constraint, get_jax_mesh, get_gradient_checkpoint_policy
)
from bpt.blocks.memeff import AttentionBlock as MemEffAttentionBlock
from bpt.blocks.blockwise_parallel_v1 import AttentionBlock as BlockwiseParallelBlock_v1
from bpt.blocks.blockwise_parallel import AttentionBlock as BlockwiseParallelBlock, Blockwise_LM_Head
from bpt.blocks.vanilla import AttentionBlock as VanillaAttentionBlock
GPT_STANDARD_CONFIGS = {
# 1.3B
'1b': {
'vocab_size': 50432,
'n_embd': 2048,
'n_inner': 8192,
'n_layer': 24,
'n_head': 16,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 2.7B
'3b': {
'vocab_size': 50432,
'n_embd': 2560,
'n_inner': 10240,
'n_layer': 32,
'n_head': 32,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 80,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 6.7B
'7b': {
'vocab_size': 50432,
'n_embd': 4096,
'n_inner': 16384,
'n_layer': 32,
'n_head': 32,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 13B
'13b': {
'vocab_size': 50432,
'n_embd': 5120,
'n_inner': 20480,
'n_layer': 40,
'n_head': 40,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 30B
'30b': {
'vocab_size': 50432,
'n_embd': 7168,
'n_inner': 28672,
'n_layer': 48,
'n_head': 56,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
# 70B
'70b': {
'vocab_size': 50432,
'n_embd': 8192,
'n_inner': 32768,
'n_layer': 80,
'n_head': 64,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 128,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
'debug': { # A small model for debugging
'vocab_size': 50432,
'n_embd': 128,
'n_inner': 256,
'n_layer': 2,
'n_head': 4,
'n_positions': 16384,
'initializer_range': 0.02,
'layer_norm_epsilon': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
'rotary_dim': 32,
'bos_token_id': 50256,
'eos_token_id': 50256,
'n_real_tokens': 50257,
},
}
class GPTConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GPTModel`]. It is used to instantiate a GPT-J
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GPT-J
[EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
[`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50432):
Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPTModel`].
n_positions (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 28):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
rotary_dim (`int`, *optional*, defaults to 64):
Number of dimensions in the embedding that Rotary Position Embedding is applied to.
        n_inner (`int`, *optional*, defaults to `None`):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example:
```python
>>> from transformers import GPTModel, GPTConfig
>>> # Initializing a GPT-J 6B configuration
>>> configuration = GPTConfig()
>>> # Initializing a model from the configuration
>>> model = GPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "gpt"
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=50432,
n_positions=2048,
n_embd=4096,
n_layer=28,
n_head=16,
rotary_dim=64,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.0,
embd_pdrop=0.0,
attn_pdrop=0.0,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
scale_attn_weights=True,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
tie_word_embeddings=False,
gradient_checkpointing='nothing_saveable',
n_real_tokens=50257,
fcm_min_ratio=0.0,
fcm_max_ratio=0.0,
causal=True,
attn_type='dot',
q_chunk_size=1024,
k_chunk_size=2048,
scan_layers=True,
param_scan_axis=0,
float32_logits=False,
**kwargs
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.rotary_dim = rotary_dim
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.n_real_tokens = n_real_tokens
self.fcm_min_ratio = fcm_min_ratio
self.fcm_max_ratio = fcm_max_ratio
self.causal = causal
self.attn_type = attn_type
self.q_chunk_size = q_chunk_size
self.k_chunk_size = k_chunk_size
self.scan_layers = scan_layers
self.param_scan_axis = param_scan_axis
self.float32_logits = float32_logits
if self.n_real_tokens is None:
self.n_real_tokens = self.vocab_size
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
@classmethod
def get_default_config(cls, updates=None):
none_arg_types = dict(
n_inner=int,
rotary_dim=int,
)
config = function_args_to_config(cls.__init__, none_arg_types=none_arg_types)
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@staticmethod
def get_jax_mesh(axis_dims):
return get_jax_mesh(axis_dims, ('dp', 'fsdp', 'mp'))
@staticmethod
def get_partition_rules(scan_layers=False):
""" Parition rules for GPT. Note that these rules are orderd, so that
the beginning rules match first. It is important to use
PartitionSpec() instead of None here because JAX does not treat
None as a pytree leaf.
"""
if scan_layers:
return (
('transformer/wte/embedding', PS('mp', 'fsdp')),
('attn/(k_proj|q_proj|v_proj)/kernel', PS(None, 'fsdp', 'mp')),
('attn/out_proj/kernel', PS(None, 'mp', 'fsdp')),
('attn/fc_in/kernel', PS(None, 'fsdp', 'mp')),
('attn/fc_in/bias', PS(None, 'mp')),
('attn/fc_out/kernel', PS(None, 'mp', 'fsdp')),
('attn/fc_out/bias', PS(None, None)),
('ln_[0-9]+/bias', PS(None, None)),
('[0-9]+/ln_[0-9]+/scale', PS(None, None)),
('ln_f/bias', PS(None)),
('ln_f/scale', PS(None)),
('lm_head/kernel', PS('fsdp', 'mp')),
('lm_head/bias', PS('mp')),
('.*', PS(None)),
)
else:
return (
('transformer/wte/embedding', PS('mp', 'fsdp')),
('attn/(k_proj|q_proj|v_proj)/kernel', PS('fsdp', 'mp')),
('attn/out_proj/kernel', PS('mp', 'fsdp')),
('attn/fc_in/kernel', PS('fsdp', 'mp')),
('attn/fc_in/bias', PS('mp')),
('attn/fc_out/kernel', PS('mp', 'fsdp')),
('attn/fc_out/bias', PS(None)),
('ln_[0-9]+/bias', PS(None)),
('[0-9]+/ln_[0-9]+/scale', PS(None)),
('ln_f/bias', PS(None)),
('ln_f/scale', PS(None)),
('lm_head/kernel', PS('fsdp', 'mp')),
('lm_head/bias', PS('mp')),
('.*', PS(None)),
)
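# Hedged matching sketch (descriptive comments only, not part of the original
# file): the rules above are consumed first-match-wins against flattened
# parameter paths, e.g. via match_partition_rules from bpt.tools.jax_utils as
# used in bpt/train.py. With scan_layers=False, a path such as
# 'transformer/wte/embedding' hits the first rule and gets PS('mp', 'fsdp'),
# while any path no earlier rule matches falls through to the '.*' catch-all
# and gets PS(None), i.e. it is left fully replicated.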
@staticmethod
def get_weight_decay_exclusions():
return (
'ln_[0-9]+/bias', 'ln_[0-9]+/scale', 'ln_f/bias', 'ln_f/scale',
'bias'
)
@staticmethod
def rng_keys():
return ('params', 'dropout', 'fcm')
@staticmethod
def get_tokenizer_config(updates=None):
config = ConfigDict()
config.name = 'EleutherAI/gpt-j-6B'
config.bos_token = '<|endoftext|>'
config.eos_token = '<|endoftext|>'
config.pad_token = '<|extratoken_40|>'
config.cls_token = '<|extratoken_41|>'
config.mask_token = '<|extratoken_42|>'
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_tokenizer(cls, config, padding_side='left', truncation_side='right'):
config = cls.get_tokenizer_config(config)
return AutoTokenizer.from_pretrained(
config.name,
bos_token=config.bos_token,
eos_token=config.eos_token,
pad_token=config.pad_token,
cls_token=config.cls_token,
mask_token=config.mask_token,
padding_side=padding_side,
truncation_side=truncation_side,
)
@staticmethod
def load_pretrained(name, dtype=jnp.float32):
with jax.default_device(jax.devices("cpu")[0]):
params = FlaxGPTForCausalLM.from_pretrained(
name, _do_init=False, dtype=dtype
)[1]
params = freeze({'params': params})
return jax.device_get(params)
@classmethod
def load_config(cls, path):
if path in GPT_STANDARD_CONFIGS:
return cls.from_dict(GPT_STANDARD_CONFIGS[path])
load_type, load_path = path.split('::', 1)
if load_type == 'pickle':
return cls.from_dict(load_pickle(load_path)['gpt_config'])
elif load_type == 'json':
with open_file(load_path, 'r') as fin:
raw_config = fin.read()
return cls.from_dict(json.loads(raw_config))
elif load_type == 'huggingface':
return cls.from_pretrained(load_path)
else:
raise ValueError(f'Unsupported load config type: {load_type}')
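# Hedged usage notes (not part of the original file; the example paths are
# illustrative, not real files): GPTConfig.load_config takes either a key in
# GPT_STANDARD_CONFIGS or a '<type>::<path>' string, e.g.
#   GPTConfig.load_config('json::/path/to/gpt_config.json')
#   GPTConfig.load_config('pickle::/path/to/checkpoint.pkl')   # reads the 'gpt_config' entry
#   GPTConfig.load_config('huggingface::EleutherAI/gpt-j-6B')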
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "gpt"
_CONFIG_FOR_DOC = "GPTConfig"
GPT_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
etc.).
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`GPTConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified, all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
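# Hedged worked example (not part of the original file): how the sinusoidal
# table above is combined with rotate_every_two inside forward_qkv. The shapes
# are toy values chosen only for illustration.
def _rotary_embedding_example():
    import jax.numpy as jnp
    dim, seq, heads = 4, 3, 2
    table = create_sinusoidal_positions(num_pos=8, dim=dim)        # (8, dim): [sin | cos]
    sincos = jnp.take(table, jnp.arange(seq)[None, :], axis=0)     # (1, seq, dim)
    sincos = jnp.split(sincos, 2, axis=-1)                         # sin half, cos half
    query = jnp.ones((1, seq, heads, dim))                         # (batch, seq, heads, head_dim)
    return apply_rotary_pos_emb(query, sincos)                     # same shape as query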
class FlaxGPTBlock(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
hidden_size = self.config.hidden_size
inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
attention_blocks = {
# default vanilla transformer (Vaswani et al).
'vanilla': VanillaAttentionBlock,
# default memory efficient transformer (Rabe et al and Dao et al).
'memeff': MemEffAttentionBlock,
# default blockwise parallel transformer (Liu et al).
'blockwise_parallel': BlockwiseParallelBlock,
# a less clean variant of the blockwise parallel transformer, used in the paper.
'blockwise_parallel_v1': BlockwiseParallelBlock_v1,
}
if self.config.attn_type in attention_blocks:
Block = attention_blocks[self.config.attn_type]
else:
raise ValueError(f"Unknown attention type {self.config.attn_type}")
self.attn = Block(
self.config.q_chunk_size,
self.config.k_chunk_size,
self.config.hidden_size,
self.config.num_attention_heads,
self.config.rotary_dim,
inner_dim,
self.config.layer_norm_epsilon,
self.config.activation_function,
self.config.attn_pdrop,
self.config.resid_pdrop,
self.config.max_position_embeddings,
self.dtype,
self.config.causal,
policy=self.config.gradient_checkpointing,
prevent_cse=not self.config.scan_layers,
float32_logits=self.config.float32_logits,
)
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask=None,
):
attn_outputs = self.attn(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
)
attn_weights = None
if self.config.scan_layers: # NOTE: this is a hack to work with scan_layers
outputs = attn_outputs, None
else:
outputs = (attn_outputs, attn_weights) if output_attentions else (attn_outputs,)
return outputs
class FlaxGPTPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPTConfig
base_model_prefix = "transformer"
module_class: nn.Module = None
def __init__(
self,
config: GPTConfig,
input_shape: Tuple = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
if self.config.add_cross_attention:
encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
encoder_attention_mask = attention_mask
module_init_outputs = self.module.init(
rngs,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states,
encoder_attention_mask,
return_dict=False,
)
else:
module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
random_params = module_init_outputs["params"]
if params is not None:
random_params = flatten_dict(unfreeze(random_params))
params = flatten_dict(unfreeze(params))
for missing_key in self._missing_keys:
params[missing_key] = random_params[missing_key]
self._missing_keys = set()
return freeze(unflatten_dict(params))
else:
return random_params
def init_cache(self, batch_size, max_length):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
"""
# init input variables to retrieve cache
input_ids = jnp.ones((batch_size, max_length))
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
init_variables = self.module.init(
jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
)
return init_variables["cache"]
def _get_logits_processor(self,*args, **kwargs) -> FlaxLogitsProcessorList:
processors = super()._get_logits_processor(*args, **kwargs)
def squash_extra_tokens(input_ids, scores, cur_len):
return scores.at[:, self.config.n_real_tokens:].set(-float('inf'))
processors.append(squash_extra_tokens)
return processors
@add_start_docstrings_to_model_forward(GPT_INPUTS_DOCSTRING)
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
params: dict = None,
past_key_values: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
batch_size, sequence_length = input_ids.shape
if position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
if attention_mask is None:
attention_mask = jnp.ones((batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# If past_key_values are passed, the cache is already initialized, and a private init_cache flag has to be passed down to ensure the cache is used. The cache must also be marked as mutable so that it can be updated by the attention modules.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
not train,
False,
output_attentions,
output_hidden_states,
return_dict,
rngs=rngs,
mutable=mutable,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past_key_values = outputs
outputs["past_key_values"] = unfreeze(past_key_values["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past_key_values = outputs
outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
return outputs
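# Hedged usage sketch (not part of the original file): wiring init_cache into a
# forward pass, as described in the init_cache docstring above. `model` is
# assumed to be an already-initialized FlaxGPTForCausalLM instance; the
# max_length of 128 is an arbitrary illustrative value.
def _cached_forward_sketch(model, input_ids):
    import jax.numpy as jnp
    batch_size, seq_length = input_ids.shape
    past_key_values = model.init_cache(batch_size, max_length=128)
    # position_ids must be provided explicitly whenever past_key_values is passed.
    position_ids = jnp.broadcast_to(jnp.arange(seq_length)[None, :], (batch_size, seq_length))
    return model(input_ids, position_ids=position_ids, past_key_values=past_key_values)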
class FlaxGPTBlockCollection(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if not deterministic and self.config.fcm_max_ratio > 0:
# Apply forgetful causal mask
batch_size, seq_length = hidden_states.shape[0], hidden_states.shape[1]
fcm_ratio = jax.random.uniform(
self.make_rng('fcm'), shape=(batch_size, 1, 1, 1),
minval=self.config.fcm_min_ratio,
maxval=self.config.fcm_max_ratio
)
fcm_mask = jax.random.uniform(
self.make_rng('fcm'),
shape=(batch_size, 1, seq_length, seq_length)
) > fcm_ratio
fcm_mask = fcm_mask.at[:, :, :, 0].set(True)
fcm_mask = fcm_mask.astype('bool')
else:
fcm_mask = None
block = FlaxGPTBlock
if self.config.gradient_checkpointing != '':
FlaxGPT2CheckpointBlock = nn.remat(
block, static_argnums=(3, 4, 5, 6),
prevent_cse=not self.config.scan_layers,
policy=get_gradient_checkpoint_policy(self.config.gradient_checkpointing)
)
block = FlaxGPT2CheckpointBlock
if self.config.scan_layers:
initializing = self.is_mutable_collection('params')
params_spec = (
self.config.param_scan_axis if initializing else
nn_partitioning.ScanIn(self.config.param_scan_axis))
cache_spec = 0
hidden_states, _ = nn.scan(
block,
variable_axes={
'params': params_spec,
'cache': cache_spec,
'intermediates': 0
},
split_rngs={
'params': True,
'dropout': True
},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
length=self.config.num_hidden_layers,
metadata_params={nn.PARTITION_NAME: 'scan_decoder_layer'},
)(config=self.config, name='scan_decoder', dtype=self.dtype)(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
else:
blocks = [
block(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
]
for block in blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions += (layer_outputs[1],)
# this contains possible `None` values - `FlaxGPTModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
class FlaxGPTModule(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.embed_dim = self.config.hidden_size
self.wte = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
self.h = FlaxGPTBlockCollection(self.config, dtype=self.dtype)
self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = outputs[1] + (hidden_states,)
outputs = (hidden_states, all_hidden_states) + outputs[2:]
else:
outputs = (hidden_states,) + outputs[1:]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=outputs[1],
attentions=outputs[-1],
)
@add_start_docstrings(
"The bare GPT Model transformer outputting raw hidden-states without any specific head on top.",
GPT_START_DOCSTRING,
)
class FlaxGPTModel(FlaxGPTPreTrainedModel):
module_class = FlaxGPTModule
append_call_sample_docstring(
FlaxGPTModel,
_CHECKPOINT_FOR_DOC,
FlaxCausalLMOutput,
_CONFIG_FOR_DOC,
)
class FlaxGPTForCausalLMModule(nn.Module):
config: GPTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.transformer = FlaxGPTModule(self.config, dtype=self.dtype)
if self.config.attn_type == 'blockwise_parallel' or self.config.attn_type == 'blockwise_parallel_v1':
self.lm_head = Blockwise_LM_Head(self.config.vocab_size,
self.config.q_chunk_size, dtype=self.dtype,
prevent_cse=not self.config.scan_layers)
else:
self.lm_head = nn.Dense(
self.config.vocab_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
batch_size, seq_length = input_ids.shape
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
position_ids = jnp.broadcast_to(
jnp.clip(jnp.cumsum(attention_mask, axis=-1) - 1, a_min=0),
(batch_size, seq_length)
)
outputs = self.transformer(
input_ids,
attention_mask,
position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
else:
lm_logits = self.lm_head(hidden_states)
if not return_dict:
return (lm_logits,) + outputs[1:]
return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings(
"""
The GPT Model transformer with a language modeling head on top.
""",
GPT_START_DOCSTRING,
)
class FlaxGPTForCausalLM(FlaxGPTPreTrainedModel):
module_class = FlaxGPTForCausalLMModule
def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None):
# initializing the cache
batch_size, seq_length = input_ids.shape
past_key_values = self.init_cache(batch_size, max_length)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since GPT uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if attention_mask is not None:
position_ids = attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"attention_mask": extended_attention_mask,
"position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
return model_kwargs
append_call_sample_docstring(
FlaxGPTForCausalLM,
_CHECKPOINT_FOR_DOC,
FlaxCausalLMOutput,
_CONFIG_FOR_DOC,
)
|
blockwise-parallel-transformer-1-main
|
bpt/model.py
|
import dataclasses
import pprint
from functools import partial
import re
from tqdm import tqdm, trange
import numpy as np
import bpt.tools.utils as utils
import jax
import jax.numpy as jnp
from jax.experimental.pjit import pjit
from jax.sharding import PartitionSpec as PS
import flax
from flax import linen as nn
from flax.jax_utils import prefetch_to_device
from flax.training.train_state import TrainState
import optax
from bpt.data import Dataset, TextProcessor
from bpt.tools.checkpoint import StreamingCheckpointer
from bpt.tools.optimizers import OptimizerFactory
from bpt.tools.jax_utils import (
JaxRNG, next_rng, match_partition_rules,
cross_entropy_loss_and_accuracy, named_tree_map, global_norm,
set_random_seed, average_metrics, get_weight_decay_mask,
make_shard_and_gather_fns, with_sharding_constraint, tree_apply, get_metrics,
)
from bpt.model import GPTConfig, FlaxGPTForCausalLMModule
from bpt.blocks.blockwise_parallel import blockwise_cross_entropy
FLAGS, FLAGS_DEF = utils.define_flags_with_default(
seed=42,
initialize_jax_distributed=False,
mesh_dim='1,-1,1',
total_steps=10000,
load_gpt_config='',
update_gpt_config='',
load_checkpoint='',
load_dataset_state='',
log_freq=50,
save_model_freq=0,
save_milestone_freq=0,
eval_steps=0,
tokenizer=GPTConfig.get_tokenizer_config(),
text_processor=TextProcessor.get_default_config(),
train_dataset=Dataset.get_default_config(),
eval_dataset=Dataset.get_default_config(),
optimizer=OptimizerFactory.get_default_config(),
checkpointer=StreamingCheckpointer.get_default_config(),
gpt=GPTConfig.get_default_config(),
logger=utils.WandBLogger.get_default_config(),
log_all_worker=False,
profile_steps=0,
stop_after_profile=True,
)
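# Hedged launch sketch (not part of the original file; the module path and flag
# syntax depend on bpt.tools.utils.run and are assumptions here). The flag names
# map one-to-one onto the definitions above, e.g.
#   python -m bpt.train \
#       --mesh_dim='1,-1,1' \
#       --total_steps=10000 \
#       --load_checkpoint='huggingface::EleutherAI/gpt-j-6B'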
def main(argv):
if FLAGS.initialize_jax_distributed:
jax.distributed.initialize()
variant = utils.get_user_flags(FLAGS, FLAGS_DEF)
flags_config_dict = utils.user_flags_to_config_dict(FLAGS, FLAGS_DEF)
logger = utils.WandBLogger(
config=FLAGS.logger,
variant=variant,
enable=FLAGS.log_all_worker or (jax.process_index() == 0),
)
set_random_seed(FLAGS.seed)
if FLAGS.load_dataset_state != '':
dataset = utils.load_pickle(FLAGS.load_dataset_state)
else:
tokenizer = GPTConfig.get_tokenizer(FLAGS.tokenizer)
text_processor = TextProcessor(FLAGS.text_processor, tokenizer)
dataset = Dataset(FLAGS.train_dataset, tokenizer, text_processor)
if FLAGS.eval_steps > 0:
eval_dataset = Dataset(
FLAGS.eval_dataset, dataset.tokenizer, dataset.text_processor,
)
eval_iterator = iter(eval_dataset.val_iter())
seq_length = dataset.seq_length
if FLAGS.load_gpt_config != '':
gpt_config = GPTConfig.load_config(FLAGS.load_gpt_config)
update_gpt_config = GPTConfig(**FLAGS.gpt)
gpt_config.update(dict(
q_chunk_size=update_gpt_config.q_chunk_size,
k_chunk_size=update_gpt_config.k_chunk_size,
attn_type=update_gpt_config.attn_type,
n_positions=update_gpt_config.n_positions,
gradient_checkpointing=update_gpt_config.gradient_checkpointing,
scan_layers=update_gpt_config.scan_layers,
param_scan_axis=update_gpt_config.param_scan_axis,
))
else:
gpt_config = GPTConfig(**FLAGS.gpt)
if FLAGS.update_gpt_config != '':
gpt_config.update(dict(eval(FLAGS.update_gpt_config)))
gpt_config.update(dict(
bos_token_id=dataset.tokenizer.bos_token_id,
eos_token_id=dataset.tokenizer.eos_token_id,
))
if gpt_config.vocab_size < dataset.vocab_size:
gpt_config.update(dict(vocab_size=dataset.vocab_size))
model = FlaxGPTForCausalLMModule(gpt_config)
optimizer, optimizer_info = OptimizerFactory.get_optimizer(
FLAGS.optimizer,
get_weight_decay_mask(GPTConfig.get_weight_decay_exclusions()),
)
def create_trainstate_from_params(params):
return TrainState.create(params=params, tx=optimizer, apply_fn=None)
def init_fn(rng):
rng_generator = JaxRNG(rng)
params = model.init(
input_ids=jnp.zeros((4, seq_length), dtype=jnp.int32),
position_ids=jnp.zeros((4, seq_length), dtype=jnp.int32),
attention_mask=jnp.ones((4, seq_length), dtype=jnp.int32),
rngs=rng_generator(gpt_config.rng_keys()),
)
return TrainState.create(params=params, tx=optimizer, apply_fn=None)
if FLAGS.gpt.attn_type == 'blockwise_parallel' or FLAGS.gpt.attn_type == 'blockwise_parallel_v1':
cross_entropy_loss_and_accuracy_fn = partial(blockwise_cross_entropy,
policy=FLAGS.gpt.gradient_checkpointing,
chunk_size=FLAGS.gpt.q_chunk_size,
prevent_cse=not FLAGS.gpt.scan_layers,)
else:
cross_entropy_loss_and_accuracy_fn = cross_entropy_loss_and_accuracy
def train_step(train_state, rng, batch):
rng_generator = JaxRNG(rng)
input_tokens = with_sharding_constraint(batch['input_tokens'], PS(('dp', 'fsdp')))
output_tokens = with_sharding_constraint(batch['output_tokens'], PS(('dp', 'fsdp')))
loss_masks = with_sharding_constraint(batch['loss_masks'], PS(('dp', 'fsdp')))
def loss_and_accuracy(params):
logits = model.apply(
params,
input_tokens,
deterministic=False,
rngs=rng_generator(gpt_config.rng_keys()),
).logits
return cross_entropy_loss_and_accuracy_fn(logits, output_tokens, loss_masks)
grad_fn = jax.value_and_grad(loss_and_accuracy, has_aux=True)
(loss, accuracy), grads = grad_fn(train_state.params)
train_state = train_state.apply_gradients(grads=grads)
metrics = dict(
loss=loss,
accuracy=accuracy,
learning_rate=optimizer_info['learning_rate_schedule'](train_state.step),
gradient_norm=global_norm(grads),
param_norm=global_norm(train_state.params),
)
return train_state, rng_generator(), metrics
def eval_step(train_state, rng, batch):
rng_generator = JaxRNG(rng)
input_tokens = with_sharding_constraint(batch['input_tokens'], PS(('dp', 'fsdp')))
output_tokens = with_sharding_constraint(batch['output_tokens'], PS(('dp', 'fsdp')))
loss_masks = with_sharding_constraint(batch['loss_masks'], PS(('dp', 'fsdp')))
logits = model.apply(
train_state.params,
input_tokens,
deterministic=True,
rngs=rng_generator(gpt_config.rng_keys()),
).logits
loss, accuracy = cross_entropy_loss_and_accuracy_fn(logits, output_tokens, loss_masks)
metrics = dict(
loss=loss,
accuracy=accuracy,
)
return rng_generator(), metrics
train_state_shapes = jax.eval_shape(init_fn, next_rng())
train_state_partition = match_partition_rules(
GPTConfig.get_partition_rules(FLAGS.gpt.scan_layers), train_state_shapes
)
num_params = sum(x.size for x in jax.tree_leaves(train_state_shapes.params))
num_nonembed_params = num_params - gpt_config.vocab_size * gpt_config.n_embd
param_stats = {"num_params": num_params,"num_nonembed_params": num_nonembed_params}
logger.log(param_stats)
tqdm.write("\n" + pprint.pformat(param_stats) + "\n")
shard_fns, gather_fns = make_shard_and_gather_fns(
train_state_partition, train_state_shapes
)
checkpointer = StreamingCheckpointer(
FLAGS.checkpointer, logger.output_dir,
enable=jax.process_index() == 0,
)
sharded_init_fn = pjit(
init_fn,
in_shardings=PS(),
out_shardings=train_state_partition
)
sharded_create_trainstate_from_params = pjit(
create_trainstate_from_params,
in_shardings=(train_state_partition.params, ),
out_shardings=train_state_partition,
donate_argnums=(0, ),
)
sharded_train_step = pjit(
train_step,
in_shardings=(train_state_partition, PS(), PS()),
out_shardings=(train_state_partition, PS(), PS()),
donate_argnums=(0, 1),
)
sharded_eval_step = pjit(
eval_step,
in_shardings=(train_state_partition, PS(), PS()),
out_shardings=(PS(), PS()),
donate_argnums=(1,),
)
def save_checkpoint(train_state, milestone=False):
step = int(jax.device_get(train_state.step))
metadata = dict(
step=step,
variant=variant,
flags=flags_config_dict,
gpt_config=gpt_config.to_dict(),
)
checkpointer.save_all(
train_state=train_state,
gather_fns=gather_fns,
metadata=metadata,
dataset=dataset.get_state_dict(),
milestone=milestone,
)
if FLAGS.profile_steps > 0:
import os
os.makedirs(logger.profile_dir, exist_ok=True)
mesh = GPTConfig.get_jax_mesh(FLAGS.mesh_dim)
with mesh:
train_state, restored_params = None, None
if train_state is None and restored_params is None:
# Initialize from scratch
train_state = sharded_init_fn(next_rng())
elif train_state is None and restored_params is not None:
# Restore from params but initialize train_state
train_state = sharded_create_trainstate_from_params(restored_params)
del restored_params
sharded_rng = next_rng()
# warmup
for batch, dataset_metrics in dataset:
train_state, sharded_rng, metrics = sharded_train_step(
train_state, sharded_rng, batch
)
break
# profile
jax.profiler.start_trace(logger.profile_dir)
for step, (batch, dataset_metrics) in zip(trange(FLAGS.profile_steps), dataset):
train_state, sharded_rng, metrics = sharded_train_step(
train_state, sharded_rng, batch
)
jax.block_until_ready(train_state)
jax.profiler.save_device_memory_profile(f'{logger.profile_dir}/memory{step}.prof')
jax.profiler.stop_trace()
if FLAGS.stop_after_profile:
exit()
mesh = GPTConfig.get_jax_mesh(FLAGS.mesh_dim)
with mesh:
train_state, restored_params = None, None
if FLAGS.load_checkpoint != '':
load_type, load_path = FLAGS.load_checkpoint.split('::', 1)
if load_type == 'huggingface':
restored_params = tree_apply(
shard_fns.params, gpt_config.load_pretrained(load_path)
)
train_state = None
else:
train_state, restored_params = checkpointer.load_trainstate_checkpoint(
FLAGS.load_checkpoint, train_state_shapes, shard_fns
)
if train_state is None and restored_params is None:
# Initialize from scratch
train_state = sharded_init_fn(next_rng())
elif train_state is None and restored_params is not None:
# Restore from params but initialize train_state
train_state = sharded_create_trainstate_from_params(restored_params)
del restored_params
start_step = int(jax.device_get(train_state.step))
if FLAGS.save_model_freq > 0:
save_checkpoint(train_state)
sharded_rng = next_rng()
step_counter = trange(start_step, FLAGS.total_steps, ncols=0)
def run_eval(sharded_rng, eval_fn, batch, eval_steps, eval_name):
eval_metric_list = []
for _ in range(eval_steps):
sharded_rng, eval_metrics = eval_fn(
train_state, sharded_rng, batch
)
eval_metric_list.append(eval_metrics)
log_metrics = get_metrics(eval_metric_list, stack=True)
mean_metrics = {
f"{eval_name}/{k}": np.mean(v)
for k, v in log_metrics.items()
}
mean_metrics["step"] = step
logger.log(mean_metrics)
tqdm.write("\n" + pprint.pformat(mean_metrics) + "\n")
return sharded_rng
for step, (batch, dataset_metrics) in zip(step_counter, dataset):
train_state, sharded_rng, metrics = sharded_train_step(
train_state, sharded_rng, batch
)
if step % FLAGS.log_freq == 0:
if FLAGS.eval_steps > 0:
batch, _ = next(eval_iterator)
sharded_rng = run_eval(sharded_rng, sharded_eval_step,
batch, FLAGS.eval_steps, "val")
log_metrics = {"step": step}
log_metrics.update(metrics)
log_metrics.update(dataset_metrics)
log_metrics = jax.device_get(log_metrics)
logger.log(log_metrics)
tqdm.write("\n" + pprint.pformat(log_metrics) + "\n")
if FLAGS.save_milestone_freq > 0 and (step + 1) % FLAGS.save_milestone_freq == 0:
save_checkpoint(train_state, milestone=True)
elif FLAGS.save_model_freq > 0 and (step + 1) % FLAGS.save_model_freq == 0:
save_checkpoint(train_state)
if FLAGS.save_model_freq > 0:
save_checkpoint(train_state)
if __name__ == "__main__":
utils.run(main)
|
blockwise-parallel-transformer-1-main
|
bpt/train.py
|
import dataclasses
import pprint
import time
from functools import partial
import json
from multiprocessing import Pool
import h5py
import bpt.tools.utils as utils
from ml_collections.config_dict import config_dict
from ml_collections import ConfigDict
from tqdm import tqdm, trange
import numpy as np
from datasets import load_dataset
class TextProcessor(object):
""" Example processor that converts a dictionary of texts into tokens. """
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.fields_from_example = ''
config.fields = ''
config.subfield_separator = ' '
config.add_eos_token = True
config.prepend_text = ''
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, tokenizer):
self.config = self.get_default_config(config)
assert self.config.fields != '' or self.config.fields_from_example != '', (
'Either fields or fields_from_example must be specified.'
)
self.tokenizer = tokenizer
def __call__(self, example, has_aux=False):
if has_aux:
example, *aux = example
else:
aux = tuple()
token_buffer = []
loss_mask_buffer = []
if self.config.fields_from_example != '':
fields = example[self.config.fields_from_example].split(',')
else:
fields = self.config.fields.split(',')
for i, field in enumerate(fields):
if field.startswith('[') and field.endswith(']'):
# No loss for this field.
field = field[1:-1]
mask = 0.0
else:
mask = 1.0
if field == '<|bos|>':
token_buffer.append(self.tokenizer.bos_token_id)
loss_mask_buffer.append(mask)
elif field == '<|eos|>':
token_buffer.append(self.tokenizer.eos_token_id)
loss_mask_buffer.append(mask)
else:
subfields = field.split('+')
text = self.config.subfield_separator.join(
[example[subfield] for subfield in subfields]
)
if i == 0:
text = self.config.prepend_text + text
tokens = self.tokenizer.encode(text)
token_buffer.extend(tokens)
loss_mask_buffer.extend([mask for _ in range(len(tokens))])
if self.config.add_eos_token:
token_buffer.append(self.tokenizer.eos_token_id)
loss_mask_buffer.append(1.0)
return token_buffer, loss_mask_buffer, *aux
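# Hedged usage sketch (not part of the original file; the field names 'prompt'
# and 'answer' are illustrative, not from the original repo). With
# fields='[prompt],answer', the bracketed prompt field is tokenized with loss
# mask 0.0, the answer field with mask 1.0, and add_eos_token appends a final
# EOS token with mask 1.0:
#   text_processor = TextProcessor({'fields': '[prompt],answer'}, tokenizer)
#   tokens, loss_masks = text_processor({'prompt': 'Q: 1+1?', 'answer': ' 2'})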
class Dataset(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.path = ''
config.seq_length = 1024
config.batch_size = 8
config.start_seek_loc = 0
config.index_at_start = 0
config.tokenizer_processes = 1
config.tokenizer_parallel_chunk_size = 32
config.tokenizer_parallel_batch_size = 1024
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, tokenizer, text_processor):
self.config = self.get_default_config(config)
assert self.config.path != ''
self._tokenizer = tokenizer
self._text_processor = text_processor
self._index = self.config.index_at_start
self._file_loc = self.config.start_seek_loc
self._n_batch = 0
def parse_json(self, line):
if not line or line == '\n':
return None
try:
data = json.loads(line)
except json.decoder.JSONDecodeError:
print(f'Error parsing json line:\n{line}')
return None
return data
def json_iterator(self):
with utils.open_file(self.config.path, 'r') as fin:
fin.seek(self._file_loc)
while True:
line = fin.readline()
self._file_loc = fin.tell()
if not line: # Reached EOF
self._index = 0
fin.seek(0)
continue
data = self.parse_json(line)
if data is not None:
# JSON parsing succeeded
yield data, self._file_loc, self._index
self._index += 1
def batched(self, iterator, batch_size):
batch = []
for example in iterator:
batch.append(example)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def parallel_example_iterator(self):
if self.config.tokenizer_processes == 1:
for example, loc, index in self.json_iterator():
yield self.text_processor((example, loc, index), has_aux=True)
else:
process_pool = Pool(self.config.tokenizer_processes)
batched_iterator = self.batched(
self.json_iterator(), self.config.tokenizer_parallel_batch_size
)
with process_pool as pool:
map_fn = partial(self.text_processor, has_aux=True)
next_batch = pool.map_async(
map_fn, next(batched_iterator),
chunksize=self.config.tokenizer_parallel_chunk_size
)
while True:
current_batch = next_batch
next_batch = pool.map_async(
map_fn, next(batched_iterator),
chunksize=self.config.tokenizer_parallel_chunk_size
)
for example in current_batch.get():
yield example
def __iter__(self):
chunk_size = self.config.batch_size * self.config.seq_length
token_buffer = []
loss_mask_buffer = []
total_tokens = 0
last_time = 0.0
for tokens, loss_masks, loc, index in self.parallel_example_iterator():
token_buffer.extend(tokens)
loss_mask_buffer.extend(loss_masks)
while len(token_buffer) > chunk_size + 1:
total_tokens += chunk_size
metrics = {
'dataset_file_loc': loc,
'dataset_example_index': index,
'dataset_total_tokens': total_tokens,
'dataset_throughput_tps': chunk_size / (time.time() - last_time),
}
last_time = time.time()
input_tokens = np.array(token_buffer[:chunk_size], dtype=np.int32)
output_tokens = np.array(token_buffer[1:chunk_size+1], dtype=np.int32)
# reshape to batch_size x seq_length
input_tokens = input_tokens.reshape(self.config.batch_size, -1)
output_tokens = output_tokens.reshape(self.config.batch_size, -1)
loss_masks = np.array(loss_mask_buffer[:chunk_size], dtype=np.float32).reshape(self.config.batch_size, -1)
yield {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"loss_masks": loss_masks,
}, metrics
token_buffer = token_buffer[chunk_size:]
loss_mask_buffer = loss_mask_buffer[chunk_size:]
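# Hedged illustration of the chunking above (descriptive comments only, not
# part of the original file): with batch_size=2 and seq_length=3,
# chunk_size=6; from a token buffer [t0, t1, ..., t6] the iterator emits
# input_tokens  = [t0..t5] reshaped to (2, 3) and
# output_tokens = [t1..t6] reshaped to (2, 3),
# i.e. next-token prediction targets shifted by one position.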
def val_iter(self):
chunk_size = self.config.batch_size * self.config.seq_length
token_buffer = []
loss_mask_buffer = []
total_tokens = 0
last_time = 0.0
for tokens, loss_masks, loc, index in self.parallel_example_iterator():
token_buffer.extend(tokens)
loss_mask_buffer.extend(loss_masks)
while len(token_buffer) > chunk_size + 1:
total_tokens += chunk_size
metrics = {
'dataset_file_loc': loc,
'dataset_example_index': index,
'dataset_total_tokens': total_tokens,
'dataset_throughput_tps': chunk_size / (time.time() - last_time),
}
last_time = time.time()
input_tokens = np.array(token_buffer[:chunk_size], dtype=np.int32)
output_tokens = np.array(token_buffer[1:chunk_size+1], dtype=np.int32)
# reshape to batch_size x seq_length
input_tokens = input_tokens.reshape(self.config.batch_size, -1)
output_tokens = output_tokens.reshape(self.config.batch_size, -1)
loss_masks = np.array(loss_mask_buffer[:chunk_size], dtype=np.float32).reshape(self.config.batch_size, -1)
yield {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"loss_masks": loss_masks,
}, metrics
token_buffer = token_buffer[chunk_size:]
loss_mask_buffer = loss_mask_buffer[chunk_size:]
def get_state_dict(self):
return dict(
config=self.config,
index=self._index,
file_loc=self._file_loc,
)
def load_state_dict(self, state_dict):
self.config = state_dict.get('config', self.config)
self._index = state_dict.get('index', self.config.index_at_start)
self._file_loc = state_dict.get('file_loc', self.config.start_seek_loc)
@property
def seq_length(self):
return self.config.seq_length
@property
def tokenizer(self):
return self._tokenizer
@property
def text_processor(self):
return self._text_processor
@property
def vocab_size(self):
return len(self.tokenizer)
|
blockwise-parallel-transformer-1-main
|
bpt/data.py
|
import functools
import json
import math
from functools import partial
from typing import Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int # not used
k_chunk_size: int # not used
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = None # not used
prevent_cse: bool = False # not used
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
self.causal_mask = make_causal_mask(jnp.ones((1, self.max_position_embeddings), dtype="bool"), dtype="bool")
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slighly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(
hidden_states,
position_ids,
deterministic=deterministic,
)
query_length, key_length = query.shape[1], key.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
batch_size = hidden_states.shape[0]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
if self.causal:
attention_mask = combine_masks(attention_mask, causal_mask)
else:
attention_mask = attention_mask
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
return outputs
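# Hedged sketch (not part of the original file): the cache write performed in
# _concatenate_to_cache above, shown on a toy tensor. lax.dynamic_update_slice
# places the newly projected key at the current cache index and leaves the
# remaining, still-empty positions untouched.
def _cache_update_sketch():
    from jax import lax, numpy as jnp
    cache = jnp.zeros((1, 4, 2, 3))    # (batch, max_length, num_heads, head_dim)
    new_key = jnp.ones((1, 1, 2, 3))   # projection for a single new position
    cur_index = 2
    return lax.dynamic_update_slice(cache, new_key, (0, cur_index, 0, 0))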
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/vanilla'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
|
blockwise-parallel-transformer-1-main
|
bpt/blocks/vanilla.py
|
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
def forward_query(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
query = self._split_heads(query)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
q_rot = apply_rotary_pos_emb(q_rot, sincos)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
return query
def forward_key_value(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
if self.float32_logits:
key = key.astype(jnp.float32)
return key, value
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
outputs = blockwise_compute(
self.attn,
hidden_states,
hidden_states,
position_ids,
num_heads=self.num_heads,
bias=attention_bias,
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
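# --- Added illustration (not part of the original source) ---
# A minimal sketch of what _chunk_attention_bias returns for one chunk pair,
# assuming no padding bias and no attention dropout. The toy chunk size of 4
# and the chunk indices below are arbitrary choices for demonstration.
def _chunk_attention_bias_example():
    chunk = 4
    bias = _chunk_attention_bias(
        chunk, chunk,
        None,   # bias: no padding mask supplied
        True,   # deterministic: attention dropout disabled
        None,   # attn_dropout mask is unused when deterministic
        0.0,    # attn_pdrop
        True,   # causal_mask
        1,      # query_chunk_idx -> global query positions 4..7
        0,      # key_chunk_idx   -> global key positions 0..3
    )
    # Every query in chunk 1 comes after every key in chunk 0, so nothing is
    # masked and the bias is all zeros with shape (1, 1, chunk, chunk).
    assert bias.shape == (1, 1, chunk, chunk)
    assert bool(jnp.all(bias == 0.0))
    return bias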
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute(cell,
q_inputs,
kv_inputs,
position_ids,
num_heads,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = q_inputs.shape[1]
kv_len = kv_inputs.shape[1]
q_inputs = rearrange(q_inputs, 'b (n c) d -> b n c d', c=query_chunk_size)
kv_inputs = rearrange(kv_inputs, 'b (n c) d -> b n c d', c=key_chunk_size)
q_inputs, kv_inputs = map(lambda t: rearrange(t, 'b n c d -> n b c d'), (q_inputs, kv_inputs))
num_q, batch, _, _ = q_inputs.shape
num_kv, _, _, _ = kv_inputs.shape
q_position_ids = rearrange(position_ids, 'b (n c) -> n b c', c=query_chunk_size)
kv_position_ids = rearrange(position_ids, 'b (n c) -> n b c', c=key_chunk_size)
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(cell, _, args):
input_chunk, query_chunk_idx, query_position_ids_chunk = args
query_chunk = cell.forward_query(input_chunk, query_position_ids_chunk)
query_chunk = query_chunk / jnp.sqrt(query_chunk.shape[-1])
dim_per_head = query_chunk.shape[-1]
def summarize_chunk(cell, carry, args):
kv_chunk, key_chunk_idx, kv_position_ids_chunk = args
(numerator, denominator, prev_max_score) = carry
key_chunk, value_chunk = cell.forward_key_value(kv_chunk, kv_position_ids_chunk)
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
summarize_chunk = nn.remat(
summarize_chunk,
variables="params",
rngs={"params" : False, "dropout": False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
(numerator, denominator, max_score), _ = nn.scan(
summarize_chunk,
variable_broadcast="params",
split_rngs={"params" : False, "dropout": False},
in_axes=0,
out_axes=0,
length=num_kv,
)(cell, init_carry, (kv_inputs, jnp.arange(0, num_kv), kv_position_ids))
attn_chunk = (numerator / denominator).astype(dtype)
attn_chunk = cell.attn_out_proj(attn_chunk, deterministic)
ffn_chunk = cell.forward_ffn(attn_chunk + input_chunk, deterministic)
outputs = ffn_chunk + attn_chunk + input_chunk
return _, outputs
_query_chunk_attention = nn.remat(
_query_chunk_attention,
variables="params",
rngs={"params" : False, "dropout": False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
_, res = nn.scan(
_query_chunk_attention,
variable_broadcast="params",
split_rngs={"params" : False, "dropout": False},
in_axes=0,
out_axes=0,
length=num_q,
)(cell, None, (q_inputs, jnp.arange(0, num_q), q_position_ids))
res = rearrange(res, 'n b c d -> b (n c) d')
return res
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/blockwise_parallel_v1'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
|
blockwise-parallel-transformer-1-main
|
bpt/blocks/blockwise_parallel_v1.py
|
blockwise-parallel-transformer-1-main
|
bpt/blocks/__init__.py
|
|
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
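# --- Added illustration (not part of the original source) ---
# A small sketch of how the rotary helpers above fit together. The batch size,
# sequence length, head count, and rotary_dim below are arbitrary assumptions;
# the (batch, seq, heads, head_dim) layout matches how forward_qkv uses them.
def _rotary_position_embedding_example():
    batch, seq_len, num_heads, rotary_dim = 2, 16, 4, 8
    # Sin/cos table of shape (max_positions, rotary_dim): first half sin, second half cos.
    embed_positions = create_sinusoidal_positions(32, rotary_dim)
    position_ids = jnp.broadcast_to(jnp.arange(seq_len), (batch, seq_len))
    sincos = jnp.take(embed_positions, position_ids, axis=0)
    sincos = jnp.split(sincos, 2, axis=-1)  # -> (sin, cos), each (batch, seq, rotary_dim // 2)
    query = jnp.ones((batch, seq_len, num_heads, rotary_dim))
    rotated = apply_rotary_pos_emb(query, sincos)
    assert rotated.shape == query.shape
    return rotated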
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
query = query / jnp.sqrt(query.shape[-1])
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
attn_output = blockwise_compute_attn(
query,
key,
value,
bias=attention_bias,
                deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = blockwise_compute_ffn(
self.attn,
hidden_states + attn_output,
chunk_size=self.q_chunk_size,
deterministic=deterministic,
policy=self.policy,
prevent_cse=self.prevent_cse,
)
outputs = ffn_output + hidden_states + attn_output
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute_attn(query, key, value,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = query.shape[1]
kv_len = key.shape[1]
query = rearrange(query, 'b (n c) h q -> b n c h q', c=query_chunk_size)
key, value = map(lambda t: rearrange(t, 'b (n c) h v -> b n c h v', c=key_chunk_size), (key, value))
query, key, value = map(lambda t: rearrange(t, 'b n c h d -> n b c h d'), (query, key, value))
num_q, batch, _, num_heads, dim_per_head = query.shape
num_kv, _, _, _, _ = key.shape
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(args):
query_chunk, query_chunk_idx = args
@functools.partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def summarize_chunk(carry, args):
key_chunk, value_chunk, key_chunk_idx = args
(numerator, denominator, prev_max_score) = carry
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
(numerator, denominator, max_score), _ = lax.scan(
summarize_chunk, init_carry, xs=(key, value, jnp.arange(0, num_kv))
)
outputs = (numerator / denominator).astype(dtype)
return outputs
_, res = lax.scan(
lambda _, x: ((), _query_chunk_attention(x)),
(), xs=(query, jnp.arange(0, num_q))
)
res = rearrange(res, 'n b c h d -> b (n c) h d')
return res
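# --- Added illustration (not part of the original source) ---
# A tiny self-check sketch: on a small input, the chunked attention above
# should match a naive causal softmax attention. The shapes, chunk sizes, and
# tolerance below are illustrative assumptions; queries are left unscaled in
# both paths so the comparison stays apples-to-apples.
def _blockwise_attn_selfcheck():
    batch, seq_len, num_heads, head_dim = 1, 8, 2, 4
    q_rng, k_rng, v_rng = jax.random.split(jax.random.PRNGKey(0), 3)
    query = jax.random.normal(q_rng, (batch, seq_len, num_heads, head_dim))
    key = jax.random.normal(k_rng, (batch, seq_len, num_heads, head_dim))
    value = jax.random.normal(v_rng, (batch, seq_len, num_heads, head_dim))
    bias = jnp.zeros((1, 1, 1, 1))  # no padding mask
    chunked = blockwise_compute_attn(
        query, key, value,
        bias=bias,
        deterministic=True,
        attn_pdrop=0.0,
        causal_mask=True,
        query_chunk_size=4,
        key_chunk_size=4,
    )
    # Naive reference: full causal softmax attention over the whole sequence.
    logits = jnp.einsum('bqhd,bkhd->bhqk', query, key, precision=lax.Precision.HIGHEST)
    q_idx = jnp.arange(seq_len)[:, None]
    k_idx = jnp.arange(seq_len)[None, :]
    logits = jnp.where(q_idx < k_idx, MASK_VALUE, logits)
    probs = jax.nn.softmax(logits, axis=-1)
    reference = jnp.einsum('bhqk,bkhd->bqhd', probs, value, precision=lax.Precision.HIGHEST)
    assert jnp.allclose(chunked, reference, atol=1e-4)
    return chunked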
def blockwise_compute_ffn(cell, inputs, chunk_size, deterministic, policy, prevent_cse):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def ffn(cell, _, hidden_states):
outputs = cell.forward_ffn(hidden_states, deterministic=deterministic)
return _, outputs
ffn_remat = nn.remat(
ffn,
variables="params",
rngs={"params" : False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
_, res = nn.scan(
ffn_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(cell, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
class Blockwise_LM_Head(nn.Module):
vocab_size: int
chunk_size: int
policy: str = 'nothing_saveable'
dtype: jnp.dtype = jnp.float32
prevent_cse: bool = False
def setup(self):
self.lm_head = nn.Dense(
self.vocab_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
def __call__(self, inputs):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=self.chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def lm_head(cell, _, hidden_states):
outputs = cell(hidden_states)
return _, outputs
lm_head_remat = nn.remat(
lm_head,
variables="params",
rngs={"params" : False},
prevent_cse=self.prevent_cse,
policy=get_gradient_checkpoint_policy(self.policy),
)
_, res = nn.scan(
lm_head_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(self.lm_head, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
def blockwise_cross_entropy(logits, tokens, valid=None,
chunk_size=None, policy=None, prevent_cse=None):
if valid is None:
valid = jnp.ones(tokens.shape[:2])
valid = valid.astype(jnp.float32)
logits = jnp.reshape(logits, (-1, logits.shape[-1]))
tokens = jnp.reshape(tokens, (-1,))
valid = jnp.reshape(valid, (-1,))
def _cross_entropy_loss_and_accuracy(logits, tokens, valid):
valid_text_length = jnp.maximum(jnp.sum(valid, axis=-1), 1e-10)
token_log_prob = jnp.squeeze(
jnp.take_along_axis(
jax.nn.log_softmax(logits, axis=-1),
jnp.expand_dims(tokens, -1),
axis=-1,
),
-1,
)
token_log_prob = jnp.where(valid > 0.0, token_log_prob, jnp.array(0.0))
correct = jnp.where(
valid > 0.0,
jnp.argmax(logits, axis=-1) == tokens,
jnp.array(False)
)
return token_log_prob, correct, valid_text_length
@partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def _loss_and_accuracy(carry, args):
loss, accuracy, num = carry
logits, tokens, valid = args
token_log_prob, correct, valid_text_length = \
_cross_entropy_loss_and_accuracy(logits, tokens, valid)
loss = loss + jnp.sum(token_log_prob, axis=-1) / valid_text_length
accuracy = accuracy + jnp.sum(correct, axis=-1) / valid_text_length
num = num + 1
return (loss, accuracy, num), None
num_chunk = logits.shape[0] // chunk_size
logits = rearrange(logits, '(n c) d -> n c d', c=chunk_size)
tokens = rearrange(tokens, '(n c) -> n c', c=chunk_size)
valid = rearrange(valid, '(n c) -> n c', c=chunk_size)
(loss, accuracy, num), _ = jax.lax.scan(
_loss_and_accuracy, (0.0, 0.0, 0), xs=(logits, tokens, valid),
length=num_chunk,
)
loss = - loss / num
accuracy = accuracy / num
return loss, accuracy
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/blockwise_parallel_simplified'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
|
blockwise-parallel-transformer-1-main
|
bpt/blocks/blockwise_parallel.py
|
import functools
import json
import math
from functools import partial
from typing import Callable, NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks, make_causal_mask
from jax import lax
from jax import numpy as jnp
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(
hidden_states,
position_ids,
deterministic=deterministic,
)
query = query / jnp.sqrt(query.shape[-1])
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
attn_output = blockwise_compute_attn(
query,
key,
value,
bias=attention_bias,
                deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = ffn_output + hidden_states + attn_output
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute_attn(query, key, value,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = query.shape[1]
kv_len = key.shape[1]
query = rearrange(query, 'b (n c) h q -> b n c h q', c=query_chunk_size)
key, value = map(lambda t: rearrange(t, 'b (n c) h v -> b n c h v', c=key_chunk_size), (key, value))
query, key, value = map(lambda t: rearrange(t, 'b n c h d -> n b c h d'), (query, key, value))
num_q, batch, _, num_heads, dim_per_head = query.shape
num_kv, _, _, _, _ = key.shape
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(args):
query_chunk, query_chunk_idx = args
@functools.partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def summarize_chunk(carry, args):
key_chunk, value_chunk, key_chunk_idx = args
(numerator, denominator, prev_max_score) = carry
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
(numerator, denominator, max_score), _ = lax.scan(
summarize_chunk, init_carry, xs=(key, value, jnp.arange(0, num_kv))
)
outputs = (numerator / denominator).astype(dtype)
return outputs
_, res = lax.scan(
lambda _, x: ((), _query_chunk_attention(x)),
(), xs=(query, jnp.arange(0, num_q))
)
res = rearrange(res, 'n b c h d -> b (n c) h d')
return res
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/memeff'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
|
blockwise-parallel-transformer-1-main
|
bpt/blocks/memeff.py
|
import os
import numpy as np
from ml_collections import ConfigDict
import bpt.tools.utils as utils
import jax
import jax.numpy as jnp
import flax
from flax.serialization import (
from_bytes, to_bytes, to_state_dict, from_state_dict
)
from flax.traverse_util import flatten_dict, unflatten_dict, empty_node
import msgpack
from bpt.tools.jax_utils import tree_apply, float_tensor_to_dtype
class StreamingCheckpointer(object):
""" Custom msgpack checkpointer that saves large train states by serializing
        and saving tensors one by one in a streaming fashion. This avoids running
        out of memory or filling the local TPU disk, which can happen with the default flax checkpointer.
"""
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.float_dtype = 'bf16'
config.save_optimizer_state = False
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, checkpoint_dir, enable=True):
self.config = self.get_default_config(config)
self.checkpoint_dir = checkpoint_dir
self.enable = enable
def save_checkpoint(self, train_state, filename, gather_fns=None):
if self.enable:
path = os.path.join(self.checkpoint_dir, filename)
else:
path = '/dev/null'
self.save_train_state_to_file(
train_state, path, gather_fns, self.config.float_dtype
)
@staticmethod
def save_train_state_to_file(train_state, path, gather_fns=None, float_dtype=None):
train_state = to_state_dict(train_state)
packer = msgpack.Packer()
flattend_train_state = flatten_dict(train_state)
if gather_fns is not None:
gather_fns = flatten_dict(to_state_dict(gather_fns))
with utils.open_file(path, "wb") as fout:
for key, value in flattend_train_state.items():
if gather_fns is not None:
value = gather_fns[key](value)
value = float_tensor_to_dtype(value, float_dtype)
fout.write(packer.pack((key, to_bytes(value))))
def save_pickle(self, obj, filename):
if self.enable:
path = os.path.join(self.checkpoint_dir, filename)
else:
path = '/dev/null'
utils.save_pickle(obj, path)
def save_all(self, train_state, gather_fns, metadata=None, dataset=None, milestone=False):
step = int(jax.device_get(train_state.step))
if self.config.save_optimizer_state:
checkpoint_state = train_state
checkpoint_name = 'streaming_train_state'
checkpoint_gather_fns = gather_fns
else:
checkpoint_state = train_state.params['params']
checkpoint_name = 'streaming_params'
checkpoint_gather_fns = gather_fns.params['params']
if milestone:
# Save a milestone checkpoint that will not be overwritten
self.save_pickle(metadata, f'metadata_{step}.pkl')
self.save_pickle(dataset, f'dataset_{step}.pkl')
self.save_checkpoint(
checkpoint_state, f'{checkpoint_name}_{step}', checkpoint_gather_fns
)
else:
# Save a normal checkpoint that can be overwritten
self.save_pickle(metadata, 'metadata.pkl')
self.save_pickle(dataset, 'dataset.pkl')
self.save_checkpoint(
checkpoint_state, f'{checkpoint_name}', checkpoint_gather_fns
)
@staticmethod
def load_checkpoint(path, target=None, shard_fns=None, remove_dict_prefix=None):
if shard_fns is not None:
shard_fns = flatten_dict(
to_state_dict(shard_fns)
)
if remove_dict_prefix is not None:
remove_dict_prefix = tuple(remove_dict_prefix)
flattend_train_state = {}
with utils.open_file(path) as fin:
# 83886080 bytes = 80 MB, which is 16 blocks on GCS
unpacker = msgpack.Unpacker(fin, read_size=83886080, max_buffer_size=0)
for key, value in unpacker:
key = tuple(key)
if remove_dict_prefix is not None:
if key[:len(remove_dict_prefix)] == remove_dict_prefix:
key = key[len(remove_dict_prefix):]
else:
continue
tensor = from_bytes(None, value)
if shard_fns is not None:
tensor = shard_fns[key](tensor)
flattend_train_state[key] = tensor
if target is not None:
flattened_target = flatten_dict(
to_state_dict(target), keep_empty_nodes=True
)
for key, value in flattened_target.items():
if key not in flattend_train_state and value == empty_node:
flattend_train_state[key] = value
train_state = unflatten_dict(flattend_train_state)
if target is None:
return train_state
return from_state_dict(target, train_state)
@staticmethod
def load_flax_checkpoint(path, target=None, shard_fns=None):
""" Load a standard flax checkpoint that's not saved with the
msgpack streaming format.
"""
with utils.open_file(path, "rb") as fin:
encoded_bytes = fin.read()
state_dict = flax.serialization.msgpack_restore(encoded_bytes)
if shard_fns is not None:
shard_fns = to_state_dict(shard_fns)
state_dict = tree_apply(shard_fns, state_dict)
if target is None:
return state_dict
return from_state_dict(target, state_dict)
@classmethod
def load_trainstate_checkpoint(cls, load_from, trainstate_target=None,
trainstate_shard_fns=None,
disallow_trainstate=False):
if trainstate_target is not None:
params_target = trainstate_target.params['params']
else:
params_target = None
if trainstate_shard_fns is not None:
params_shard_fns = trainstate_shard_fns.params['params']
else:
params_shard_fns = None
load_type, load_path = load_from.split('::', 1)
if disallow_trainstate:
assert load_type != 'trainstate', 'Loading full trainstate is not allowed!'
train_state = None
restored_params = None
if load_type == 'trainstate':
# Load the entire train state in the streaming format
train_state = cls.load_checkpoint(
path=load_path,
target=trainstate_target,
shard_fns=trainstate_shard_fns,
)
elif load_type == 'trainstate_params':
# Load the params part of the train state in the streaming format
restored_params = cls.load_checkpoint(
path=load_path,
target=params_target,
shard_fns=params_shard_fns,
remove_dict_prefix=('params', 'params'),
)
restored_params = flax.core.frozen_dict.freeze(
{'params': restored_params}
)
elif load_type == 'params':
# Load the params in the streaming format
restored_params = cls.load_checkpoint(
path=load_path,
target=params_target,
shard_fns=params_shard_fns,
)
restored_params = flax.core.frozen_dict.freeze(
{'params': restored_params}
)
elif load_type == 'flax_params':
# Load the params in the standard flax format (non-streaming)
# This requires the entire params to fit in memory
restored_params = cls.load_flax_checkpoint(
path=load_path,
target=params_target,
shard_fns=params_shard_fns
)
restored_params = flax.core.frozen_dict.freeze(
{'params': restored_params}
)
else:
raise ValueError(f'Invalid load_from type: {load_type}')
return train_state, restored_params
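# --- Added usage sketch (not part of the original file) ---
# A hedged example of how this checkpointer is typically driven. The '/tmp/ckpt'
# directory, the metadata dict, and the 'params::<path>' load string are
# illustrative assumptions; callers supply their own train_state and gather_fns.
def _streaming_checkpointer_example(train_state, gather_fns):
    checkpointer = StreamingCheckpointer(
        StreamingCheckpointer.get_default_config(), '/tmp/ckpt', enable=True
    )
    # With save_optimizer_state=False (the default) this writes 'streaming_params'
    # plus 'metadata.pkl' and 'dataset.pkl' into the checkpoint directory.
    checkpointer.save_all(train_state, gather_fns, metadata={'note': 'example'})
    # Restore only the parameters later, using the streaming 'params::' format.
    _, restored_params = StreamingCheckpointer.load_trainstate_checkpoint(
        'params::/tmp/ckpt/streaming_params',
        trainstate_target=None,
        trainstate_shard_fns=None,
    )
    return restored_params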
|
blockwise-parallel-transformer-1-main
|
bpt/tools/checkpoint.py
|
from datasets import load_dataset
import json
from multiprocessing import Pool, cpu_count
dataset = load_dataset("openwebtext")
split_dataset = dataset["train"].train_test_split(test_size=0.0005, seed=2357, shuffle=True)
split_dataset['val'] = split_dataset.pop('test')
def save_split(split):
with open(f"openwebtext_{split}.jsonl", "w") as f:
for example in split_dataset[split]:
json.dump({"text": example["text"]}, f)
f.write("\n")
with Pool(cpu_count()) as p:
p.map(save_split, ["train", "val"])
|
blockwise-parallel-transformer-1-main
|
bpt/tools/prepare_owt.py
|
blockwise-parallel-transformer-1-main
|
bpt/tools/__init__.py
|
|
import os
import math
from typing import Any, Mapping, Text, Tuple, Union, NamedTuple
from functools import partial
import re
import dataclasses
import random
import dill
import flax
import jax
import jax.numpy as jnp
from jax.sharding import PartitionSpec as PS
from jax.sharding import Mesh
from jax.experimental.pjit import with_sharding_constraint as _with_sharding_constraint
from jax.experimental.pjit import pjit
from jax.interpreters import pxla
import numpy as np
from absl import logging
from flax import jax_utils
from flax.training.train_state import TrainState
from flax.core import FrozenDict
import optax
from transformers import FlaxLogitsWarper
class JaxRNG(object):
""" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside
pure function.
"""
@classmethod
def from_seed(cls, seed):
return cls(jax.random.PRNGKey(seed))
def __init__(self, rng):
self.rng = rng
def __call__(self, keys=None):
if keys is None:
self.rng, split_rng = jax.random.split(self.rng)
return split_rng
elif isinstance(keys, int):
split_rngs = jax.random.split(self.rng, num=keys + 1)
self.rng = split_rngs[0]
return tuple(split_rngs[1:])
else:
split_rngs = jax.random.split(self.rng, num=len(keys) + 1)
self.rng = split_rngs[0]
return {key: val for key, val in zip(keys, split_rngs[1:])}
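# --- Added usage sketch (not part of the original file) ---
# A minimal illustration of the three calling conventions JaxRNG.__call__
# supports; the seed and key names below are arbitrary.
def _jax_rng_example():
    rng = JaxRNG.from_seed(42)
    single = rng()                          # one fresh key, internal state advances
    first, second = rng(2)                  # a tuple of two fresh keys
    named = rng(('params', 'dropout'))      # a dict of keys indexed by the given names
    return single, first, second, named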
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
""" JIT traceable version of FlaxLogitsWarper that performs temperature scaling."""
def __init__(self, temperature):
self.temperature = temperature
def __call__(self, input_ids, scores, cur_len):
return scores / jnp.clip(self.temperature, a_min=1e-8)
def make_shard_and_gather_fns(partition_specs, dtype_specs=None):
""" Create pytree of sharding and gathering functions from pytree of
partition specs.
"""
float_dtypes = (jnp.bfloat16, jnp.float16, jnp.float32, jnp.float64)
def make_to_dtype_fn(dtype_spec):
def to_dtype(tensor):
if dtype_specs in float_dtypes and getattr(tensor, 'dtype', None) in float_dtypes:
# Convert all float tensors to the same dtype
return tensor.astype(dtype_specs)
elif hasattr(dtype_spec, 'dtype') and hasattr(tensor, 'dtype'):
return tensor.astype(dtype_spec.dtype)
return tensor
return to_dtype
def make_shard_fn(partition_spec, dtype_spec=None):
jax_shard_function = pjit(
make_to_dtype_fn(dtype_spec),
in_shardings=None,
out_shardings=partition_spec
)
def shard_fn(tensor):
return jax_shard_function(tensor).block_until_ready()
return shard_fn
def make_gather_fn(partition_spec, dtype_spec=None):
jax_gather_fn = pjit(
make_to_dtype_fn(dtype_spec),
in_shardings=partition_spec,
out_shardings=None
)
def gather_fn(tensor):
return jax.device_get(jax_gather_fn(tensor))
return gather_fn
if dtype_specs is None or dtype_specs in float_dtypes:
shard_fns = jax.tree_util.tree_map(make_shard_fn, partition_specs)
gather_fns = jax.tree_util.tree_map(make_gather_fn, partition_specs)
else:
shard_fns = jax.tree_util.tree_map(
make_shard_fn, partition_specs, dtype_specs
)
gather_fns = jax.tree_util.tree_map(
make_gather_fn, partition_specs, dtype_specs
)
return shard_fns, gather_fns
def set_random_seed(seed):
np.random.seed(seed)
random.seed(seed)
init_rng(seed)
def get_jax_mesh(axis_dims, names):
if ':' in axis_dims:
dims = []
dim_names = []
for axis in axis_dims.split(','):
name, dim = axis.split(':')
assert name in names
dims.append(int(dim))
dim_names.append(name)
assert(set(dim_names) == set(names))
else:
dims = [int(x) for x in axis_dims.split(',')]
dim_names = names
assert len(dims) == len(names)
return Mesh(np.array(jax.devices()).reshape(dims), dim_names)
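# --- Added usage sketch (not part of the original file) ---
# Illustrates the two axis_dims formats get_jax_mesh accepts. The axis names
# 'dp' and 'mp' are assumptions; the dims are sized so their product matches
# the number of visible devices, so this also runs on a single-device host.
def _get_jax_mesh_example():
    n_devices = len(jax.devices())
    # Positional form: dims follow the order of the provided names.
    mesh_positional = get_jax_mesh(f'1,{n_devices}', ('dp', 'mp'))
    # Named form: 'name:dim' pairs, which may appear in any order.
    mesh_named = get_jax_mesh(f'mp:{n_devices},dp:1', ('dp', 'mp'))
    return mesh_positional, mesh_named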
def names_in_current_mesh(*names):
""" Check if current mesh axes contain these names. """
mesh_axis_names = pxla.thread_resources.env.physical_mesh.axis_names
return set(names) <= set(mesh_axis_names)
def get_names_from_parition_spec(partition_specs):
""" Return axis names from partition specs. """
names = set()
if isinstance(partition_specs, dict):
partition_specs = partition_specs.values()
for item in partition_specs:
if item is None:
continue
elif isinstance(item, str):
names.add(item)
else:
names.update(get_names_from_parition_spec(item))
return list(names)
def with_sharding_constraint(x, partition_specs):
""" A smarter version of with_sharding_constraint that only applies the
constraint if the current mesh contains the axes in the partition specs.
"""
axis_names = get_names_from_parition_spec(partition_specs)
if names_in_current_mesh(*axis_names):
x = _with_sharding_constraint(x, partition_specs)
return x
def wrap_function_with_rng(rng):
""" To be used as decorator, automatically bookkeep a RNG for the wrapped function. """
def wrap_function(function):
def wrapped(*args, **kwargs):
nonlocal rng
rng, split_rng = jax.random.split(rng)
return function(split_rng, *args, **kwargs)
return wrapped
return wrap_function
def init_rng(seed):
global jax_utils_rng
jax_utils_rng = JaxRNG.from_seed(seed)
def next_rng(*args, **kwargs):
global jax_utils_rng
return jax_utils_rng(*args, **kwargs)
def get_metrics(metrics, unreplicate=False, stack=False):
if unreplicate:
metrics = flax.jax_utils.unreplicate(metrics)
metrics = jax.device_get(metrics)
if stack:
return jax.tree_map(lambda *args: np.stack(args), *metrics)
else:
return {key: float(val) for key, val in metrics.items()}
def mse_loss(val, target, valid=None):
if valid is None:
valid = jnp.ones((*target.shape[:2], 1))
valid = valid.astype(jnp.float32)
loss = jnp.mean(
jnp.where(
valid > 0.0,
jnp.square(val - target),
0.0
)
)
return loss
def cross_entropy_loss(logits, labels, smoothing_factor=0.):
num_classes = logits.shape[-1]
if labels.dtype == jnp.int32 or labels.dtype == jnp.int64:
labels = jax.nn.one_hot(labels, num_classes)
if smoothing_factor > 0.:
labels = labels * (1. - smoothing_factor) + smoothing_factor / num_classes
logp = jax.nn.log_softmax(logits, axis=-1)
return -jnp.mean(jnp.sum(logp * labels, axis=-1))
def cross_entropy_loss_and_accuracy(logits, tokens, valid=None):
if valid is None:
valid = jnp.ones(tokens.shape[:2])
valid = valid.astype(jnp.float32)
valid_text_length = jnp.maximum(jnp.sum(valid, axis=-1), 1e-10)
token_log_prob = jnp.squeeze(
jnp.take_along_axis(
jax.nn.log_softmax(logits, axis=-1),
jnp.expand_dims(tokens, -1),
axis=-1,
),
-1,
)
token_log_prob = jnp.where(valid > 0.0, token_log_prob, jnp.array(0.0))
loss = -jnp.mean(jnp.sum(token_log_prob, axis=-1) / valid_text_length)
correct = jnp.where(
valid > 0.0,
jnp.argmax(logits, axis=-1) == tokens,
jnp.array(False)
)
accuracy = jnp.mean(jnp.sum(correct, axis=-1) / valid_text_length)
return loss, accuracy
def global_norm(tree):
""" Return the global L2 norm of a pytree. """
squared = jax.tree_util.tree_map(lambda x: jnp.sum(jnp.square(x)), tree)
flattened, _ = jax.flatten_util.ravel_pytree(squared)
return jnp.sqrt(jnp.sum(flattened))
def average_metrics(metrics):
return jax.tree_map(
lambda *args: jnp.mean(jnp.stack(args)),
*metrics
)
def get_float_dtype_by_name(dtype):
return {
'bf16': jnp.bfloat16,
'fp16': jnp.float16,
'fp32': jnp.float32,
'fp64': jnp.float64,
}[dtype]
def float_tensor_to_dtype(tensor, dtype):
if dtype is None or dtype == '':
return tensor
if isinstance(dtype, str):
dtype = get_float_dtype_by_name(dtype)
float_dtypes = (jnp.bfloat16, jnp.float16, jnp.float32, jnp.float64)
if getattr(tensor, 'dtype', None) in float_dtypes:
tensor = tensor.astype(dtype)
return tensor
def float_to_dtype(tree, dtype):
return jax.tree_util.tree_map(
partial(float_tensor_to_dtype, dtype=dtype), tree
)
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
def tree_path_to_string(path, sep=None):
keys = []
for key in path:
if isinstance(key, jax.tree_util.SequenceKey):
keys.append(str(key.idx))
elif isinstance(key, jax.tree_util.DictKey):
keys.append(str(key.key))
elif isinstance(key, jax.tree_util.GetAttrKey):
keys.append(str(key.name))
elif isinstance(key, jax.tree_util.FlattenedIndexKey):
keys.append(str(key.key))
else:
keys.append(str(key))
if sep is None:
return tuple(keys)
return sep.join(keys)
def flatten_tree(xs, is_leaf=None, sep=None):
flattened, _ = jax.tree_util.tree_flatten_with_path(xs, is_leaf=is_leaf)
output = {}
for key, val in flattened:
output[tree_path_to_string(key, sep=sep)] = val
return output
def named_tree_map(f, tree, *rest, is_leaf=None, sep=None):
""" An extended version of jax.tree_util.tree_map, where the mapped function
f takes both the name (path) and the tree leaf as input.
"""
return jax.tree_util.tree_map_with_path(
lambda path, x, *r: f(tree_path_to_string(path, sep=sep), x, *r),
tree, *rest,
is_leaf=is_leaf
)
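# Illustrative usage sketch (not part of the original module): the mapped function
# receives the '/'-joined path of each leaf alongside the leaf itself, which is
# convenient for logging shapes or matching regex rules. The toy params layout is
# an assumption.
def _example_named_tree_map():
    params = {'dense': {'kernel': jnp.zeros((4, 8)), 'bias': jnp.zeros((8,))}}
    # Returns {'dense': {'kernel': 'dense/kernel: (4, 8)', 'bias': 'dense/bias: (8,)'}}
    return named_tree_map(lambda name, leaf: f'{name}: {leaf.shape}', params, sep='/')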
def match_partition_rules(rules, params):
""" Returns a pytree of PartitionSpec according to rules. Supports handling
Flax TrainState and Optax optimizer state.
"""
def get_partition_spec(name, leaf):
if len(leaf.shape) == 0 or np.prod(leaf.shape) == 1:
""" Don't partition scalar values. """
return PS()
for rule, ps in rules:
if re.search(rule, name) is not None:
return ps
raise ValueError(f'Partition rule not found for param: {name}')
return named_tree_map(get_partition_spec, params, sep='/')
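# Illustrative usage sketch (not part of the original module): rules are regexes
# matched against the '/'-joined parameter path and the first match wins, so a
# trailing '.*' catch-all avoids the ValueError for unmatched parameters. The
# parameter names and the 'dp'/'mp' axis names are assumptions.
def _example_match_partition_rules():
    params = {'dense': {'kernel': jnp.zeros((4, 8)), 'bias': jnp.zeros((8,))}}
    rules = (
        ('dense/kernel', PS('dp', 'mp')),
        ('.*', PS()),  # replicate everything that no earlier rule matched
    )
    # Returns {'dense': {'kernel': PS('dp', 'mp'), 'bias': PS()}}
    return match_partition_rules(rules, params)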
def get_weight_decay_mask(exclusions):
""" Return a weight decay mask function that computes the pytree masks
according to the given exclusion rules.
"""
def decay(name, _):
for rule in exclusions:
if re.search(rule, name) is not None:
return False
return True
def weight_decay_mask(params):
return named_tree_map(decay, params, sep='/')
return weight_decay_mask
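# Illustrative usage sketch (not part of the original module): paths matching any
# exclusion regex are masked out (False = no weight decay); the resulting pytree
# plugs into optax optimizers that accept a `mask` argument. The exclusion patterns
# and the toy params layout are assumptions.
def _example_get_weight_decay_mask():
    mask_fn = get_weight_decay_mask(['bias', 'layer_norm'])
    params = {'dense': {'kernel': jnp.zeros((4, 8)), 'bias': jnp.zeros((8,))}}
    # Returns {'dense': {'kernel': True, 'bias': False}}
    return mask_fn(params)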
def tree_apply(fns, tree):
""" Apply a pytree of functions to the pytree. """
return jax.tree_util.tree_map(lambda fn, x: fn(x), fns, tree)
|
blockwise-parallel-transformer-1-main
|
bpt/tools/jax_utils.py
|
import os
import time
from typing import Any, Mapping, Text, Tuple, Union, NamedTuple
from functools import partial
import re
import dataclasses
import random
from ml_collections.config_dict import config_dict
from ml_collections import ConfigDict
import jax
import jax.numpy as jnp
import numpy as np
from absl import logging
import optax
from bpt.tools.jax_utils import float_to_dtype
class OptimizerFactory(object):
""" Configurable optax optimizer factory. """
def __init__(self):
raise NotImplementedError
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.accumulate_gradient_steps = 1
config.type = 'adamw'
config.palm_optimizer = PalmOptimizerFactory.get_default_config()
config.adamw_optimizer = AdamWOptimizerFactory.get_default_config()
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_optimizer(cls, config, weight_decay_mask=None):
config = cls.get_default_config(config)
if config.type == 'palm':
optimizer, optimizer_info = PalmOptimizerFactory.get_optimizer(
config.palm_optimizer, weight_decay_mask
)
elif config.type == 'adamw':
optimizer, optimizer_info = AdamWOptimizerFactory.get_optimizer(
config.adamw_optimizer, weight_decay_mask
)
else:
raise ValueError(f'Unknown optimizer type: {config.type}')
if config.accumulate_gradient_steps > 1:
optimizer = optax.MultiSteps(
optimizer, config.accumulate_gradient_steps
)
return optimizer, optimizer_info
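# Illustrative usage sketch (not part of the original class): builds an AdamW
# optimizer by tweaking a copy of the default config; a weight-decay mask (e.g. one
# produced by get_weight_decay_mask in jax_utils) could be passed as the second
# argument of get_optimizer. The override values below are assumptions.
def _example_optimizer_factory():
    config = OptimizerFactory.get_default_config()
    config.adamw_optimizer.lr = 3e-4
    config.adamw_optimizer.lr_warmup_steps = 100
    optimizer, optimizer_info = OptimizerFactory.get_optimizer(config)
    return optimizer, optimizer_info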
class PalmOptimizerFactory(object):
""" PaLM optimizer factory. This optimizer implements the optimizer
described in the PaLM paper: https://arxiv.org/abs/2204.02311
"""
def __init__(self):
raise NotImplementedError
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.lr = 0.01
config.lr_warmup_steps = 10000
config.b1 = 0.9
config.b2 = 0.99
config.clip_gradient = 1.0
config.weight_decay = 1e-4
config.bf16_momentum = True
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_optimizer(cls, config, weight_decay_mask=None):
config = cls.get_default_config(config)
def learning_rate_schedule(step):
multiplier = config.lr / 0.01
return multiplier / jnp.sqrt(jnp.maximum(step, config.lr_warmup_steps))
def weight_decay_schedule(step):
multiplier = config.weight_decay / 1e-4
return -multiplier * jnp.square(learning_rate_schedule(step))
optimizer_info = dict(
learning_rate_schedule=learning_rate_schedule,
weight_decay_schedule=weight_decay_schedule,
)
optimizer = optax.chain(
optax.clip_by_global_norm(config.clip_gradient),
optax.adafactor(
learning_rate=learning_rate_schedule,
multiply_by_parameter_scale=True,
momentum=config.b1,
decay_rate=config.b2,
factored=False,
clipping_threshold=None,
dtype_momentum=jnp.bfloat16 if config.bf16_momentum else jnp.float32,
),
optax_add_scheduled_weight_decay(
weight_decay_schedule, weight_decay_mask
)
)
return optimizer, optimizer_info
class AdamWOptimizerFactory(object):
""" AdamW optimizer with cosine schedule. """
def __init__(self):
raise NotImplementedError
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.init_lr = 0.0
config.end_lr = 0.001
config.lr = 0.01
config.lr_warmup_steps = 2000
config.lr_decay_steps = 500000
config.b1 = 0.9
config.b2 = 0.95
config.clip_gradient = 1.0
config.weight_decay = 1e-4
config.bf16_momentum = True
config.multiply_by_parameter_scale = True
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_optimizer(cls, config, weight_decay_mask=None):
config = cls.get_default_config(config)
learning_rate_schedule = optax.warmup_cosine_decay_schedule(
init_value=config.init_lr,
peak_value=config.lr,
warmup_steps=config.lr_warmup_steps,
decay_steps=config.lr_decay_steps,
end_value=config.end_lr,
)
optimizer_info = dict(
learning_rate_schedule=learning_rate_schedule,
)
if config.multiply_by_parameter_scale:
optimizer = optax.chain(
optax.clip_by_global_norm(config.clip_gradient),
optax.adafactor(
learning_rate=learning_rate_schedule,
multiply_by_parameter_scale=True,
momentum=config.b1,
decay_rate=config.b2,
factored=False,
clipping_threshold=None,
dtype_momentum=jnp.bfloat16 if config.bf16_momentum else jnp.float32,
),
optax_add_scheduled_weight_decay(
lambda step: -learning_rate_schedule(step) * config.weight_decay,
weight_decay_mask
)
)
else:
optimizer = optax.chain(
optax.clip_by_global_norm(config.clip_gradient),
optax.adamw(
learning_rate=learning_rate_schedule,
weight_decay=config.weight_decay,
b1=0.9,
b2=0.95,
mask=weight_decay_mask,
mu_dtype=jnp.bfloat16 if config.bf16_momentum else jnp.float32,
),
)
return optimizer, optimizer_info
class OptaxScheduledWeightDecayState(NamedTuple):
count: jnp.DeviceArray
def optax_add_scheduled_weight_decay(schedule_fn, mask=None):
""" Apply weight decay with schedule. """
def init_fn(params):
del params
return OptaxScheduledWeightDecayState(count=jnp.zeros([], jnp.int32))
def update_fn(updates, state, params):
if params is None:
raise ValueError('Params cannot be None for weight decay!')
weight_decay = schedule_fn(state.count)
updates = jax.tree_util.tree_map(
lambda g, p: g + weight_decay * p, updates, params
)
return updates, OptaxScheduledWeightDecayState(
count=optax.safe_int32_increment(state.count)
)
if mask is not None:
return optax.masked(optax.GradientTransformation(init_fn, update_fn), mask)
return optax.GradientTransformation(init_fn, update_fn)
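# Illustrative usage sketch (not part of the original module): chains the scheduled
# weight decay behind plain SGD; the negative schedule mirrors the factories above,
# which pass -learning_rate * weight_decay so that parameters shrink each step.
# The learning rate, decay constant and toy params are assumptions.
def _example_optax_add_scheduled_weight_decay():
    tx = optax.chain(
        optax.sgd(learning_rate=0.1),
        optax_add_scheduled_weight_decay(lambda step: -0.1 * 1e-4),
    )
    params = {'w': jnp.ones((3,))}
    state = tx.init(params)
    grads = {'w': jnp.ones((3,))}
    updates, state = tx.update(grads, state, params)
    return optax.apply_updates(params, updates)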
|
blockwise-parallel-transformer-1-main
|
bpt/tools/optimizers.py
|
import inspect
import logging
import os
import pprint
import random
import tempfile
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from io import BytesIO
from socket import gethostname
import dataclasses
import absl.flags
import absl.logging
import cloudpickle as pickle
import flax
import gcsfs
import jax
import jax.numpy as jnp
import msgpack
import numpy as np
import wandb
from flax.serialization import from_bytes, to_bytes
from ml_collections import ConfigDict
from ml_collections.config_dict.config_dict import placeholder
from ml_collections.config_flags import config_flags
from flax.training.train_state import TrainState
from flax.core import FrozenDict
from absl.app import run
class WandBLogger(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.project_id = ""
config.project_entity = placeholder(str)
config.experiment_id = placeholder(str)
config.append_uuid = True
config.experiment_note = placeholder(str)
config.output_dir = "/tmp/"
config.wandb_dir = ""
config.profile_dir = ""
config.online = False
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, variant, enable=True):
self.enable = enable
self.config = self.get_default_config(config)
if self.config.experiment_id is None or self.config.experiment_id == "":
self.config.experiment_id = uuid.uuid4().hex
else:
if self.config.append_uuid:
self.config.experiment_id = (
str(self.config.experiment_id) + "_" + uuid.uuid4().hex
)
else:
self.config.experiment_id = str(self.config.experiment_id)
if self.enable:
if self.config.output_dir == "":
self.config.output_dir = tempfile.mkdtemp()
else:
self.config.output_dir = os.path.join(
self.config.output_dir, self.config.experiment_id
)
if not self.config.output_dir.startswith("gs://"):
os.makedirs(self.config.output_dir, exist_ok=True)
if self.config.wandb_dir == "":
if not self.config.output_dir.startswith("gs://"):
# Use the same directory as output_dir if it is not a GCS path.
self.config.wandb_dir = self.config.output_dir
else:
# Otherwise, use a temporary directory.
self.config.wandb_dir = tempfile.mkdtemp()
else:
# Join the wandb_dir with the experiment_id.
self.config.wandb_dir = os.path.join(
self.config.wandb_dir, self.config.experiment_id
)
os.makedirs(self.config.wandb_dir, exist_ok=True)
if self.config.profile_dir == "":
if not self.config.output_dir.startswith("gs://"):
# Use the same directory as output_dir if it is not a GCS path.
self.config.profile_dir = self.config.output_dir
else:
# Otherwise, use a temporary directory.
self.config.profile_dir = tempfile.mkdtemp()
else:
# Join the profile_dir with the experiment_id.
self.config.profile_dir = os.path.join(
self.config.profile_dir, self.config.experiment_id
)
os.makedirs(self.config.profile_dir, exist_ok=True)
self._variant = flatten_config_dict(variant)
if "hostname" not in self._variant:
self._variant["hostname"] = gethostname()
if self.enable:
self.run = wandb.init(
reinit=True,
config=self._variant,
project=self.config.project_id,
dir=self.config.wandb_dir,
id=self.config.experiment_id,
resume="allow",
notes=self.config.experiment_note,
entity=self.config.project_entity,
settings=wandb.Settings(
start_method="thread",
_disable_stats=True,
),
mode="online" if self.config.online else "offline",
)
else:
self.run = None
def log(self, *args, **kwargs):
if self.enable:
self.run.log(*args, **kwargs)
def save_pickle(self, obj, filename):
if self.enable:
save_pickle(obj, os.path.join(self.config.output_dir, filename))
@property
def experiment_id(self):
return self.config.experiment_id
@property
def variant(self):
        return self._variant
@property
def output_dir(self):
return self.config.output_dir
@property
def wandb_dir(self):
return self.config.wandb_dir
@property
def profile_dir(self):
return self.config.profile_dir
def config_dict(*args, **kwargs):
return ConfigDict(dict(*args, **kwargs))
def define_flags_with_default(**kwargs):
for key, val in kwargs.items():
if isinstance(val, tuple):
val, help_str = val
else:
help_str = ""
if isinstance(val, ConfigDict):
config_flags.DEFINE_config_dict(key, val)
elif isinstance(val, bool):
# Note that True and False are instances of int.
absl.flags.DEFINE_bool(key, val, help_str)
elif isinstance(val, int):
absl.flags.DEFINE_integer(key, val, help_str)
elif isinstance(val, float):
absl.flags.DEFINE_float(key, val, help_str)
elif isinstance(val, str):
absl.flags.DEFINE_string(key, val, help_str)
else:
raise ValueError("Incorrect value type")
return absl.flags.FLAGS, kwargs
def print_flags(flags, flags_def):
    flag_strings = [
"{}: {}".format(key, val)
for key, val in get_user_flags(flags, flags_def).items()
]
logging.info(
"Hyperparameter configs: \n{}".format(
            pprint.pformat(flag_strings)
)
)
def get_user_flags(flags, flags_def):
output = {}
for key in flags_def:
val = getattr(flags, key)
if isinstance(val, ConfigDict):
output.update(flatten_config_dict(val, prefix=key))
else:
output[key] = val
return output
def user_flags_to_config_dict(flags, flags_def):
output = ConfigDict()
for key in flags_def:
output[key] = getattr(flags, key)
return output
def flatten_config_dict(config, prefix=None):
output = {}
    for key, val in config.items():
        # Carry the accumulated prefix into the recursion so deeply nested keys keep
        # their full dotted path (e.g. "a.b.c") instead of only the innermost level.
        full_key = key if prefix is None else "{}.{}".format(prefix, key)
        if isinstance(val, ConfigDict) or isinstance(val, dict):
            output.update(flatten_config_dict(val, prefix=full_key))
        else:
            output[full_key] = val
return output
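# Illustrative usage sketch (not part of the original module): nested config keys are
# joined with '.' into a flat dict, which is the form WandBLogger passes to
# wandb.init(config=...). The config values are assumptions.
def _example_flatten_config_dict():
    cfg = ConfigDict({'optimizer': {'lr': 3e-4, 'b1': 0.9}, 'seed': 42})
    # Returns {'optimizer.lr': 0.0003, 'optimizer.b1': 0.9, 'seed': 42}
    return flatten_config_dict(cfg)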
def function_args_to_config(fn, none_arg_types=None, exclude_args=None, override_args=None):
config = ConfigDict()
    arg_spec = inspect.getfullargspec(fn)
n_args = len(arg_spec.defaults)
arg_names = arg_spec.args[-n_args:]
default_values = arg_spec.defaults
for name, value in zip(arg_names, default_values):
if exclude_args is not None and name in exclude_args:
continue
elif override_args is not None and name in override_args:
config[name] = override_args[name]
elif none_arg_types is not None and value is None and name in none_arg_types:
config[name] = placeholder(none_arg_types[name])
else:
config[name] = value
return config
def prefix_metrics(metrics, prefix):
return {"{}/{}".format(prefix, key): value for key, value in metrics.items()}
def open_file(path, mode='rb', cache_type='readahead'):
if path.startswith("gs://"):
logging.getLogger("fsspec").setLevel(logging.WARNING)
return gcsfs.GCSFileSystem().open(path, mode, cache_type=cache_type)
else:
return open(path, mode)
def save_pickle(obj, path):
with open_file(path, "wb") as fout:
pickle.dump(obj, fout)
def load_pickle(path):
with open_file(path, "rb") as fin:
data = pickle.load(fin)
return data
def text_to_array(text, encoding="utf-8"):
return np.frombuffer(text.encode(encoding), dtype="uint8")
def array_to_text(array, encoding="utf-8"):
with BytesIO(array) as fin:
text = fin.read().decode(encoding)
return text
class JaxRNG(object):
""" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside
pure function.
"""
@classmethod
def from_seed(cls, seed):
return cls(jax.random.PRNGKey(seed))
def __init__(self, rng):
self.rng = rng
def __call__(self, keys=None):
if keys is None:
self.rng, split_rng = jax.random.split(self.rng)
return split_rng
elif isinstance(keys, int):
split_rngs = jax.random.split(self.rng, num=keys + 1)
self.rng = split_rngs[0]
return tuple(split_rngs[1:])
else:
split_rngs = jax.random.split(self.rng, num=len(keys) + 1)
self.rng = split_rngs[0]
return {key: val for key, val in zip(keys, split_rngs[1:])}
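# Illustrative usage sketch (not part of the original class): shows the three calling
# conventions of JaxRNG. The seed is an assumption.
def _example_jax_rng():
    rng = JaxRNG.from_seed(42)
    single_key = rng()                       # one fresh PRNG key
    k1, k2 = rng(2)                          # a tuple of two fresh keys
    named = rng(('params', 'dropout'))       # {'params': key, 'dropout': key}
    return single_key, k1, k2, named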
def wrap_function_with_rng(rng):
""" To be used as decorator, automatically bookkeep a RNG for the wrapped function. """
def wrap_function(function):
def wrapped(*args, **kwargs):
nonlocal rng
rng, split_rng = jax.random.split(rng)
return function(split_rng, *args, **kwargs)
return wrapped
return wrap_function
def init_rng(seed):
global jax_utils_rng
jax_utils_rng = JaxRNG.from_seed(seed)
def next_rng(*args, **kwargs):
global jax_utils_rng
return jax_utils_rng(*args, **kwargs)
def flatten_tree(xs, is_leaf=None, sep=None):
""" A stronger version of flax.traverse_util.flatten_dict, supports
dict, tuple, list and TrainState. Tuple and list indices will be
converted to strings.
"""
tree_node_classes = (FrozenDict, dict, tuple, list, TrainState)
if not isinstance(xs, tree_node_classes):
        raise ValueError(f'Unsupported node type: {type(xs)}')
def _is_leaf(prefix, fx):
if is_leaf is not None:
            return is_leaf(prefix, fx)
return False
def _key(path):
if sep is None:
return path
return sep.join(path)
def _convert_to_dict(xs):
if isinstance(xs, (FrozenDict, dict)):
return xs
elif isinstance(xs, (tuple, list)):
return {f'{i}': v for i, v in enumerate(xs)}
elif isinstance(xs, TrainState):
output = {}
for field in dataclasses.fields(xs):
if 'pytree_node' not in field.metadata or field.metadata['pytree_node']:
output[field.name] = getattr(xs, field.name)
return output
else:
            raise ValueError(f'Unsupported node type: {type(xs)}')
def _flatten(xs, prefix):
if not isinstance(xs, tree_node_classes) or _is_leaf(prefix, xs):
return {_key(prefix): xs}
result = {}
is_empty = True
for (key, value) in _convert_to_dict(xs).items():
is_empty = False
path = prefix + (key, )
result.update(_flatten(value, path))
return result
return _flatten(xs, ())
def named_tree_map(f, tree, is_leaf=None, sep=None):
""" An extended version of jax.tree_util.tree_map, where the mapped function
f takes both the name (path) and the tree leaf as input.
"""
flattened_tree = flatten_tree(tree, is_leaf=is_leaf, sep=sep)
id_to_name = {id(val): key for key, val in flattened_tree.items()}
def map_fn(leaf):
name = id_to_name[id(leaf)]
return f(name, leaf)
return jax.tree_util.tree_map(map_fn, tree)
def get_pytree_shape_info(tree):
    flattened_tree = flatten_tree(tree, sep='/')
    shapes = []
    for key in sorted(list(flattened_tree.keys())):
        val = flattened_tree[key]
shapes.append(f'{key}: {val.dtype}, {val.shape}')
return '\n'.join(shapes)
def collect_metrics(metrics, names, prefix=None):
collected = {}
for name in names:
if name in metrics:
collected[name] = jnp.mean(metrics[name])
if prefix is not None:
collected = {
'{}/{}'.format(prefix, key): value for key, value in collected.items()
}
return collected
def set_random_seed(seed):
np.random.seed(seed)
random.seed(seed)
init_rng(seed)
|
blockwise-parallel-transformer-1-main
|
bpt/tools/utils.py
|
# Python file for Paperspace Gradient NLP Text Generation Tutorial example
# It runs the GPT-2 model from HuggingFace: https://huggingface.co/gpt2
#
# The Workflow is triggered when its YAML file is present in the .gradient/workflows/ directory
# in a GitHub repository linked to the user's Gradient project
# It clones this repo and then in turn calls this file
# This file outputs the generated text to output.txt in a Gradient-managed Dataset
# The Workflow runs on the Paperspace HuggingFace NLP container (paperspace/transformers-gpu:0.4.0)
# See the Gradient documentation page for more details: ...
#
# The 4 values under "Settings" below can be altered to generate different text
# If the resulting updated version of this file is uploaded to the repo .gradient/workflows/
# directory, the Workflow will be rerun, and a new output.txt file will be generated
#
# Last updated: Sep 13th 2021
# Setup
from transformers import pipeline, set_seed
# Settings
random_seed = 42
max_length = 30
num_return_sequences = 5
initial_sentence = "Hello, I'm a language model,"
# Create generator that uses GPT-2
generator = pipeline('text-generation', model='gpt2')
# Random seed for text generation
set_seed(random_seed)
# Run the generator
output = generator(initial_sentence, max_length = max_length, num_return_sequences = num_return_sequences)
# Write the output to a file
with open('output.txt', 'w') as f:
ival = 1
for val in output:
print('---\nOutput {} of {}\n---\n'.format(ival, num_return_sequences), file=f)
print(val['generated_text'], file=f)
if ival < num_return_sequences: print(file=f)
ival += 1
print('Done')
|
kosmos-model-main
|
nlp_text_generation.py
|
import os
INITIAL_PEERS = os.environ.get("INITIAL_PEERS")
if not INITIAL_PEERS:
raise RuntimeError("Must specify INITIAL_PEERS environment variable with one or more peer ids")
INITIAL_PEERS = INITIAL_PEERS.split()
MODEL_NAME = os.environ.get("MODEL_NAME")
if not MODEL_NAME:
raise RuntimeError("Must specify MODEL_NAME as an index of a transformer block to be tested")
REF_NAME = os.environ.get("REF_NAME")
ADAPTER_NAME = os.environ.get("ADAPTER_NAME")
|
TheGrid-main
|
tests/test_utils.py
|
import asyncio
import gc
from contextlib import suppress
import psutil
import pytest
from hivemind.utils.crypto import RSAPrivateKey
from hivemind.utils.logging import get_logger
from hivemind.utils.mpfuture import MPFuture
logger = get_logger(__name__)
@pytest.fixture
def event_loop():
"""
This overrides the ``event_loop`` fixture from pytest-asyncio
(e.g. to make it compatible with ``asyncio.subprocess``).
This fixture is identical to the original one but does not call ``loop.close()`` in the end.
Indeed, at this point, the loop is already stopped (i.e. next tests are free to create new loops).
However, finalizers of objects created in the current test may reference the current loop and fail if it is closed.
For example, this happens while using ``asyncio.subprocess`` (the ``asyncio.subprocess.Process`` finalizer
fails if the loop is closed, but works if the loop is only stopped).
"""
yield asyncio.get_event_loop()
@pytest.fixture(autouse=True, scope="session")
def cleanup_children():
yield
with RSAPrivateKey._process_wide_key_lock:
RSAPrivateKey._process_wide_key = None
gc.collect() # Call .__del__() for removed objects
children = psutil.Process().children(recursive=True)
if children:
logger.info(f"Cleaning up {len(children)} leftover child processes")
for child in children:
with suppress(psutil.NoSuchProcess):
child.terminate()
psutil.wait_procs(children, timeout=1)
for child in children:
with suppress(psutil.NoSuchProcess):
child.kill()
MPFuture.reset_backend()
|
TheGrid-main
|
tests/conftest.py
|
import random
import pytest
import torch
import transformers
from tensor_parallel import TensorParallel
from tensor_parallel.slicing_configs import get_bloom_config
from grid.server.from_pretrained import load_pretrained_block
from test_utils import MODEL_NAME
@pytest.mark.forked
@pytest.mark.parametrize("custom_config", [True, False])
@pytest.mark.parametrize("devices", [("cpu",) * 2, ("cpu",) * 3, ("cpu",) * 4])
def test_tp_block(devices, custom_config):
block_index = random.randint(0, 10)
model_config = transformers.AutoConfig.from_pretrained(MODEL_NAME)
block = load_pretrained_block(MODEL_NAME, block_index=block_index, torch_dtype=torch.float32).to(devices[0])
tp_config = None
if custom_config:
tp_config = get_bloom_config(model_config, devices)
batch_size = 2
prefix_length = 5
test_inputs1 = torch.randn(batch_size, 3, 1024, requires_grad=True, device=devices[0])
test_inputs2 = test_inputs1.detach().clone().requires_grad_(True)
test_prefix1 = torch.randn(batch_size, prefix_length, 1024, requires_grad=True, device=devices[0])
test_prefix2 = test_prefix1.detach().clone().requires_grad_(True)
grad_proj = torch.rand_like(test_inputs1)
y_prefix_ref, layer_past = block(test_prefix1, use_cache=True)
y_ref, cache_ref = block(test_inputs1, use_cache=True, layer_past=layer_past)
y_ref.backward(grad_proj)
block_tp = TensorParallel(block, devices, config=tp_config)
y_prefix, layer_past = block_tp(test_prefix2, use_cache=True)
y_ours, cache_ours = block_tp(test_inputs2, use_cache=True, layer_past=layer_past)
y_ours.backward(grad_proj)
assert torch.allclose(y_prefix, y_prefix_ref, atol=1e-5)
assert torch.allclose(y_ours, y_ref, atol=1e-5)
assert torch.allclose(test_inputs1.grad, test_inputs2.grad, atol=1e-4)
assert torch.allclose(test_prefix1.grad, test_prefix2.grad, atol=1e-4)
|
TheGrid-main
|
tests/test_tensor_parallel.py
|
import subprocess
import sys
import pytest
import torch
from grid import AutoDistributedConfig
from grid.server.throughput import measure_compute_rps
from grid.utils.convert_block import QuantType
from test_utils import MODEL_NAME
def test_bnb_not_imported_when_unnecessary():
"""
We avoid importing bitsandbytes when it's not used,
since bitsandbytes doesn't always find correct CUDA libs and may raise exceptions because of that.
If this test fails, please change your code to import bitsandbytes and/or grid.utils.peft
in the function's/method's code when it's actually needed instead of importing them in the beginning of the file.
This won't slow down the code - importing a module for the 2nd time doesn't rerun module code.
"""
subprocess.check_call([sys.executable, "-c", "import grid, sys; assert 'bitsandbytes' not in sys.modules"])
@pytest.mark.forked
@pytest.mark.parametrize("inference", [False, True])
@pytest.mark.parametrize("n_tokens", [1, 16])
@pytest.mark.parametrize("tensor_parallel", [False, True])
def test_compute_throughput(inference: bool, n_tokens: int, tensor_parallel: bool):
config = AutoDistributedConfig.from_pretrained(MODEL_NAME)
tensor_parallel_devices = ("cpu", "cpu") if tensor_parallel else ()
compute_rps = measure_compute_rps(
config,
device=torch.device("cpu"),
dtype=torch.bfloat16,
quant_type=QuantType.NONE,
tensor_parallel_devices=tensor_parallel_devices,
n_tokens=n_tokens,
n_steps=5,
inference=inference,
)
assert isinstance(compute_rps, float) and compute_rps > 0
|
TheGrid-main
|
tests/test_aux_functions.py
|
import os
import shutil
import pytest
from huggingface_hub import snapshot_download
from grid.utils.peft import check_peft_repository, load_peft
UNSAFE_PEFT_REPO = "artek0chumak/bloom-560m-unsafe-peft"
SAFE_PEFT_REPO = "artek0chumak/bloom-560m-safe-peft"
TMP_CACHE_DIR = "tmp_cache/"
def clear_dir(path_to_dir):
shutil.rmtree(path_to_dir)
os.mkdir(path_to_dir)
def dir_empty(path_to_dir):
files = os.listdir(path_to_dir)
return len(files) == 0
@pytest.mark.forked
def test_check_peft():
assert not check_peft_repository(UNSAFE_PEFT_REPO), "NOSAFE_PEFT_REPO is safe to load."
assert check_peft_repository(SAFE_PEFT_REPO), "SAFE_PEFT_REPO is not safe to load."
@pytest.mark.forked
def test_load_noncached(tmpdir):
clear_dir(tmpdir)
with pytest.raises(Exception):
load_peft(UNSAFE_PEFT_REPO, cache_dir=tmpdir)
assert dir_empty(tmpdir), "UNSAFE_PEFT_REPO is loaded"
load_peft(SAFE_PEFT_REPO, cache_dir=tmpdir)
assert not dir_empty(tmpdir), "SAFE_PEFT_REPO is not loaded"
@pytest.mark.forked
def test_load_cached(tmpdir):
clear_dir(tmpdir)
snapshot_download(SAFE_PEFT_REPO, cache_dir=tmpdir)
load_peft(SAFE_PEFT_REPO, cache_dir=tmpdir)
@pytest.mark.forked
def test_load_layer_exists(tmpdir):
clear_dir(tmpdir)
load_peft(SAFE_PEFT_REPO, block_idx=2, cache_dir=tmpdir)
@pytest.mark.forked
def test_load_layer_nonexists(tmpdir):
clear_dir(tmpdir)
load_peft(
SAFE_PEFT_REPO,
block_idx=1337,
cache_dir=tmpdir,
)
|
TheGrid-main
|
tests/test_peft.py
|
import multiprocessing as mp
import time
import pytest
import torch
from hivemind.moe.server.runtime import Runtime
from grid.server.task_pool import PrioritizedTaskPool
@pytest.mark.forked
def test_priority_pools():
outputs_queue = mp.SimpleQueue()
results_valid = mp.Event()
def dummy_pool_func(x):
time.sleep(0.1)
y = x**2
outputs_queue.put((x, y))
return (y,)
class DummyBackend:
def __init__(self, pools):
self.pools = pools
def get_pools(self):
return self.pools
pools = (
PrioritizedTaskPool(dummy_pool_func, name="A", max_batch_size=1),
PrioritizedTaskPool(dummy_pool_func, name="B", max_batch_size=1),
)
runtime = Runtime({str(i): DummyBackend([pool]) for i, pool in enumerate(pools)}, prefetch_batches=0)
runtime.start()
def process_tasks():
futures = []
futures.append(pools[0].submit_task(torch.tensor([0]), priority=1))
futures.append(pools[0].submit_task(torch.tensor([1]), priority=1))
time.sleep(0.01)
futures.append(pools[1].submit_task(torch.tensor([2]), priority=1))
futures.append(pools[0].submit_task(torch.tensor([3]), priority=2))
futures.append(pools[0].submit_task(torch.tensor([4]), priority=10))
futures.append(pools[0].submit_task(torch.tensor([5]), priority=0))
futures.append(pools[0].submit_task(torch.tensor([6]), priority=1))
futures.append(pools[1].submit_task(torch.tensor([7]), priority=11))
futures.append(pools[1].submit_task(torch.tensor([8]), priority=1))
for i, f in enumerate(futures):
assert f.result()[0].item() == i**2
results_valid.set()
proc = mp.Process(target=process_tasks)
proc.start()
proc.join()
assert results_valid.is_set()
ordered_outputs = []
while not outputs_queue.empty():
ordered_outputs.append(outputs_queue.get()[0].item())
assert ordered_outputs == [0, 5, 1, 2, 6, 8, 3, 4, 7]
# 0 - first batch is loaded immediately, before everything else
# 5 - highest priority task overall
# 1 - first of several tasks with equal lowest priority (1)
# 2 - second earliest task with priority 1, fetched from pool B
# 6 - third earliest task with priority 1, fetched from pool A again
# 8 - last priority-1 task, pool B
# 3 - task with priority 2 from pool A
# 4 - task with priority 10 from pool A
# 7 - task with priority 11 from pool B
|
TheGrid-main
|
tests/test_priority_pool.py
|
import time
import hivemind
import pytest
import torch
from grid import DistributedBloomConfig, RemoteSequential
from grid.server.handler import CACHE_TOKENS_AVAILABLE
from test_utils import *
@pytest.mark.forked
def test_server_info(block_from: int = 22, block_to: int = 24, max_length: int = 100, max_length2: int = 50):
config = DistributedBloomConfig.from_pretrained(MODEL_NAME)
dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=True, start=True)
blocks1 = RemoteSequential(config, dht=dht, start_block=block_from, end_block=block_to)
blocks2 = RemoteSequential(config, dht=dht, start_block=block_to - 1, end_block=block_to)
info_before = blocks1.sequence_manager.rpc_info
with blocks1.inference_session(max_length=max_length) as sess:
sess.step(torch.randn(1, 1, config.hidden_size))
blocks1.sequence_manager.state.rpc_info = None # invalidate cache
info_inside = blocks1.sequence_manager.rpc_info
with blocks2.inference_session(max_length=max_length2) as sess2:
sess2.step(torch.randn(1, 1, config.hidden_size))
blocks2.sequence_manager.state.rpc_info = None # invalidate cache
info_inside2 = blocks2.sequence_manager.rpc_info
time.sleep(0.1)
blocks1.sequence_manager.state.rpc_info = None # invalidate cache
info_after = blocks1.sequence_manager.rpc_info
assert info_before[CACHE_TOKENS_AVAILABLE] == info_after[CACHE_TOKENS_AVAILABLE]
assert info_before[CACHE_TOKENS_AVAILABLE] - info_inside[CACHE_TOKENS_AVAILABLE] == max_length * len(blocks1)
assert info_inside[CACHE_TOKENS_AVAILABLE] - info_inside2[CACHE_TOKENS_AVAILABLE] == max_length2 * len(blocks2)
|
TheGrid-main
|
tests/test_server_stats.py
|
import pytest
import torch
import torch.nn.functional as F
from hivemind import DHT, BatchTensorDescriptor, get_logger
from hivemind.proto import runtime_pb2
from grid import DistributedBloomConfig
from grid.client import RemoteSequenceManager, RemoteSequential
from grid.data_structures import UID_DELIMITER
from grid.server.from_pretrained import load_pretrained_block
from test_utils import *
logger = get_logger(__name__)
@pytest.mark.forked
def test_remote_sequential():
config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
dht = DHT(initial_peers=config.initial_peers, client_mode=True, start=True)
test_inputs = torch.randn(1, 5, config.hidden_size, requires_grad=True)
grad_proj = torch.randn(1, 5, config.hidden_size)
sequential = RemoteSequential(config, dht=dht)
full_outputs = sequential(test_inputs)
(full_outputs * grad_proj).sum().backward()
assert test_inputs.grad is not None
full_grad = test_inputs.grad.clone()
test_inputs.grad.data.zero_()
first_half = sequential[: config.num_hidden_layers // 2]
second_half = sequential[config.num_hidden_layers // 2 :]
assert len(first_half) + len(second_half) == len(sequential)
assert abs(len(first_half) - len(second_half)) == config.num_hidden_layers % 2
for m in sequential, first_half, second_half:
assert isinstance(repr(m), str)
hidden = first_half(test_inputs)
assert isinstance(hidden, torch.Tensor)
assert hidden.shape == test_inputs.shape
assert hidden.requires_grad
second_half_outputs = second_half(hidden)
assert torch.allclose(second_half_outputs, full_outputs, atol=1e-4)
(second_half_outputs * grad_proj).sum().backward()
assert torch.allclose(test_inputs.grad, full_grad, atol=1e-3)
# test RemoteSequential with lossy compression
block_uids = [f"{config.dht_prefix}{UID_DELIMITER}{i}" for i in range(config.num_hidden_layers)]
lossy_sequential = RemoteSequential(
config, sequence_manager=DummyCustomSequenceManager(config, block_uids, dht=dht)
)
test_inputs.grad = None
approx_outputs = lossy_sequential(test_inputs)
(approx_outputs * grad_proj).sum().backward()
assert not torch.allclose(approx_outputs, full_outputs, rtol=0, atol=1e-4), "compression was not used"
assert not torch.allclose(test_inputs.grad, full_grad, rtol=0, atol=1e-2), "compression was not used"
assert abs(approx_outputs - full_outputs).mean() < 0.01
absmax = abs(full_grad).max()
assert abs(test_inputs.grad / absmax - full_grad / absmax).mean() < 0.05
class DummyCustomSequenceManager(RemoteSequenceManager):
"""A sequence manager that compresses inputs/outputs during forward and backward pass."""
@property
def rpc_info(self):
rpc_info = super().rpc_info
dims = (2048, 1024)
compressed_input_schema = BatchTensorDescriptor(dims, compression=runtime_pb2.CompressionType.FLOAT16)
rpc_info["forward_schema"] = (compressed_input_schema,), dict() # (args, kwargs)
return rpc_info
def get_request_metadata(self, protocol: str, *args, **kwargs):
metadata = super().get_request_metadata(protocol, *args, **kwargs)
if protocol == "rpc_forward":
metadata["output_compression"] = (runtime_pb2.CompressionType.FLOAT16,)
elif protocol == "rpc_backward":
metadata["output_compression"] = (runtime_pb2.CompressionType.FLOAT16,)
# FIXME: Initially, we used CompressionType.BLOCKWISE_8BIT for rpc_backward() here.
# This is currently broken since hivemind==1.1.8 is not compatible with bitsandbytes==0.39.1.
# Please revert to BLOCKWISE_8BIT once this is fixed: https://github.com/learning-at-home/hivemind/issues/572
return metadata
@pytest.mark.forked
def test_remote_sequential_prompts(batch_size=2, seq_len=5, pre_seq_len=3):
config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
remote_sequential = RemoteSequential(config)
inputs = F.normalize(torch.randn(batch_size, seq_len, config.hidden_size), dim=-1)
output_proj = F.normalize(torch.randn(batch_size, seq_len + pre_seq_len, config.hidden_size), dim=-1)
input_prompts = F.normalize(torch.randn(batch_size, pre_seq_len, config.hidden_size, requires_grad=True), dim=-1)
intermediate_prompts = torch.randn(
config.num_hidden_layers, batch_size, pre_seq_len, config.hidden_size, requires_grad=True
)
input_prompts = input_prompts.detach().requires_grad_(True)
intermediate_prompts = intermediate_prompts.detach().requires_grad_(True)
inputs_with_prompts = torch.cat([inputs, input_prompts], dim=1)
assert inputs_with_prompts.shape == (batch_size, seq_len + pre_seq_len, config.hidden_size)
outputs = remote_sequential(inputs_with_prompts, prompts=intermediate_prompts)
(outputs * output_proj).sum().backward()
assert intermediate_prompts.grad is not None
input_prompts_ref = input_prompts.clone().detach().requires_grad_(True)
intermediate_prompts_ref = intermediate_prompts.clone().detach().requires_grad_(True)
assert input_prompts_ref.grad is None
assert intermediate_prompts_ref.grad is None
outputs_ref = torch.cat([inputs, input_prompts_ref], dim=1)
for block_index in range(config.num_hidden_layers):
block_prompt = intermediate_prompts_ref[block_index]
outputs_ref[:, : block_prompt.shape[1]] += block_prompt
block = load_pretrained_block(MODEL_NAME, block_index=block_index, torch_dtype=torch.float32)
(outputs_ref,) = block(outputs_ref)
assert torch.allclose(outputs_ref, outputs, atol=1e-3)
(outputs_ref * output_proj).sum().backward()
assert input_prompts_ref.grad is not None
assert torch.allclose(input_prompts_ref.grad, input_prompts.grad, atol=1e-2)
assert intermediate_prompts_ref.grad is not None
assert torch.allclose(intermediate_prompts_ref.grad, intermediate_prompts.grad, atol=1e-2)
|
TheGrid-main
|
tests/test_remote_sequential.py
|
import peft
import pytest
import torch
import transformers
from hivemind import get_logger
from transformers.generation import BeamSearchScorer
from transformers.models.bloom import BloomForCausalLM
from grid import DistributedBloomForCausalLM
from test_utils import *
logger = get_logger(__name__)
@pytest.mark.forked
@pytest.mark.parametrize("use_peft", (True, False) if ADAPTER_NAME else (False,))
@pytest.mark.parametrize("pass_empty_tensors", (True, False))
def test_full_model_exact_match(use_peft: bool, pass_empty_tensors: bool, atol_forward=1e-3, atol_inference=1e-3):
tokenizer = transformers.BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = DistributedBloomForCausalLM.from_pretrained(
MODEL_NAME,
initial_peers=INITIAL_PEERS,
low_cpu_mem_usage=True,
torch_dtype=torch.float32,
active_adapter=ADAPTER_NAME if use_peft else None,
)
config = model.config
assert isinstance(model, DistributedBloomForCausalLM)
assert len(model.transformer.h) == model.config.num_hidden_layers
test_inputs = tokenizer("A quick brown fox was minding its own buisness", return_tensors="pt")["input_ids"]
with torch.inference_mode():
parallel_outputs = model.forward(test_inputs).logits
assert torch.all(torch.isfinite(parallel_outputs))
logger.info("Forward outputs are finite")
embs = model.transformer.word_embeddings(test_inputs)
embs = model.transformer.word_embeddings_layernorm(embs)
recurrent_outputs = []
with model.transformer.h.inference_session(max_length=embs.shape[1]) as sess:
if pass_empty_tensors:
recurrent_outputs.append(sess.step(torch.empty(1, 0, config.hidden_size)))
for t in range(embs.shape[1]):
if t == 4:
recurrent_outputs.append(sess.step(embs[:, 4:9, :]))
elif 4 < t < 9:
continue
else:
recurrent_outputs.append(sess.step(embs[:, t : t + 1, :]))
if t == 2 and pass_empty_tensors:
recurrent_outputs.append(sess.step(torch.empty(1, 0, config.hidden_size)))
recurrent_outputs.append(sess.step(torch.empty(1, 0, config.hidden_size)))
recurrent_outputs = torch.cat(recurrent_outputs, dim=1)
recurrent_outputs = model.transformer.ln_f(recurrent_outputs)
recurrent_outputs = model.lm_head(recurrent_outputs)
assert torch.allclose(recurrent_outputs, parallel_outputs, rtol=0, atol=atol_inference)
logger.info("Inference is consistent with forward")
del model, embs, recurrent_outputs
if REF_NAME:
ref_model = transformers.BloomForCausalLM.from_pretrained(
REF_NAME, low_cpu_mem_usage=True, torch_dtype=torch.float32
)
if use_peft:
ref_model = peft.PeftModel.from_pretrained(ref_model, ADAPTER_NAME)
ref_model.train(False)
if config.vocab_size < ref_model.config.vocab_size:
ref_model.resize_token_embeddings(config.vocab_size)
logger.warning(f"Resized the reference model embeddings, new total = {ref_model.config.vocab_size}")
dummy_mask = torch.ones_like(test_inputs, dtype=torch.bool)
# note: this creates a dummy mask to make the test compatible with older transformer versions
# prior to https://github.com/huggingface/transformers/pull/17837
ref_outputs = ref_model.forward(test_inputs, attention_mask=dummy_mask).logits.float()
assert torch.allclose(ref_outputs, parallel_outputs, rtol=0, atol=atol_forward)
logger.warning(f"Distributed forward is consistent with {type(ref_model)}.forward")
del ref_model, ref_outputs, dummy_mask
else:
logger.warning("Did not test exact match with local model: REF_NAME environment variable is not set")
assert False
@pytest.mark.forked
def test_greedy_generation(max_new_tokens=4):
tokenizer = transformers.BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = DistributedBloomForCausalLM.from_pretrained(
MODEL_NAME, initial_peers=INITIAL_PEERS, low_cpu_mem_usage=True, torch_dtype=torch.float32
)
inputs = tokenizer("A cat sat on a mat", return_tensors="pt")["input_ids"]
remote_outputs = model.generate(
inputs,
max_new_tokens=max_new_tokens,
)
hf_outputs = BloomForCausalLM.greedy_search(model, input_ids=inputs, max_length=inputs.size(1) + max_new_tokens)
assert torch.allclose(remote_outputs, hf_outputs), "Greedy search results are not identical to HF"
inputs_batch = tokenizer(["A cat sat on a mat", "A dog sat on a mat"], return_tensors="pt", padding=True)[
"input_ids"
]
remote_outputs_batch = model.generate(
inputs_batch,
max_new_tokens=max_new_tokens,
)
hf_outputs_batch = BloomForCausalLM.greedy_search(
model, input_ids=inputs_batch, max_length=inputs_batch.size(1) + max_new_tokens
)
assert torch.allclose(
remote_outputs_batch, hf_outputs_batch
), "Greedy search results are not identical to HF in multibatch mode"
@pytest.mark.forked
@pytest.mark.parametrize("sampling_options", [dict(), dict(temperature=100.0), dict(top_k=5), dict(top_p=0.9)])
@pytest.mark.skip("Sampling is currently not consistent with outputs from Transformers")
def test_sampling(sampling_options, max_new_tokens=4):
torch.manual_seed(0)
tokenizer = transformers.BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = DistributedBloomForCausalLM.from_pretrained(
MODEL_NAME, initial_peers=INITIAL_PEERS, low_cpu_mem_usage=True, torch_dtype=torch.float32
)
logits_warper = BloomForCausalLM._get_logits_warper(model, num_beams=1, **sampling_options)
inputs = tokenizer("A cat sat on a mat", return_tensors="pt")["input_ids"]
with torch.random.fork_rng():
remote_outputs = model.generate(
inputs,
max_new_tokens=max_new_tokens,
do_sample=True,
**sampling_options,
)
with torch.random.fork_rng():
hf_outputs = BloomForCausalLM.sample(
model, input_ids=inputs, max_length=inputs.size(1) + max_new_tokens, logits_warper=logits_warper
)
assert torch.allclose(remote_outputs, hf_outputs), "Sampling results are not identical to HF"
inputs_batch = tokenizer(["A cat sat on a mat", "A dog sat on a mat"], return_tensors="pt", padding=True)[
"input_ids"
]
with torch.random.fork_rng():
remote_outputs_batch = model.generate(
inputs_batch,
max_new_tokens=max_new_tokens,
do_sample=True,
**sampling_options,
)
with torch.random.fork_rng():
hf_outputs_batch = BloomForCausalLM.sample(
model,
input_ids=inputs_batch,
max_length=inputs_batch.size(1) + max_new_tokens,
logits_warper=logits_warper,
)
assert torch.allclose(
remote_outputs_batch, hf_outputs_batch
), "Sampling results are not identical to HF in multibatch mode"
@pytest.mark.forked
def test_beam_search_generation(max_new_tokens=4, num_beams=2):
tokenizer = transformers.BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = DistributedBloomForCausalLM.from_pretrained(
MODEL_NAME, initial_peers=INITIAL_PEERS, low_cpu_mem_usage=True, torch_dtype=torch.float32
)
text = "A cat sat on a mat"
inputs = tokenizer(text, return_tensors="pt")["input_ids"]
remote_outputs = model.generate(
inputs,
max_new_tokens=max_new_tokens,
num_beams=num_beams,
)
beam_scorer = BeamSearchScorer(
batch_size=inputs.size(0),
num_beams=num_beams,
device=inputs.device,
length_penalty=0,
do_early_stopping=False,
)
hf_inputs = tokenizer([text] * 2, return_tensors="pt")["input_ids"]
hf_outputs = BloomForCausalLM.beam_search(
model, input_ids=hf_inputs, max_length=inputs.size(1) + max_new_tokens, beam_scorer=beam_scorer
)
assert torch.allclose(remote_outputs, hf_outputs), "Beam search results are not identical to HF"
|
TheGrid-main
|
tests/test_full_model.py
|
######
# Warning: this test is a work in progress. It will be modified soon.
# - if you want more stable tests, see test_block_exact_match
# - if you want to figure out chained inference, ask yozh
import pytest
import torch
from grid import DistributedBloomConfig
from grid.client.remote_sequential import RemoteSequential
from grid.server.from_pretrained import load_pretrained_block
from test_utils import *
@pytest.mark.forked
def test_forward_backward_exact_match(atol_forward=1e-4, atol_backward=1e-4, seq_length=1):
config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
remote_blocks = RemoteSequential(config, start_block=3, end_block=6)
assert isinstance(remote_blocks, RemoteSequential)
ref_blocks = [
load_pretrained_block(MODEL_NAME, 3, torch_dtype=torch.float32),
load_pretrained_block(MODEL_NAME, 4, torch_dtype=torch.float32),
load_pretrained_block(MODEL_NAME, 5, torch_dtype=torch.float32),
]
inputs = torch.randn(1, seq_length, config.hidden_size, requires_grad=True)
outputs_rpc = remote_blocks.forward(inputs)
outputs_rpc.sum().backward()
grads_rpc = inputs.grad
inputs.grad = None
hidden_states = inputs
for ref_block in ref_blocks:
hidden_states = ref_block.forward(hidden_states)[0]
outputs_ref = hidden_states
outputs_ref.sum().backward()
grads_ref = inputs.grad
assert torch.allclose(outputs_ref, outputs_rpc, rtol=0, atol=atol_forward)
assert torch.allclose(grads_ref, grads_rpc, rtol=0, atol=atol_backward)
@pytest.mark.forked
def test_chained_inference_exact_match(atol_inference=1e-4):
config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
remote_blocks = RemoteSequential(config, start_block=3, end_block=5)
inputs = torch.randn(1, 8, config.hidden_size)
outputs_inference = []
with remote_blocks.inference_session(max_length=inputs.shape[1]) as sess:
for i in range(inputs.shape[1]):
outputs_inference.append(sess.step(inputs[:, i : i + 1, :]))
outputs_inference = torch.cat(outputs_inference, dim=1)
ref_blocks = [
load_pretrained_block(MODEL_NAME, 3, torch_dtype=torch.float32),
load_pretrained_block(MODEL_NAME, 4, torch_dtype=torch.float32),
]
outputs_ref = []
caches = [None, None]
for i in range(inputs.shape[1]):
new_caches = []
hidden_states = inputs[:, i : i + 1, :]
for ref_block, cache in zip(ref_blocks, caches):
with torch.no_grad():
hidden_states, new_cache = ref_block.forward(hidden_states, use_cache=True, layer_past=cache)
new_caches.append(new_cache)
outputs_ref.append(hidden_states)
caches = new_caches
outputs_ref = torch.cat(outputs_ref, dim=1)
assert torch.allclose(outputs_ref, outputs_inference, rtol=0, atol=atol_inference)
|
TheGrid-main
|
tests/test_chained_calls.py
|
import threading
import time
import pytest
import torch
from hivemind import DHT, get_logger
from grid import DistributedBloomConfig
from grid.client import RemoteSequenceManager, RemoteSequential
from grid.data_structures import UID_DELIMITER
from test_utils import *
logger = get_logger(__name__)
@pytest.mark.forked
@pytest.mark.parametrize("mode", ["max_throughput", "min_latency"])
def test_sequence_manager_basics(mode: str):
config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
dht = DHT(initial_peers=config.initial_peers, client_mode=True, start=True)
sequential = RemoteSequential(config, dht=dht)
shutdown_evt = threading.Event()
# test RemoteSequential with lossy compression
block_uids = [f"{config.dht_prefix}{UID_DELIMITER}{i}" for i in range(config.num_hidden_layers)]
sequential = RemoteSequential(
config,
sequence_manager=RemoteSequenceManagerWithChecks(config, block_uids, dht=dht, _was_shut_down=shutdown_evt),
)
sequence = sequential.sequence_manager.make_sequence(mode=mode)
assert all(sequence[i].peer_id != sequence[i + 1].peer_id for i in range(len(sequence) - 1))
assert sequential.sequence_manager.is_alive()
assert sequential.sequence_manager._thread.ready.is_set()
assert not shutdown_evt.is_set()
sequential(torch.randn(1, 2, config.hidden_size))
sequential.sequence_manager.shutdown()
del sequential
time.sleep(1)
assert shutdown_evt.is_set()
class RemoteSequenceManagerWithChecks(RemoteSequenceManager):
"""A sequence manager that signals if it was shut down"""
def __init__(self, *args, _was_shut_down: threading.Event, **kwargs):
super().__init__(*args, **kwargs)
self._was_shut_down = _was_shut_down
def shutdown(self):
super().shutdown()
assert not self.is_alive()
self._was_shut_down.set()
|
TheGrid-main
|
tests/test_sequence_manager.py
|
import pytest
import torch
from grid.server.block_utils import resolve_block_dtype
from grid.server.from_pretrained import load_pretrained_block
from grid.utils.auto_config import AutoDistributedConfig
from test_utils import MODEL_NAME
@pytest.mark.forked
@pytest.mark.parametrize("torch_dtype", [torch.float32, torch.float16, "auto"])
def test_block_dtype(torch_dtype):
config = AutoDistributedConfig.from_pretrained(MODEL_NAME)
block = load_pretrained_block(MODEL_NAME, 0, config=config, torch_dtype=torch_dtype)
expected_dtype = resolve_block_dtype(config, torch_dtype)
assert all(param.dtype == expected_dtype for param in block.parameters())
|
TheGrid-main
|
tests/test_dtype.py
|
import random
import pytest
import torch
from grid import DistributedBloomConfig, RemoteSequential
from grid.server.from_pretrained import load_pretrained_block
from test_utils import *
@pytest.mark.forked
def test_remote_block_exact_match(atol_forward=1e-4, atol_inference=1e-3):
config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
remote_sequential = RemoteSequential(config)
for block_index in random.sample(range(config.num_hidden_layers), 3):
remote_block = remote_sequential[block_index]
inputs = torch.randn(1, 8, config.hidden_size)
outputs_forward = remote_block(inputs)
outputs_inference = []
with torch.inference_mode():
with remote_block.inference_session(max_length=inputs.shape[1]) as sess:
for i in range(inputs.shape[1]):
outputs_inference.append(sess.step(inputs[:, i : i + 1, :]))
# test that max length is respected
with pytest.raises(ValueError, match=r"Maximum length exceeded") as exc_info:
sess.step(inputs[:, -1:, :])
assert "Maximum length exceeded" in repr(exc_info.value)
outputs_inference = torch.cat(outputs_inference, dim=1)
ref_block = load_pretrained_block(MODEL_NAME, block_index, torch_dtype=torch.float32)
(outputs_local,) = ref_block(inputs)
assert torch.allclose(outputs_local, outputs_forward, rtol=0, atol=atol_forward)
assert torch.allclose(outputs_local, outputs_inference, rtol=0, atol=atol_inference)
|
TheGrid-main
|
tests/test_block_exact_match.py
|
#!/usr/bin/env python3
import argparse
import multiprocessing as mp
from time import perf_counter
import numpy as np
import torch
from hivemind.utils.logging import get_logger
from grid import AutoDistributedModelForCausalLM, AutoDistributedModelForSequenceClassification
from grid.constants import DTYPE_MAP, PUBLIC_INITIAL_PEERS
logger = get_logger()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="Agora/bloom")
parser.add_argument("--device", type=str, default="cpu")
parser.add_argument("--task", type=str, default="cls")
parser.add_argument("--initial_peers", type=str, nargs="+", default=PUBLIC_INITIAL_PEERS)
parser.add_argument("--torch_dtype", type=str, default="bfloat16")
parser.add_argument("--n_processes", type=str, default=1)
parser.add_argument("--seq_len", type=int, default=128)
parser.add_argument("--pre_seq_len", type=int, default=16)
parser.add_argument("--n_steps", type=int, default=10)
parser.add_argument("--batch_size", type=int, required=True)
parser.add_argument("--warmup_steps", type=int, default=1)
args = parser.parse_args()
assert args.task in ["cls", "causal_lm"]
if args.n_processes == "n_gpus":
args.n_processes = torch.cuda.device_count()
else:
args.n_processes = int(args.n_processes)
processes = [mp.Process(target=benchmark_training, args=(i, args)) for i in range(args.n_processes)]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
def benchmark_training(process_idx, args):
if args.task == "cls":
model = AutoDistributedModelForSequenceClassification.from_pretrained(
args.model,
initial_peers=args.initial_peers,
torch_dtype=DTYPE_MAP[args.torch_dtype],
tuning_mode="deep_ptune",
pre_seq_len=args.pre_seq_len,
num_labels=2,
)
elif args.task == "causal_lm":
model = AutoDistributedModelForCausalLM.from_pretrained(
args.model,
initial_peers=args.initial_peers,
torch_dtype=DTYPE_MAP[args.torch_dtype],
tuning_mode="deep_ptune",
pre_seq_len=args.pre_seq_len,
)
model = model.to(args.device)
opt = torch.optim.Adam(model.parameters())
logger.info(f"Created model: {process_idx=} {model.device=}")
torch.manual_seed(42)
fwd_times = []
bwd_times = []
for step in range(args.warmup_steps + args.n_steps):
input_ids = torch.randint(0, model.config.vocab_size, size=(args.batch_size, args.seq_len), device=args.device)
if args.task == "cls":
labels = torch.randint(0, 2, size=[args.batch_size], device=args.device)
else:
labels = input_ids
logger.info(f"{process_idx=} {step=} Forward")
start_time = perf_counter()
outputs = model(input_ids, labels=labels)
if step >= args.warmup_steps:
fwd_times.append(perf_counter() - start_time)
logger.info(f"{process_idx=} {step=} Backward")
start_time = perf_counter()
outputs.loss.backward()
if step >= args.warmup_steps:
bwd_times.append(perf_counter() - start_time)
logger.info(f"{process_idx=} {step=} Optimizer step")
opt.step()
opt.zero_grad()
if step >= args.warmup_steps:
fwd_speed = input_ids.numel() / np.mean(fwd_times)
bwd_speed = input_ids.numel() / np.mean(bwd_times)
logger.info(f"{process_idx=} Fwd speed: {fwd_speed:.2f} | Bwd speed: {bwd_speed:.2f}")
logger.info(f"Final result: {process_idx=} {fwd_speed=:.2f} | {bwd_speed=:.2f}")
if __name__ == "__main__":
main()
|
TheGrid-main
|
benchmarks/benchmark_training.py
|
#!/usr/bin/env python3
import argparse
import multiprocessing as mp
from time import perf_counter
import numpy as np
import torch
from hivemind.utils.logging import get_logger
from transformers import AutoTokenizer
from grid import AutoDistributedModelForCausalLM
from grid.constants import DTYPE_MAP, PUBLIC_INITIAL_PEERS
logger = get_logger()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="Agora/bloom")
parser.add_argument("--initial_peers", type=str, nargs="+", default=PUBLIC_INITIAL_PEERS)
parser.add_argument("--torch_dtype", type=str, default="bfloat16")
parser.add_argument("--n_processes", type=str, default=1)
parser.add_argument("--seq_len", type=int, default=2048)
parser.add_argument("--warmup_steps", type=int, default=1)
args = parser.parse_args()
if args.n_processes == "n_gpus":
args.n_processes = torch.cuda.device_count()
else:
args.n_processes = int(args.n_processes)
processes = [mp.Process(target=benchmark_inference, args=(i, args)) for i in range(args.n_processes)]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
@torch.inference_mode()
def benchmark_inference(process_idx, args):
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=False)
# Using use_fast=False since LlamaTokenizerFast takes a long time to start, and we decode 1 token at a time anyway
model = AutoDistributedModelForCausalLM.from_pretrained(
args.model, initial_peers=args.initial_peers, torch_dtype=DTYPE_MAP[args.torch_dtype]
)
logger.info(f"Created model: {process_idx=} {model.device=}")
result = ""
step_times = []
with model.transformer.h.inference_session(max_length=args.seq_len) as sess:
for step in range(args.seq_len):
start_time = perf_counter()
outputs = model.generate(max_new_tokens=1, session=sess)
result += tokenizer.decode(outputs[0])
if step >= args.warmup_steps:
step_times.append(perf_counter() - start_time)
speed = 1 / np.mean(step_times)
logger.info(f"{process_idx=} {step=} {speed=:.2f}")
logger.info(f"Final result: {process_idx=} {speed=:.2f}")
if __name__ == "__main__":
main()
|
TheGrid-main
|
benchmarks/benchmark_inference.py
|
#!/usr/bin/env python3
import argparse
import multiprocessing as mp
from time import perf_counter
import numpy as np
import torch
from hivemind.utils.logging import get_logger
from grid import AutoDistributedModel
from grid.constants import DTYPE_MAP, PUBLIC_INITIAL_PEERS
logger = get_logger()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="Agora/bloom")
parser.add_argument("--initial_peers", type=str, nargs="+", default=PUBLIC_INITIAL_PEERS)
parser.add_argument("--torch_dtype", type=str, default="bfloat16")
parser.add_argument("--n_processes", type=str, default=1)
parser.add_argument("--seq_len", type=int, default=128)
parser.add_argument("--n_steps", type=int, default=100)
parser.add_argument("--batch_size", type=int, required=True)
parser.add_argument("--warmup_steps", type=int, default=1)
args = parser.parse_args()
if args.n_processes == "n_gpus":
args.n_processes = torch.cuda.device_count()
else:
args.n_processes = int(args.n_processes)
processes = [mp.Process(target=benchmark_forward, args=(i, args)) for i in range(args.n_processes)]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
@torch.inference_mode()
def benchmark_forward(process_idx, args):
model = AutoDistributedModel.from_pretrained(
args.model,
initial_peers=args.initial_peers,
torch_dtype=DTYPE_MAP[args.torch_dtype],
)
logger.info(f"Created model: {process_idx=} {model.device=}")
torch.manual_seed(42)
step_times = []
for step in range(args.warmup_steps + args.n_steps):
start_time = perf_counter()
input_ids = torch.randint(0, model.config.vocab_size, size=(args.batch_size, args.seq_len))
logger.info(f"{process_idx=} Fwd begin {input_ids.shape=}")
model(input_ids)
# We don't use model.lm_head
logger.info(f"{process_idx=} Fwd end")
if step >= args.warmup_steps:
step_times.append(perf_counter() - start_time)
speed = input_ids.numel() / np.mean(step_times)
logger.info(f"{process_idx=} {step=} {speed=:.2f}")
logger.info(f"Final result: {process_idx=} {speed=:.2f}")
if __name__ == "__main__":
main()
|
TheGrid-main
|
benchmarks/benchmark_forward.py
|
"""
Utilities for declaring and retrieving active model layers using a shared DHT.
"""
from __future__ import annotations
import math
from functools import partial
from typing import Dict, List, Optional, Sequence, Union
from hivemind.dht import DHT, DHTNode, DHTValue
from hivemind.p2p import PeerID
from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger
from grid.data_structures import CHAIN_DELIMITER, UID_DELIMITER, ModuleUID, RemoteModuleInfo, ServerInfo
logger = get_logger(__name__)
def declare_active_modules(
dht: DHT,
uids: Sequence[ModuleUID],
server_info: ServerInfo,
expiration_time: DHTExpiration,
wait: bool = True,
) -> Union[Dict[ModuleUID, bool], MPFuture[Dict[ModuleUID, bool]]]:
"""
Declare that your node serves the specified modules; update timestamps if declared previously
:param uids: a list of module ids to declare
:param wait: if True, awaits for declaration to finish, otherwise runs in background
:param throughput: specify your performance in terms of compute throughput
:param expiration_time: declared modules will be visible for this many seconds
:returns: if wait, returns store status for every key (True = store succeeded, False = store rejected)
"""
if isinstance(uids, str):
uids = [uids]
if not isinstance(uids, list):
uids = list(uids)
for uid in uids:
assert isinstance(uid, ModuleUID) and UID_DELIMITER in uid and CHAIN_DELIMITER not in uid
return dht.run_coroutine(
partial(_declare_active_modules, uids=uids, server_info=server_info, expiration_time=expiration_time),
return_future=not wait,
)
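# Illustrative sketch (not used by the library): how a server might announce a handful of blocks.
# `my_dht` and `my_server_info` are placeholders for objects created elsewhere in the server code.
def _example_declare_blocks(my_dht: DHT, my_server_info: ServerInfo) -> None:
    uids = [f"bloom{UID_DELIMITER}{i}" for i in range(4)]  # "bloom.0" ... "bloom.3"
    statuses = declare_active_modules(
        my_dht, uids, my_server_info, expiration_time=get_dht_time() + 60, wait=True
    )
    assert all(statuses.values()), "every uid should be stored successfully"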
async def _declare_active_modules(
dht: DHT,
node: DHTNode,
uids: List[ModuleUID],
server_info: ServerInfo,
expiration_time: DHTExpiration,
) -> Dict[ModuleUID, bool]:
num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
return await node.store_many(
keys=uids,
subkeys=[dht.peer_id.to_base58()] * len(uids),
values=[server_info.to_tuple()] * len(uids),
expiration_time=expiration_time,
num_workers=num_workers,
)
def get_remote_module_infos(
dht: DHT,
uids: Sequence[ModuleUID],
expiration_time: Optional[DHTExpiration] = None,
active_adapter: Optional[str] = None,
*,
latest: bool = False,
return_future: bool = False,
) -> Union[List[Optional[RemoteModuleInfo]], MPFuture]:
return dht.run_coroutine(
partial(
_get_remote_module_infos,
uids=uids,
active_adapter=active_adapter,
expiration_time=expiration_time,
latest=latest,
),
return_future=return_future,
)
async def _get_remote_module_infos(
dht: DHT,
node: DHTNode,
uids: List[ModuleUID],
active_adapter: Optional[str],
expiration_time: Optional[DHTExpiration],
latest: bool,
) -> List[Optional[RemoteModuleInfo]]:
if latest:
assert expiration_time is None, "You should define either `expiration_time` or `latest`, not both"
expiration_time = math.inf
elif expiration_time is None:
expiration_time = get_dht_time()
num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
found: Dict[ModuleUID, DHTValue] = await node.get_many(uids, expiration_time, num_workers=num_workers)
modules: List[Optional[RemoteModuleInfo]] = [None] * len(uids)
for i, uid in enumerate(uids):
metadata = found[uid]
if metadata is None or not isinstance(metadata.value, dict):
if metadata is not None:
logger.warning(f"Incorrect metadata for {uid}: {metadata}")
continue
servers = {}
for peer_id, server_info in metadata.value.items():
try:
peer_id = PeerID.from_base58(peer_id)
server_info = ServerInfo.from_tuple(server_info.value)
if active_adapter and active_adapter not in server_info.adapters:
logger.debug(f"Skipped server {peer_id} since it does not have adapter {active_adapter}")
continue
servers[peer_id] = server_info
except (TypeError, ValueError) as e:
logger.warning(f"Incorrect peer entry for uid={uid}, peer_id={peer_id}: {e}")
if servers:
modules[i] = RemoteModuleInfo(uid, servers)
return modules
|
TheGrid-main
|
grid/dht_utils.py
|
import torch
PUBLIC_INITIAL_PEERS = [
# IPv4 DNS addresses
"/dns/bootstrap1.grid.dev/tcp/31337/p2p/QmedTaZXmULqwspJXz44SsPZyTNKxhnnFvYRajfH7MGhCY",
"/dns/bootstrap2.grid.dev/tcp/31338/p2p/QmQGTqmM7NKjV6ggU1ZCap8zWiyKR89RViDXiqehSiCpY5",
# IPv6 DNS addresses
"/dns6/bootstrap1.grid.dev/tcp/31337/p2p/QmedTaZXmULqwspJXz44SsPZyTNKxhnnFvYRajfH7MGhCY",
"/dns6/bootstrap2.grid.dev/tcp/31338/p2p/QmQGTqmM7NKjV6ggU1ZCap8zWiyKR89RViDXiqehSiCpY5",
# Reserved IPs
"/ip4/159.89.214.152/tcp/31337/p2p/QmedTaZXmULqwspJXz44SsPZyTNKxhnnFvYRajfH7MGhCY",
"/ip4/159.203.156.48/tcp/31338/p2p/QmQGTqmM7NKjV6ggU1ZCap8zWiyKR89RViDXiqehSiCpY5",
]
# The reachability API is currently used only when connecting to the public swarm
REACHABILITY_API_URL = "https://health.grid.dev"
DTYPE_MAP = dict(bfloat16=torch.bfloat16, float16=torch.float16, float32=torch.float32, auto="auto")
|
TheGrid-main
|
grid/constants.py
|
import os
os.environ.setdefault("BITSANDBYTES_NOWELCOME", "1")
import hivemind
import transformers
from packaging import version
from grid.client import *
from grid.models import *
from grid.utils import *
from grid.utils.logging import initialize_logs as _initialize_logs
__version__ = "2.0.1"
if not os.getenv("GRID_IGNORE_DEPENDENCY_VERSION"):
assert (
version.parse("4.31.0") <= version.parse(transformers.__version__) < version.parse("5.0.0")
), "Please install a proper transformers version: pip install transformers>=4.31.0,<5.0.0"
def _override_bfloat16_mode_default():
if os.getenv("USE_LEGACY_BFLOAT16") is None:
hivemind.compression.base.USE_LEGACY_BFLOAT16 = False
_initialize_logs()
_override_bfloat16_mode_default()
|
TheGrid-main
|
grid/__init__.py
|
import dataclasses
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Tuple
import pydantic
from hivemind import PeerID
from hivemind.moe.expert_uid import ExpertUID
from grid.server.memory_cache import Handle
ModuleUID = str
UID_DELIMITER = "." # delimits parts of one module uid, e.g. "bloom.transformer.h.4.self_attention"
CHAIN_DELIMITER = " " # delimits multiple uids in a sequence, e.g. "bloom.layer3 bloom.layer4"
class ServerState(Enum):
OFFLINE = 0
JOINING = 1
ONLINE = 2
RPS = pydantic.confloat(ge=0, allow_inf_nan=False, strict=True)
@pydantic.dataclasses.dataclass
class ServerInfo:
state: ServerState
throughput: RPS
public_name: Optional[str] = None
version: Optional[str] = None
network_rps: Optional[RPS] = None
forward_rps: Optional[RPS] = None
inference_rps: Optional[RPS] = None
adapters: Sequence[str] = ()
torch_dtype: Optional[str] = None
quant_type: Optional[str] = None
using_relay: Optional[bool] = None
cache_tokens_left: Optional[pydantic.conint(ge=0, strict=True)] = None
next_pings: Optional[Dict[str, pydantic.confloat(ge=0, strict=True)]] = None
def to_tuple(self) -> Tuple[int, float, dict]:
extra_info = dataclasses.asdict(self)
del extra_info["state"], extra_info["throughput"]
return (self.state.value, self.throughput, extra_info)
@classmethod
def from_tuple(cls, source: tuple):
state, throughput = source[:2]
extra_info = source[2] if len(source) > 2 else {}
# pydantic will validate existing fields and ignore extra ones
return cls(state=ServerState(state), throughput=throughput, **extra_info)
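# A minimal sketch (not used by the library) of the tuple format ServerInfo travels in over the DHT:
# (state_value, throughput, {extra fields}); pydantic re-validates the known fields on the way back.
def _example_server_info_roundtrip() -> "ServerInfo":
    info = ServerInfo(state=ServerState.ONLINE, throughput=100.0, version="2.0.1")
    packed = info.to_tuple()  # e.g. (2, 100.0, {"version": "2.0.1", ...})
    return ServerInfo.from_tuple(packed)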
@dataclasses.dataclass
class RemoteModuleInfo:
"""A remote module that is served by one or more servers"""
uid: ModuleUID
servers: Dict[PeerID, ServerInfo]
@dataclasses.dataclass
class RemoteSpanInfo:
"""A chain of remote blocks served by one specific remote peer"""
peer_id: PeerID
start: int
end: int
server_info: ServerInfo
@property
def length(self):
return self.end - self.start
RPCInfo = Dict[str, Any]
@dataclasses.dataclass(frozen=True)
class InferenceMetadata:
uid: ExpertUID
prefix_length: int
cache_handles: Tuple[Handle, ...]
active_adapter: Optional[str]
|
TheGrid-main
|
grid/data_structures.py
|
"""
A pytorch memory cache that can be allocated by ConnectionHandler (on cpu) and used over multiple calls to Runtime.
For now, the only purpose of this code is to ensure that allocated memory will be deleted properly.
"""
import asyncio
import contextlib
import ctypes
import multiprocessing as mp
import os
import time
from typing import AsyncContextManager, Dict, Optional, Sequence
import hivemind
import torch
from hivemind.utils import TensorDescriptor, get_logger
from grid.utils.asyncio import shield_and_wait
logger = get_logger(__name__)
Handle = int
class MemoryCache:
"""A shared cache for storing tensors that persist across calls. Main use case: storing past attention KVs"""
def __init__(self, max_size_bytes: Optional[int], alloc_timeout: float):
self.max_size_bytes = max_size_bytes if max_size_bytes is not None else (2**64 - 1)
self.alloc_timeout = alloc_timeout
self._lock_metadata = mp.Lock()
self._current_size = mp.Value(ctypes.c_int64, 0, lock=False)
self._handle_counter = mp.Value(ctypes.c_int64, 0, lock=False)
self._allocated_tensors: Dict[Handle, torch.Tensor] = {}
self.runtime_pid = os.getpid()
self._pipe_recv, self._pipe_send = mp.Pipe(duplex=False) # any ConnectionHandler -> runtime
self._lock_acquire_memory = mp.Lock()
self._memory_freed_event = mp.Event()
@property
def current_size_bytes(self) -> int:
return self._current_size.value
@current_size_bytes.setter
def current_size_bytes(self, value: int):
self._current_size.value = value
@property
def bytes_left(self) -> int:
return self.max_size_bytes - self.current_size_bytes
@property
def handle_counter(self) -> int:
return self._handle_counter.value
@handle_counter.setter
def handle_counter(self, value: int):
self._handle_counter.value = value
@contextlib.asynccontextmanager
async def allocate_cache(self, *descriptors: TensorDescriptor) -> AsyncContextManager[Sequence[Handle]]:
"""
        Create handles associated with buffers on a unique device. If the cache is full, raises AllocationFailed.
        :param descriptors: one or more tensor descriptors (shape, dtype, device) to allocate buffers for
:note: if descriptors reside on different devices, it is expected that they are approximately balanced across devices;
if not, it will count maximum tensor allocation across devices for the purposes of size limit
:note: This function should be called by connection handlers, it can be called concurrently from multiple processes.
Furthermore, it can be called concurrently with at most one use_cache call in runtime.
"""
assert os.getpid() != self.runtime_pid, "must be called by a ConnectionHandler, not runtime"
assert all(descr.device is not None for descr in descriptors), "please specify allocated devices"
max_alloc_size = self.get_allocation_size(*descriptors)
gib = 1024**3
cur_size, max_size = self.current_size_bytes, self.max_size_bytes
friendly_max_size = f"{max_size / gib:.2f}" if max_size != 2**64 - 1 else "inf"
logger.info(
f"rpc_inference.wait_for_alloc(size={max_alloc_size / gib:.2f} GiB), "
f"already used {cur_size / gib:.2f}/{friendly_max_size} GiB ({cur_size / max_size * 100:.1f}%)"
)
alloc_task = asyncio.create_task(self._schedule_alloc(max_alloc_size, *descriptors))
try:
handles = await shield_and_wait(alloc_task)
logger.info(f"rpc_inference.alloc(size={max_alloc_size / gib:.2f} GiB)")
yield handles
finally:
self._free(max_alloc_size, alloc_task)
@staticmethod
def get_allocation_size(*descriptors: TensorDescriptor) -> int:
"""Return the memory size (bytes) to be allocated on a device. If there are many devices, return maximum"""
alloc_size_by_device = {}
for descr in descriptors:
tensor_size = descr.numel() * torch.finfo(descr.dtype).bits // 8
alloc_size_by_device[descr.device] = alloc_size_by_device.get(descr.device, 0) + tensor_size
return max(alloc_size_by_device.values())
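    # Worked example (illustrative): a single float16 tensor of shape (1, 16, 64, 2048) occupies
    # 1 * 16 * 64 * 2048 * 2 bytes = 4 MiB; a key/value pair of such tensors on the same device
    # therefore sums to 8 MiB, which is what this method would return.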
async def _schedule_alloc(self, alloc_size: int, *descriptors: TensorDescriptor) -> Sequence[Handle]:
"""
This method should be called inside asyncio.shield() because:
- hivemind.utils.enter_asynchronously() does not always release the lock on cancellation
"""
loop = asyncio.get_event_loop()
async with hivemind.utils.enter_asynchronously(self._lock_acquire_memory):
if self.current_size_bytes + alloc_size > self.max_size_bytes:
await loop.run_in_executor(None, self._wait_until_available, alloc_size, self.alloc_timeout)
with self._lock_metadata:
handles = tuple(int(self.handle_counter) + i for i in range(len(descriptors)))
self.current_size_bytes += alloc_size
self.handle_counter += len(handles) # note: this will eventually overflow and it is okay
self._pipe_send.send((handles, descriptors))
return handles
def _free(self, alloc_size: int, alloc_task: asyncio.Task) -> None:
if alloc_task.exception() is not None:
return
handles = alloc_task.result()
with self._lock_metadata:
self._pipe_send.send((handles, None)) # signal runtime to free these handles
self.current_size_bytes -= alloc_size
self._memory_freed_event.set()
def _wait_until_available(self, allocated_size: int, timeout: Optional[float] = None):
# note: this function should only be called inside _lock_acquire_memory!
if allocated_size > self.max_size_bytes:
raise AllocationFailed(
f"Could not allocate {allocated_size} bytes, max cache size = {self.max_size_bytes} bytes"
)
deadline = None if timeout is None else time.perf_counter() + timeout
while self.current_size_bytes + allocated_size > self.max_size_bytes:
remaining_time = deadline - time.perf_counter() if timeout is not None else None
if not self._memory_freed_event.wait(remaining_time):
raise AllocationFailed(
f"Server's attention cache is full, failed to allocate {allocated_size} bytes in {timeout} seconds"
)
self._memory_freed_event.clear()
@contextlib.contextmanager
def use_cache(self, *handles: Handle) -> Sequence[torch.Tensor]:
"""
        Return one or more tensors previously allocated with allocate_cache.
:note: This method is called by ModuleBackend in runtime: a single process with NO process parallelism.
However, runtime may call use_cache concurrently with one or more connection handlers calling allocate_cache
"""
assert os.getpid() == self.runtime_pid
# note: this specific function is not concurrent, so you can safely allocate/offload/defragment data here
# read creation/deletion requests from connection handlers
while self._pipe_recv.poll():
recv_handles, recv_data = self._pipe_recv.recv()
if recv_data is not None: # create new tensors
assert len(recv_handles) == len(recv_data)
for handle, descr in zip(recv_handles, recv_data):
self._allocated_tensors[handle] = descr.make_zeros()
assert handle in self._allocated_tensors, f"Sanity check failed: no such handle ({handle})"
else: # delete tensors by handle
for handle in recv_handles:
if handle not in self._allocated_tensors:
logger.warning(
f"Sanity check failed: asked to delete handle {handle}, but there is no such handle"
)
self._allocated_tensors.pop(handle, None)
yield tuple(self._allocated_tensors[handle] for handle in handles)
class AllocationFailed(Exception):
pass
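# A minimal usage sketch (illustrative only, not part of the server): a connection handler reserves
# cache space for one tensor descriptor, and the runtime later resolves the handle into the actual
# buffer via use_cache(). `my_cache` is a placeholder for a MemoryCache created by the server.
async def _example_allocate(my_cache: MemoryCache) -> None:
    descr = TensorDescriptor((1, 16, 64, 2048), dtype=torch.float16, device=torch.device("cpu"))
    async with my_cache.allocate_cache(descr) as (handle,):
        pass  # pass `handle` to the runtime, which calls my_cache.use_cache(handle) to get the tensor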
|
TheGrid-main
|
grid/server/memory_cache.py
|
import ctypes
import multiprocessing as mp
import threading
import time
from concurrent.futures._base import PENDING
from dataclasses import dataclass, field
from queue import PriorityQueue
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from hivemind import get_logger
from hivemind.moe.server.task_pool import TaskPoolBase
from hivemind.utils.mpfuture import ALL_STATES, MPFuture
logger = get_logger(__name__)
@dataclass(order=True, frozen=True)
class Task:
priority: float
time_submitted: float
future: MPFuture = field(compare=False)
args: Sequence[torch.Tensor] = field(compare=False)
@property
def uid(self) -> int:
return self.future._uid
class PrioritizedTaskPool(TaskPoolBase):
"""
Aggregates requests from multiple ConnectionHandler instances, orders them for processing in Runtime, then
returns results (or exception) to the corresponding ConnectionHandler. Runs a background process.
A single PrioritizedTaskPool services a specific function (e.g. layer1.forward, layer2.forward or layer1.backward)
:note: unlike hivemind.moe TaskPool, this pool does *not* combine incoming requests into batches.
This would require grouping requests of different length.
:param process_func: function to be applied to every formed batch; called by Runtime
Note that process_func should accept only positional args (Tensors) and return a flat tuple of Tensors
    :param max_batch_size: process at most this many inputs in a batch (a task may contain one or several inputs)
Measured in the total number of tokens (i.e. batch size * sequence length)
:param name: pool name, used for logging
:param min_batch_size: process at least this many inputs in a batch, otherwise wait for more
:param device: if specified, input tensors will be moved to that device by default
:param start: if True, start automatically at the end of __init__
"""
def __init__(
self,
process_func: callable,
max_batch_size: int,
name: str,
min_batch_size=1,
device: Optional[torch.device] = None,
daemon=True,
start=False,
):
super().__init__(process_func, daemon=daemon, name=name)
self.min_batch_size, self.max_batch_size = min_batch_size, max_batch_size
self.device = device
self.submitted_tasks = mp.SimpleQueue() # interaction with ConnectionHandlers
self._ordered_tasks = PriorityQueue() # interaction with Runtime - only valid inside Runtime
self._prioritizer_thread = threading.Thread(
name=self.name + "_prioritizer",
target=self._prioritize_tasks,
args=[self.submitted_tasks, self._ordered_tasks],
daemon=True,
)
self._dispatched_tasks = {}
self.batch_receiver, self.batch_sender = mp.Pipe(duplex=False)
        self._oldest_undispatched_timestamp = mp.Value(ctypes.c_double, 1.0)
        self._priority = mp.Value(ctypes.c_double, float("inf"))  # priority of the most important undispatched task
        self.priority = float("inf"), float("inf")  # (first task priority, first task timestamp)
self._stop = mp.Event()
if start:
self.start()
@staticmethod
def _prioritize_tasks(submitted_tasks: mp.SimpleQueue, ordered_tasks: PriorityQueue):
"""Read tasks from incoming queue and put them into a local priority queue"""
while True:
task = submitted_tasks.get()
if task is None:
logger.debug("Shutting down prioritizer thread")
break
ordered_tasks.put(task, block=True)
def start(self):
assert not self.is_alive() and not self._prioritizer_thread.is_alive()
self._prioritizer_thread.start()
super().start()
def shutdown(self, timeout: float = 3):
self.submitted_tasks.put(None) # Shuts down self._prioritizer_thread
self._stop.set()
self.join(timeout)
if self.is_alive():
logger.warning(f"{self.__class__.__name__} failed to shut down gracefully, sending SIGTERM")
self.terminate()
def submit_task(self, *args: Any, priority: float = 0.0) -> MPFuture:
"""Add task to this pool's queue, return Future for its output"""
future = MPFuture()
# Remove shmem from MPFuture. This disables the .cancel() feature but
# saves the server from "could not unlink the shared memory file" crashes during rebalancing
future._shared_state_code = torch.tensor([ALL_STATES.index(PENDING)], dtype=torch.uint8)
task = Task(priority, time.monotonic(), future, args)
if self.get_task_size(task) > self.max_batch_size:
exc = ValueError(f"Task size greater than max_batch_size ({self.max_batch_size}), it can't be processed")
task.future.set_exception(exc)
else:
self.submitted_tasks.put(task)
self.batch_sender.send(None) # use this pipe to count the number of unfinished batches
if (task.priority, task.time_submitted) < self.priority:
self.priority = (task.priority, task.time_submitted)
return task.future
def get_task_size(self, task: Task) -> int:
"""compute task processing complexity; defaults to the total number of tokens"""
if task.args and task.args[0].ndim >= 2:
return task.args[0].shape[0] * task.args[0].shape[1]
return 1
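    # Example (illustrative): a task whose first tensor has shape (4, 128) counts as 4 * 128 = 512
    # tokens, so it is accepted only if max_batch_size >= 512.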
def load_batch_to_runtime(
self, timeout: Optional[float] = None, device: Optional[torch.device] = None
) -> Tuple[Any, List[torch.Tensor]]:
"""receive next batch of arrays"""
device = device if device is not None else self.device
task = self._ordered_tasks.get(block=True, timeout=timeout)
batch_inputs = [_move_to_device_if_tensor(arg, device, share_memory=False) for arg in task.args]
self._dispatched_tasks[task.uid] = task
self.batch_receiver.recv() # reduce the number of active batches
if not self._ordered_tasks.empty():
first_remaining_task: Task = self._ordered_tasks.queue[0]
self.priority = (first_remaining_task.priority, first_remaining_task.time_submitted)
return task.uid, batch_inputs
def send_outputs_from_runtime(self, uid: int, batch_outputs: List[torch.Tensor]):
"""send results for a processed batch, previously loaded through load_batch_to_runtime"""
batch_outputs = [_move_to_device_if_tensor(output, device="cpu", share_memory=True) for output in batch_outputs]
task = self._dispatched_tasks.pop(uid, None)
if task is None:
logger.error(
f"Internal error: task task with index {uid} is missing from the dictionary; " f"Could not set result"
)
else:
task.future.set_result(batch_outputs)
def send_exception_from_runtime(self, uid: int, exception: BaseException):
task = self._dispatched_tasks.pop(uid, None)
if task is None:
logger.error(
f"Internal error: task task with index {uid} is missing from the dictionary; "
f"Could not set exception {exception}"
)
else:
task.future.set_exception(exception)
def run(self, *args, **kwargs):
self._stop.wait()
@property
def empty(self):
return not self.batch_receiver.poll()
@property
def priority(self) -> Tuple[float, float]:
"""The priority of this pool equals the (priority, timestamp) of the most important task in it."""
return float(self._priority.value), float(self._oldest_undispatched_timestamp.value)
@priority.setter
def priority(self, item: Tuple[float, float]):
assert len(item) == 2
self._priority.value = float(item[0])
self._oldest_undispatched_timestamp.value = float(item[1])
def _move_to_device_if_tensor(arg: Any, device: Union[torch.device, str], share_memory: bool = False):
if isinstance(arg, torch.Tensor):
arg = arg.detach().to(device, non_blocking=not share_memory).requires_grad_(arg.requires_grad)
# note: it is important that non_blocking is disabled if share_memory=True; using share_memory on a tensor
# produced by a non-blocking copy will result in undefined behavior (depending on your gpu speed)
if share_memory:
arg = arg.share_memory_()
return arg
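# Usage sketch (illustrative only): a connection handler submits a task and the Runtime later pulls
# it via load_batch_to_runtime(). `pool` and `hidden_states` are placeholders created by the server.
def _example_submit(pool: PrioritizedTaskPool, hidden_states: torch.Tensor) -> MPFuture:
    future = pool.submit_task(hidden_states, priority=1.0)  # lower priority value = served earlier
    return future  # calling future.result() returns the flat tuple of output tensors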
|
TheGrid-main
|
grid/server/task_pool.py
|
from __future__ import annotations
import gc
import math
import multiprocessing as mp
import random
import threading
import time
from typing import Dict, List, Optional, Sequence, Union
import hivemind
import torch
from hivemind import DHT, MAX_DHT_TIME_DISCREPANCY_SECONDS, BatchTensorDescriptor, get_dht_time
from hivemind.moe.server.layers import add_custom_models_from_file
from hivemind.moe.server.runtime import Runtime
from hivemind.proto.runtime_pb2 import CompressionType
from hivemind.utils.logging import get_logger
from transformers import PretrainedConfig
import grid
from grid.constants import DTYPE_MAP, PUBLIC_INITIAL_PEERS
from grid.data_structures import CHAIN_DELIMITER, UID_DELIMITER, ServerInfo, ServerState
from grid.dht_utils import declare_active_modules, get_remote_module_infos
from grid.server import block_selection
from grid.server.backend import TransformerBackend, merge_inference_pools_inplace
from grid.server.block_utils import get_block_size, resolve_block_dtype
from grid.server.from_pretrained import load_pretrained_block
from grid.server.handler import TransformerConnectionHandler
from grid.server.memory_cache import MemoryCache
from grid.server.reachability import ReachabilityProtocol, check_direct_reachability, validate_reachability
from grid.server.throughput import get_dtype_name, get_server_throughput
from grid.utils.auto_config import AutoDistributedConfig
from grid.utils.convert_block import QuantType, check_device_balance, convert_block
from grid.utils.ping import PingAggregator
from grid.utils.random import sample_up_to
from grid.utils.version import get_compatible_model_repo
logger = get_logger(__name__)
class Server:
"""
Runs ModuleContainer, periodically checks that the network is balanced,
restarts the ModuleContainer with other layers if the imbalance is significant
"""
def __init__(
self,
*,
initial_peers: List[str],
dht_prefix: Optional[str],
converted_model_name_or_path: str,
public_name: Optional[str] = None,
throughput: Union[float, str],
num_blocks: Optional[int] = None,
block_indices: Optional[str] = None,
num_handlers: int = 8,
inference_max_length: Optional[int] = None,
min_batch_size: int = 1,
max_batch_size: Optional[int] = None,
max_chunk_size_bytes: int = 256 * 1024 * 1024,
attn_cache_tokens: Optional[int] = None,
torch_dtype: str = "auto",
revision: Optional[str] = None,
cache_dir: Optional[str] = None,
max_disk_space: Optional[int] = None,
alloc_timeout: float = 5,
device: Optional[Union[str, torch.device]] = None,
compression=CompressionType.NONE,
stats_report_interval: Optional[int] = None,
custom_module_path=None,
update_period: float = 60,
expiration: Optional[float] = None,
request_timeout: float = 3 * 60,
session_timeout: float = 30 * 60,
step_timeout: float = 5 * 60,
prefetch_batches: int = 1,
sender_threads: int = 1,
balance_quality: float = 0.75,
mean_balance_check_period: float = 120,
mean_block_selection_delay: float = 2.5,
token: Optional[Union[str, bool]] = None,
quant_type: Optional[QuantType] = None,
tensor_parallel_devices: Optional[Sequence[torch.device]] = None,
skip_reachability_check: bool = False,
dht_client_mode: Optional[bool] = None,
use_relay: bool = True,
use_auto_relay: bool = True,
adapters: Sequence[str] = (),
**kwargs,
):
"""Create a server with one or more bloom blocks. See run_server.py for documentation."""
converted_model_name_or_path = get_compatible_model_repo(converted_model_name_or_path)
self.converted_model_name_or_path = converted_model_name_or_path
self.num_handlers = num_handlers
self.compression = compression
self.stats_report_interval, self.update_period = stats_report_interval, update_period
self.prefetch_batches, self.sender_threads = prefetch_batches, sender_threads
self.revision, self.token = revision, token
if custom_module_path is not None:
add_custom_models_from_file(custom_module_path)
self.block_config = AutoDistributedConfig.from_pretrained(
converted_model_name_or_path,
use_auth_token=token,
revision=revision,
)
if dht_prefix is None:
dht_prefix = self.block_config.dht_prefix
assert UID_DELIMITER not in dht_prefix and CHAIN_DELIMITER not in dht_prefix, (
f"DHT prefix should not contain '{UID_DELIMITER}' or '{CHAIN_DELIMITER}'. "
f"Please specify another --dht_prefix manually when starting a server"
)
self.dht_prefix = dht_prefix
if expiration is None:
expiration = max(2 * update_period, MAX_DHT_TIME_DISCREPANCY_SECONDS)
self.expiration = expiration
self.request_timeout = request_timeout
self.session_timeout, self.step_timeout = session_timeout, step_timeout
self.module_uids = [
f"{self.dht_prefix}{UID_DELIMITER}{block_index}"
for block_index in range(self.block_config.num_hidden_layers)
]
if dht_client_mode is None:
is_reachable = check_direct_reachability(initial_peers=initial_peers, use_relay=False, **kwargs)
dht_client_mode = is_reachable is False # if could not check reachability (returns None), run a full peer
logger.info(f"This server is accessible {'via relays' if dht_client_mode else 'directly'}")
self.dht = DHT(
initial_peers=initial_peers,
start=True,
num_workers=self.block_config.num_hidden_layers,
use_relay=use_relay,
use_auto_relay=use_auto_relay,
client_mode=dht_client_mode,
**kwargs,
)
self.reachability_protocol = ReachabilityProtocol.attach_to_dht(self.dht) if not dht_client_mode else None
visible_maddrs_str = [str(a) for a in self.dht.get_visible_maddrs()]
if initial_peers == PUBLIC_INITIAL_PEERS:
logger.info("Connecting to the public swarm")
else:
logger.info(f"Connecting to a private swarm, initial peers: {initial_peers}")
logger.info(f"Running a server on {visible_maddrs_str}")
self.should_validate_reachability = not skip_reachability_check and initial_peers == PUBLIC_INITIAL_PEERS
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
if device.type == "cuda" and device.index is None:
device = torch.device(device.type, index=0)
self.device = device
torch_dtype = resolve_block_dtype(self.block_config, DTYPE_MAP[torch_dtype])
self.torch_dtype = torch_dtype
if tensor_parallel_devices is None:
tensor_parallel_devices = (device,)
self.tensor_parallel_devices = tuple(map(torch.device, tensor_parallel_devices))
if len(self.tensor_parallel_devices) > 1:
logger.info(f"Model weights will be split between {', '.join(tensor_parallel_devices)}")
check_device_balance(self.tensor_parallel_devices)
if quant_type is None:
if device.type == "cuda":
quant_type = QuantType.NF4 if self.block_config.model_type == "llama" else QuantType.INT8
else:
quant_type = QuantType.NONE
self.quant_type = quant_type
logger.info(f"Model weights are loaded in {get_dtype_name(torch_dtype, quant_type)} format")
is_multiquery_attn = self.block_config.num_key_value_groups > 1
if max_batch_size is None:
max_batch_size = 8192 if is_multiquery_attn else 2048
if inference_max_length is None:
inference_max_length = 8192 if is_multiquery_attn else 2048
self.min_batch_size, self.max_batch_size = min_batch_size, max_batch_size
self.inference_max_length = inference_max_length
self.max_chunk_size_bytes = max_chunk_size_bytes
# For attention cache in GPU or RAM
if attn_cache_tokens is None:
attn_cache_tokens = 32768 if is_multiquery_attn else 8192
cache_values_per_block = 2 * self.block_config.hidden_size * attn_cache_tokens
cache_values_per_block //= self.block_config.num_key_value_groups
self._cache_bytes_per_block = cache_values_per_block * torch.finfo(self.torch_dtype).bits // 8
# For disk cache
self.cache_dir = cache_dir
self.max_disk_space = max_disk_space
self.adapters = adapters
assert num_blocks is None or block_indices is None, "Please specify num_blocks or block_indices, not both"
if num_blocks is None and block_indices is None:
num_blocks = self._choose_num_blocks()
if block_indices is not None:
try:
first_block_index, last_block_index = block_indices.split(":")
first_block_index, last_block_index = map(int, map(str.strip, (first_block_index, last_block_index)))
except Exception:
raise ValueError(f"Failed to parse `--block_indices {block_indices}`, must be start:end (e.g. 0:18)")
block_indices = range(first_block_index, last_block_index)
num_blocks = len(block_indices)
self.strict_block_indices, self.num_blocks = block_indices, num_blocks
gib = 1024**3
self.attn_cache_bytes = self._cache_bytes_per_block * num_blocks
logger.info(f"Attention cache for all blocks will consume up to {self.attn_cache_bytes / gib:.2f} GiB")
self.alloc_timeout = alloc_timeout
assert isinstance(throughput, float) or throughput in ["auto", "eval"]
if throughput in ["auto", "eval"]:
throughput_info = get_server_throughput(
converted_model_name_or_path,
self.block_config,
device,
torch_dtype,
num_blocks=num_blocks,
quant_type=quant_type,
tensor_parallel_devices=self.tensor_parallel_devices,
force_eval=(throughput == "eval"),
cache_dir=cache_dir,
)
else:
throughput_info = {"throughput": throughput}
self.server_info = ServerInfo(
state=ServerState.JOINING,
public_name=public_name,
version=grid.__version__,
adapters=tuple(adapters),
torch_dtype=str(torch_dtype).replace("torch.", ""),
quant_type=quant_type.name.lower(),
using_relay=self.dht.client_mode,
**throughput_info,
)
self.balance_quality = balance_quality
self.mean_balance_check_period = mean_balance_check_period
self.mean_block_selection_delay = mean_block_selection_delay
self.stop = threading.Event()
def _choose_num_blocks(self) -> int:
assert self.device.type == "cuda", (
"GPU is not available. If you want to run a CPU-only server, please specify --num_blocks. "
"CPU-only servers in the public swarm are discouraged since they are much slower"
)
num_devices = len(self.tensor_parallel_devices) if self.tensor_parallel_devices else 1
if num_devices > 1:
memory_per_device = tuple(
torch.cuda.get_device_properties(device).total_memory for device in self.tensor_parallel_devices
)
total_memory = min(memory_per_device) * num_devices
if max(memory_per_device) / min(memory_per_device) > 1.5:
raise ValueError(
"GPU devices have highly uneven memory, which makes tensor parallelism inefficient. "
"Please launch individual servers on each GPU or set --num_blocks manually to "
"override this exception."
)
else:
total_memory = torch.cuda.get_device_properties(self.device).total_memory
gib = 1024**3
# Estimate of GPU memory used in rpc_backward (2 GiB for BLOOM, proportional for other models)
autograd_memory = 2 * gib * num_devices / 14336 * self.block_config.hidden_size
block_size = get_block_size(self.block_config, "memory", dtype=self.torch_dtype, quant_type=self.quant_type)
total_memory_per_block = block_size + self._cache_bytes_per_block
if self.adapters:
# Delay import of grid.utils.peft to avoid unnecessary import of bitsandbytes
from grid.utils.peft import estimate_adapter_memory_per_block
total_memory_per_block += estimate_adapter_memory_per_block(
self.block_config,
self.torch_dtype,
self.adapters,
token=self.token,
cache_dir=self.cache_dir,
max_disk_space=self.max_disk_space,
)
num_blocks = math.floor((total_memory - autograd_memory) / total_memory_per_block)
assert num_blocks >= 1, "Your GPU does not have enough memory to serve at least one block"
num_blocks = min(num_blocks, self.block_config.num_hidden_layers)
logger.info(
f"Server will fill all your GPU memory with {num_blocks} transformer blocks. "
f"If you want to leave some free GPU memory, please specify a lesser --num_blocks manually"
)
return num_blocks
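    # Worked example (illustrative, numbers are made up): with 80 GiB of GPU memory, ~3 GiB of
    # weights per block, ~1 GiB of attention cache per block and ~2 GiB reserved for autograd,
    # this yields floor((80 - 2) / (3 + 1)) = 19 blocks (further capped by num_hidden_layers).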
def run(self):
while True:
block_indices = self._choose_blocks()
self.module_container = ModuleContainer.create(
dht=self.dht,
dht_prefix=self.dht_prefix,
converted_model_name_or_path=self.converted_model_name_or_path,
block_config=self.block_config,
attn_cache_bytes=self.attn_cache_bytes,
alloc_timeout=self.alloc_timeout,
server_info=self.server_info,
block_indices=block_indices,
num_handlers=self.num_handlers,
min_batch_size=self.min_batch_size,
max_batch_size=self.max_batch_size,
max_chunk_size_bytes=self.max_chunk_size_bytes,
inference_max_length=self.inference_max_length,
torch_dtype=self.torch_dtype,
cache_dir=self.cache_dir,
max_disk_space=self.max_disk_space,
device=self.device,
compression=self.compression,
stats_report_interval=self.stats_report_interval,
update_period=self.update_period,
expiration=self.expiration,
request_timeout=self.request_timeout,
session_timeout=self.session_timeout,
step_timeout=self.step_timeout,
prefetch_batches=self.prefetch_batches,
sender_threads=self.sender_threads,
revision=self.revision,
token=self.token,
quant_type=self.quant_type,
tensor_parallel_devices=self.tensor_parallel_devices,
should_validate_reachability=self.should_validate_reachability,
start=True,
)
try:
self.module_container.ready.wait()
while True:
timeout = random.random() * 2 * self.mean_balance_check_period
if self.stop.wait(timeout):
return
if not self.module_container.is_healthy():
logger.warning("One of subprocesses crashed, restarting the server")
break
if self._should_choose_other_blocks():
logger.info("Swarm is imbalanced, server will load other blocks")
break # Stop serving this set of modules
finally:
self.module_container.shutdown()
self._clean_memory_and_fds()
def _clean_memory_and_fds(self):
del self.module_container
gc.collect() # In particular, this closes unused file descriptors
if self.device.type == "cuda":
torch.cuda.empty_cache()
allocated_vram = torch.cuda.memory_allocated(self.device)
reserved_vram = torch.cuda.memory_reserved(self.device)
gib = 1024**3
logger.info(
f"Cleaning up, left {allocated_vram / gib:.1f} GiB allocated memory, "
f"{reserved_vram / gib:.1f} GiB reserved memory"
)
def _choose_blocks(self) -> List[int]:
if self.strict_block_indices is not None:
return self.strict_block_indices
# If multiple servers (e.g., launched on the same machine by a script) get to this line at the same time,
# this delay decreases the probability of a race condition while choosing the best blocks to serve.
time.sleep(random.random() * 2 * self.mean_block_selection_delay)
module_infos = get_remote_module_infos(self.dht, self.module_uids, latest=True)
return block_selection.choose_best_blocks(self.num_blocks, module_infos)
def _should_choose_other_blocks(self) -> bool:
if self.strict_block_indices is not None:
return False
module_infos = get_remote_module_infos(self.dht, self.module_uids, latest=True)
return block_selection.should_choose_other_blocks(self.dht.peer_id, module_infos, self.balance_quality)
def shutdown(self):
self.stop.set()
if self.reachability_protocol is not None:
self.reachability_protocol.shutdown()
self.dht.shutdown()
self.dht.join()
class ModuleContainer(threading.Thread):
"""Serves a set of specific Bloom layers for inference, forward, and backward. Announces itself over the DHT."""
# noinspection PyMethodOverriding
@classmethod
def create(
cls,
*,
dht: DHT,
dht_prefix: str,
converted_model_name_or_path: str,
block_config: PretrainedConfig,
attn_cache_bytes: int,
alloc_timeout: float,
server_info: ServerInfo,
block_indices: List[int],
min_batch_size: int,
max_batch_size: int,
max_chunk_size_bytes: int,
torch_dtype: torch.dtype,
cache_dir: str,
max_disk_space: int,
device: Union[str, torch.device],
compression: CompressionType,
update_period: float,
expiration: Optional[float],
revision: Optional[str],
token: Optional[Union[str, bool]],
quant_type: QuantType,
tensor_parallel_devices: Sequence[torch.device],
should_validate_reachability: bool,
**kwargs,
) -> ModuleContainer:
module_uids = [f"{dht_prefix}{UID_DELIMITER}{block_index}" for block_index in block_indices]
memory_cache = MemoryCache(attn_cache_bytes, alloc_timeout)
server_info.state = ServerState.JOINING
dht_announcer = ModuleAnnouncerThread(
module_uids,
dht,
server_info,
block_config=block_config,
memory_cache=memory_cache,
update_period=update_period,
expiration=expiration,
daemon=True,
)
dht_announcer.start()
logger.info(f"Announced that blocks {block_indices} are joining")
assert len(tensor_parallel_devices) >= 1 and all(isinstance(d, torch.device) for d in tensor_parallel_devices)
blocks = {}
try:
for module_uid, block_index in zip(module_uids, block_indices):
block = load_pretrained_block(
converted_model_name_or_path,
block_index,
config=block_config,
torch_dtype=torch_dtype,
revision=revision,
token=token,
cache_dir=cache_dir,
max_disk_space=max_disk_space,
)
block = convert_block(
block,
block_index,
block_config,
tensor_parallel_devices,
device,
quant_type,
adapters=server_info.adapters,
freeze=True,
token=token,
cache_dir=cache_dir,
max_disk_space=max_disk_space,
)
blocks[module_uid] = TransformerBackend(
module_uid,
block,
config=block_config,
memory_cache=memory_cache,
backend_dtype=torch_dtype,
max_chunk_size_bytes=max_chunk_size_bytes,
args_schema=(
BatchTensorDescriptor(
1, 2048, block_config.hidden_size, dtype=torch_dtype, compression=compression
),
),
kwargs_schema={},
outputs_schema=(
BatchTensorDescriptor(
1, 2048, block_config.hidden_size, dtype=torch_dtype, compression=compression
),
),
min_batch_size=min_batch_size,
max_batch_size=max_batch_size,
)
merge_inference_pools_inplace(blocks)
if should_validate_reachability:
validate_reachability(dht.peer_id)
except:
logger.debug("Shutting down backends")
for backend in blocks.values():
backend.shutdown()
dht_announcer.announce(ServerState.OFFLINE)
logger.info(f"Announced that blocks {module_uids} are offline")
raise
return cls(
dht,
dht_prefix,
blocks,
dht_announcer=dht_announcer,
server_info=server_info,
update_period=update_period,
expiration=expiration,
**kwargs,
)
def __init__(
self,
dht: DHT,
dht_prefix: str,
module_backends: Dict[str, TransformerBackend],
*,
inference_max_length: int,
num_handlers: int,
dht_announcer: ModuleAnnouncerThread,
server_info: ServerInfo,
update_period: float,
expiration: Optional[float] = None,
request_timeout: float,
session_timeout: float,
step_timeout: float,
start: bool,
**kwargs,
):
super().__init__()
self.dht, self.module_backends = dht, module_backends
self.server_info, self.update_period, self.expiration = server_info, update_period, expiration
handler_event_queues = [mp.Queue() for _ in range(num_handlers)]
self.conn_handlers = [
TransformerConnectionHandler(
dht,
self.module_backends,
adapters=server_info.adapters,
dht_prefix=dht_prefix,
handler_event_queues=handler_event_queues,
handler_index=i,
inference_max_length=inference_max_length,
request_timeout=request_timeout,
session_timeout=session_timeout,
step_timeout=step_timeout,
)
for i in range(num_handlers)
]
self.runtime = RuntimeWithDeduplicatedPools(self.module_backends, device=None, **kwargs)
# note: We set device=None in runtime to avoid moving all modules to device 0 in runtime.run(). tensor_parallel has already moved it as needed.
dht_announcer.announce(ServerState.ONLINE)
self.dht_announcer = dht_announcer
if start:
self.run_in_background(await_ready=True)
def run(self):
"""
Runs ModuleContainer in the current thread. Initializes dht if necessary, starts connection handlers,
runs Runtime (self.runtime) to process incoming requests.
"""
for handler in self.conn_handlers:
handler.run_in_background()
self.runtime.run()
def run_in_background(self, await_ready=True, timeout=None):
"""
        Starts ModuleContainer in a background thread. If await_ready is True, this method waits until the container
        is ready to process incoming requests, or for at most :timeout: seconds.
"""
self.start()
if await_ready and not self.ready.wait(timeout=timeout):
raise TimeoutError("ModuleContainer didn't notify .ready in {timeout} seconds")
@property
def ready(self) -> mp.synchronize.Event:
"""
An event (multiprocessing.Event) that is set when the container is ready to process requests.
Example
=======
>>> container.start()
>>> container.ready.wait(timeout=10)
>>> print("Container ready" if container.ready.is_set() else "Container didn't start in 10 seconds")
"""
return self.runtime.ready # mp.Event that is true if self is ready to process batches
def is_healthy(self) -> bool:
return all(handler.is_alive() for handler in self.conn_handlers) and all(
pool.is_alive() for pool in self.runtime.pools
)
def shutdown(self):
"""
Gracefully terminate the container, process-safe.
Please note that terminating container otherwise (e.g. by killing processes) may result in zombie processes.
If you did already cause a zombie outbreak, your only option is to kill them with -9 (SIGKILL).
"""
self.dht_announcer.announce(ServerState.OFFLINE)
logger.info(f"Announced that blocks {list(self.module_backends.keys())} are offline")
self.ready.clear()
logger.debug("Shutting down connection handlers")
for handler in self.conn_handlers:
handler.shutdown()
logger.debug("Shutting down pools")
for pool in self.runtime.pools:
if pool.is_alive():
pool.shutdown()
logger.debug("Shutting down runtime")
self.runtime.shutdown()
logger.debug("Shutting down backends")
for backend in self.module_backends.values():
backend.shutdown()
logger.info("Module container shut down successfully")
class ModuleAnnouncerThread(threading.Thread):
"""Periodically announces that this container hosts the specified modules, visible to all DHT peers"""
def __init__(
self,
module_uids: List[str],
dht: DHT,
server_info: ServerInfo,
*,
block_config: PretrainedConfig,
memory_cache: MemoryCache,
update_period: float,
expiration: float,
max_pinged: int = 5,
**kwargs,
):
super().__init__(**kwargs)
self.module_uids = module_uids
self.dht = dht
self.server_info = server_info
self.memory_cache = memory_cache
self.bytes_per_token = block_config.hidden_size * torch.finfo(DTYPE_MAP[server_info.torch_dtype]).bits // 8
self.bytes_per_token //= block_config.num_key_value_groups
self.update_period = update_period
self.expiration = expiration
self.trigger = threading.Event()
self.max_pinged = max_pinged
dht_prefix = module_uids[0].split(UID_DELIMITER)[0]
block_indices = [int(uid.split(UID_DELIMITER)[-1]) for uid in module_uids]
start_block, end_block = min(block_indices), max(block_indices) + 1
self.next_uids = [f"{dht_prefix}{UID_DELIMITER}{i}" for i in range(start_block + 1, end_block + 1)]
self.ping_aggregator = PingAggregator(self.dht)
def run(self) -> None:
while True:
start_time = time.perf_counter()
self.server_info.cache_tokens_left = self.memory_cache.bytes_left // self.bytes_per_token
if self.server_info.state != ServerState.OFFLINE:
self._ping_next_servers()
self.server_info.next_pings = {
peer_id.to_base58(): rtt for peer_id, rtt in self.ping_aggregator.to_dict().items()
}
else:
self.server_info.next_pings = None # No need to ping if we're disconnecting
declare_active_modules(
self.dht,
self.module_uids,
self.server_info,
expiration_time=get_dht_time() + self.expiration,
)
if self.server_info.state == ServerState.OFFLINE:
break
delay = self.update_period - (time.perf_counter() - start_time)
if delay < 0:
logger.warning("Declaring blocs to DHT takes more than --update_period, consider increasing it")
self.trigger.wait(max(delay, 0))
self.trigger.clear()
def announce(self, state: ServerState) -> None:
self.server_info.state = state
self.trigger.set()
if state == ServerState.OFFLINE:
self.join()
def _ping_next_servers(self) -> Dict[hivemind.PeerID, float]:
module_infos = get_remote_module_infos(self.dht, self.next_uids, latest=True)
middle_servers = {peer_id for info in module_infos[:-1] if info is not None for peer_id in info.servers}
pinged_servers = set(sample_up_to(middle_servers, self.max_pinged))
pinged_servers.discard(self.dht.peer_id)
if module_infos[-1] is not None:
# Sample servers hosting the block after the last one (most likely continuations) separately
pinged_servers |= set(sample_up_to(module_infos[-1].servers, self.max_pinged))
self.ping_aggregator.ping(list(pinged_servers))
class RuntimeWithDeduplicatedPools(Runtime):
"""A version of hivemind.moe.server.runtime.Runtime that allows multiple backends to reuse a task pool"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pools = tuple(set(self.pools))
|
TheGrid-main
|
grid/server/server.py
|
from abc import ABC, abstractmethod
import torch
class TaskPrioritizerBase(ABC):
"""Abstract class for TaskPrioritizer whose responsibility is to evaluate task priority"""
@abstractmethod
def prioritize(self, *input: torch.Tensor, points: float = 0.0, **kwargs) -> float:
"""Evaluates task value by the amount of points given, task input and additional kwargs. Lower priority is better"""
pass
class DummyTaskPrioritizer(TaskPrioritizerBase):
"""Simple implementation of TaskPrioritizer which gives constant zero priority for every task"""
def prioritize(self, *input: torch.Tensor, points: float = 0.0, **kwargs) -> float:
if kwargs.get("type") == "inference":
return 1.0 # inference steps go first since they are more latency-sensitive
return 2.0 # forward, backward
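# A minimal sketch (illustrative) of a custom prioritizer that favors callers who spent more points:
class _ExamplePointsPrioritizer(TaskPrioritizerBase):
    def prioritize(self, *input: torch.Tensor, points: float = 0.0, **kwargs) -> float:
        return -points  # spending more points yields a lower (i.e. better) priority value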
|
TheGrid-main
|
grid/server/task_prioritizer.py
|
from __future__ import annotations
from collections import Counter
from itertools import chain
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import torch
from hivemind import BatchTensorDescriptor, TensorDescriptor
from hivemind.moe.expert_uid import ExpertUID
from hivemind.moe.server.module_backend import ModuleBackend
from hivemind.utils import get_logger
from tensor_parallel import TensorParallel
from tensor_parallel.tensor_parallel import PerDeviceTensors
from transformers import PretrainedConfig
from grid.data_structures import InferenceMetadata
from grid.server.memory_cache import MemoryCache
from grid.server.task_pool import PrioritizedTaskPool
from grid.utils.misc import is_dummy
logger = get_logger(__name__)
class TransformerBackend(ModuleBackend):
"""A wrapper for a transformer block that can process requests for forward, backward and inference"""
_peft_module = None
def __init__(
self,
*args,
config: PretrainedConfig,
memory_cache: MemoryCache,
backend_dtype: torch.dtype,
max_chunk_size_bytes: int,
**kwargs,
):
import grid.utils.peft as _peft_module
self._peft_module = _peft_module
super().__init__(*args, **kwargs)
assert isinstance(self.module, TensorParallel)
self.config = config
self.memory_cache = memory_cache
self.max_chunk_size_bytes = max_chunk_size_bytes
for name, param in self.module.named_parameters():
assert not param.requires_grad, f"Block parameters must not accumulate gradients, but {name} does"
for name, buf in self.module.named_buffers():
            assert not buf.requires_grad, f"Block buffers must not require gradients, but {name} does"
max_batch_size = self.forward_pool.max_batch_size
device = self.module.devices[self.module.output_device_index]
self.inference_pool = PrioritizedTaskPool(
self.inference_step, max_batch_size=max_batch_size, device=device, name=f"{self.name}_inference"
) # note: inference_pools may be merged later, see merge_inference_pools_inplace
self.forward_pool = PrioritizedTaskPool(
self.forward, max_batch_size=max_batch_size, device=device, name=f"{self.name}_forward"
)
self.backward_pool = PrioritizedTaskPool(
self.backward, max_batch_size=max_batch_size, device=device, name=f"{self.name}_backward"
)
self.dtype = backend_dtype
self.dtype_bytes = torch.finfo(self.dtype).bits // 8
self.shard_num_heads = []
for shard in self.module.module_shards:
for submodule in shard.modules():
if isinstance(submodule, config.attn_class):
self.shard_num_heads.append(submodule.num_heads)
assert len(self.shard_num_heads) == len(self.module.devices)
assert sum(self.shard_num_heads) == config.num_attention_heads
self.inference_schema = (
(
*self.args_schema,
BatchTensorDescriptor((), dtype=self.dtype),
BatchTensorDescriptor((), dtype=torch.int64),
),
self.kwargs_schema,
)
self.cache_bytes_per_token: Dict[torch.device, int] = Counter()
for descr in self.get_inference_cache_descriptors(batch_size=1, max_length=1):
self.cache_bytes_per_token[descr.device] += descr.numel() * torch.finfo(descr.dtype).bits // 8
def get_inference_cache_descriptors(self, batch_size: int, max_length: int) -> Sequence[TensorDescriptor]:
"""Create tensor descriptors for attention cache tensors used during inference_step"""
head_dim = self.config.hidden_size // self.config.num_attention_heads
cache_tensors = []
for device, num_heads in zip(self.module.devices, self.shard_num_heads):
num_heads //= self.config.num_key_value_groups
keys = TensorDescriptor((batch_size, num_heads, head_dim, max_length), dtype=self.dtype, device=device)
values = TensorDescriptor((batch_size, num_heads, max_length, head_dim), dtype=self.dtype, device=device)
cache_tensors.extend((keys, values))
return cache_tensors
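    # Example shapes (illustrative): with batch_size=1, max_length=2048, 16 key/value heads on a
    # shard and head_dim=128, this yields keys of shape (1, 16, 128, 2048) and values of shape
    # (1, 16, 2048, 128) for that device.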
def forward(self, *inputs: Union[torch.Tensor, str]) -> Tuple[torch.Tensor, ...]:
*inputs, active_adapter = inputs
with self._peft_module.using_adapter(active_adapter):
return super().forward(*inputs)
def backward(self, *inputs: Union[torch.Tensor, str]) -> Tuple[torch.Tensor, ...]:
*inputs, active_adapter = inputs
with self._peft_module.using_adapter(active_adapter):
return super().backward(*inputs)
@torch.inference_mode()
def inference_step(
self,
hidden_states: torch.Tensor,
hypo_ids: torch.LongTensor,
inference_info: InferenceMetadata,
) -> Tuple[torch.Tensor, ...]:
assert hidden_states.ndim == 3, "expected hidden states to be 3-dimensional: [batch_size, seq_len, hid_size]"
seq_len = hidden_states.shape[1]
with self.memory_cache.use_cache(
*inference_info.cache_handles
) as cache_tensors, self._peft_module.using_adapter(inference_info.active_adapter):
self._reorder_cache_inplace(cache_tensors, hypo_ids)
# We chunk the inputs so that peak memory for long sequences fits into `autograd_memory`
# reserved in `Server._choose_num_blocks()`. This saves us from OOMs if `max_chunk_size_bytes`
# is at least 4-6x less than `autograd_memory`.
max_chunk_length = self._estimate_max_chunk_length(hidden_states, inference_info)
output_hidden_states = torch.empty_like(hidden_states) if seq_len > max_chunk_length else None
layer_past = self._select_layer_past(cache_tensors, inference_info.prefix_length)
for offset in range(0, seq_len, max_chunk_length):
hidden_states_chunk = hidden_states[:, offset : offset + max_chunk_length, :]
output_hidden_states_chunk, new_kvs = self.module.forward(
hidden_states_chunk, layer_past=layer_past, use_cache=True
)
if seq_len > max_chunk_length:
output_hidden_states[:, offset : offset + max_chunk_length] = output_hidden_states_chunk
else:
output_hidden_states = output_hidden_states_chunk # saves one memcopy
layer_past = new_kvs
self._update_cache_inplace(cache_tensors, new_kvs, inference_info.prefix_length)
return (output_hidden_states,)
def _estimate_max_chunk_length(self, hidden_states: torch.Tensor, inference_info: InferenceMetadata) -> int:
# We assume that attention logit matrices are the main thing that consumes memory, given that
# the model uses multi-query attention
batch_size, seq_length, hidden_size = hidden_states.shape
worst_case_length = inference_info.prefix_length + seq_length
attn_bytes_per_token = max(self.shard_num_heads) * batch_size * self.dtype_bytes * worst_case_length
return max(1, self.max_chunk_size_bytes // attn_bytes_per_token)
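    # Worked example (illustrative): with 32 heads on the largest shard, batch_size=1, float16
    # (2 bytes) and a worst-case length of 4096 tokens, attn_bytes_per_token = 32 * 1 * 2 * 4096
    # = 256 KiB, so the default 256 MiB max_chunk_size_bytes allows chunks of up to 1024 tokens.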
def _reorder_cache_inplace(self, cache_tensors: torch.Tensor, hypo_ids: torch.Tensor):
"""If hypo_ids is specified, reorder elements of each cache tensor in-place by taking indices from hypo_ids"""
if not is_dummy(hypo_ids):
for cache_tensor in cache_tensors:
cache_tensor[...] = cache_tensor[hypo_ids.to(cache_tensor.device)] # in-place reorder cache by hypo ids
def _select_layer_past(self, cache_tensors: Sequence[torch.Tensor], prefix_length: int) -> Sequence[torch.Tensor]:
"""Extract first {prefix_length} tokens and reshape them such that they can be used as layer_past"""
key_cache, value_cache = list(cache_tensors[0::2]), list(cache_tensors[1::2])
for i in range(len(key_cache)):
key_cache[i] = key_cache[i].flatten(0, 1)[:, :, :prefix_length]
# shape: [batch * num_kv_heads, head_dim, kv_length]
value_cache[i] = value_cache[i].flatten(0, 1)[:, :prefix_length]
# shape: [batch * num_kv_heads, kv_length, head_dim]
layer_past = tuple(chain(*zip(key_cache, value_cache)))
return PerDeviceTensors(*layer_past) if len(self.module.module_shards) > 1 else layer_past
def _update_cache_inplace(
self, cache_tensors: Sequence[torch.Tensor], new_kvs: Sequence[torch.Tensor], prefix_length: int
):
"""Writes new key/value tensors back into cache, works in-place"""
_batch_size_times_num_kv_heads, head_dim, new_length = new_kvs[0].shape
for cache_key, new_key in zip(cache_tensors[0::2], new_kvs[0::2]):
new_key = new_key.view(*cache_key.shape[:3], new_length)
cache_key[:, :, :, prefix_length:new_length] = new_key[:, :, :, prefix_length:new_length]
for cache_value, new_value in zip(cache_tensors[1::2], new_kvs[1::2]):
new_value = new_value.view(*cache_value.shape[:2], new_length, head_dim)
cache_value[:, :, prefix_length:new_length, :] = new_value[:, :, prefix_length:new_length, :]
def get_pools(self) -> Sequence[PrioritizedTaskPool]:
return self.forward_pool, self.backward_pool, self.inference_pool
def get_info(self) -> Dict[str, Any]:
"""Get module parameters and stats. Used by RemoteExpert to check shapes and for DMoE orchestration."""
return dict(super().get_info(), inference_schema=self.inference_schema)
def shutdown(self):
# Break the cyclic references, otherwise TransformerBackend may be not garbage-collected
self.forward_pool = self.backward_pool = self.inference_pool = None
# Explicitly free the GPU memory. This is not necessary at the time this code is written,
# but may help to avoid future issues when the module is not garbage-collected for some reasons
dummy = torch.tensor([])
for p in self.module.parameters():
p.data = dummy
def merge_inference_pools_inplace(backends: Dict[ExpertUID, TransformerBackend]):
"""Replace each backend's rpc_inference pools with a combined pool runs multiple blocks in one call"""
assert len(backends) != 0 and all(isinstance(b, TransformerBackend) for b in backends.values())
first_pool = next(iter(backends.values())).inference_pool
merged_pool = PrioritizedTaskPool(
_MergedInferenceStep(backends),
max_batch_size=first_pool.max_batch_size,
device=first_pool.device,
name="merged_inference",
)
for backend in backends.values():
assert not backend.inference_pool.is_alive()
backend.inference_pool = merged_pool
class _MergedInferenceStep:
def __init__(self, backends: Dict[ExpertUID, TransformerBackend]):
self.backends = backends
@torch.inference_mode()
def __call__(
self,
hidden_states: torch.Tensor,
hypo_ids: torch.LongTensor,
inference_infos: Sequence[InferenceMetadata],
*optional_prompts: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, ...]:
assert len(inference_infos) == len(
optional_prompts
), f"found {len(inference_infos)} blocks but {len(optional_prompts)} prompts"
for inference_info, optional_prompt in zip(inference_infos, optional_prompts):
if optional_prompt is not None:
hidden_states[:, : optional_prompt.shape[1]] += optional_prompt
(hidden_states,) = self.backends[inference_info.uid].inference_step(hidden_states, hypo_ids, inference_info)
return (hidden_states,)
|
TheGrid-main
|
grid/server/backend.py
|
from __future__ import annotations
import asyncio
import contextlib
import multiprocessing as mp
import sys
from enum import Enum
from itertools import chain
from typing import Any, AsyncIterator, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from async_timeout import timeout
from hivemind import (
DHT,
MSGPackSerializer,
P2PContext,
PeerID,
deserialize_tensor_stream,
deserialize_torch_tensor,
nested_flatten,
nested_pack,
serialize_torch_tensor,
)
from hivemind.moe.server.connection_handler import ConnectionHandler
from hivemind.p2p.p2p_daemon import DEFAULT_MAX_MSG_SIZE
from hivemind.proto import runtime_pb2
from hivemind.utils.asyncio import amap_in_executor, anext
from hivemind.utils.logging import get_logger
from hivemind.utils.streaming import split_for_streaming
import grid
from grid.data_structures import CHAIN_DELIMITER, UID_DELIMITER, InferenceMetadata, ModuleUID
from grid.server.backend import TransformerBackend
from grid.server.memory_cache import Handle
from grid.server.task_pool import PrioritizedTaskPool
from grid.server.task_prioritizer import DummyTaskPrioritizer, TaskPrioritizerBase
from grid.utils.misc import DUMMY, is_dummy
logger = get_logger(__name__)
# Fix pickling protobufs, see https://stackoverflow.com/a/74873028
sys.modules["runtime_pb2"] = runtime_pb2
CACHE_TOKENS_AVAILABLE = "cache_tokens_available"
class Event(Enum):
NEW_SESSION = 0
END_SESSION = 1
PUSH = 2
SHUTDOWN = 3
class TransformerConnectionHandler(ConnectionHandler):
"""Handles three request types: forward, backward and forward-incremental (inference)"""
module_backends: Dict[ModuleUID, TransformerBackend]
def __init__(
self,
dht: DHT,
module_backends: Dict[str, TransformerBackend],
*,
adapters: Optional[Sequence[str]],
dht_prefix: str,
handler_event_queues: Sequence[mp.Queue],
handler_index: int,
inference_max_length: int,
request_timeout: float,
session_timeout: float,
step_timeout: float,
task_prioritizer: TaskPrioritizerBase = DummyTaskPrioritizer(),
):
super().__init__(dht, module_backends)
for module_backend in self.module_backends.values():
assert isinstance(module_backend, TransformerBackend)
self.dht_prefix = dht_prefix
self.adapters = adapters
self._handler_event_queues = handler_event_queues
self._handler_index = handler_index
self._own_event_queue = handler_event_queues[handler_index]
self._listener_task: Optional[asyncio.Task] = None
self._session_queues: Dict[str, asyncio.Queue] = {}
self._session_handlers: Dict[str, int] = {}
self.inference_max_length = inference_max_length
self.request_timeout = request_timeout
self.session_timeout, self.step_timeout = session_timeout, step_timeout
self._prioritizer = task_prioritizer
async def add_p2p_handlers(self, *args, **kwargs) -> None:
if self._listener_task is None:
# Start listening to our own event queue before we accept any requests
self._listener_task = asyncio.create_task(self._listen_to_event_queue())
await super().add_p2p_handlers(*args, **kwargs)
def shutdown(self):
if self.is_alive():
self._outer_pipe.send("_shutdown")
self._own_event_queue.put((Event.SHUTDOWN, None, None))
self.join(self.shutdown_timeout)
if self.is_alive():
logger.warning(f"{self.__class__.__name__} failed to shut down gracefully, sending SIGTERM")
self.terminate()
async def _gather_inputs(
self, requests: AsyncIterator[runtime_pb2.ExpertRequest], context: P2PContext
) -> Tuple[str, List[torch.Tensor], Dict]:
block_uid, metadata = None, None
def _unpack(req: runtime_pb2.ExpertRequest) -> Iterable[runtime_pb2.Tensor]:
nonlocal block_uid, metadata
if block_uid is None:
block_uid = req.uid
elif block_uid != req.uid:
raise ValueError("Block uids differ in one request")
if metadata is None:
metadata = MSGPackSerializer.loads(req.metadata) if req.metadata else {}
return req.tensors
tensors_stream = amap_in_executor(_unpack, requests)
inputs = await deserialize_tensor_stream(tensors_stream)
assert isinstance(block_uid, str) and isinstance(metadata, dict)
return block_uid, inputs, metadata
async def rpc_inference(
self,
requests: AsyncIterator[runtime_pb2.ExpertRequest],
context: P2PContext,
) -> AsyncIterator[runtime_pb2.ExpertResponse]:
"""Compute a single step of inference using attention cache; update attention cache accordingly."""
async with timeout(self.session_timeout):
try:
request = await asyncio.wait_for(anext(requests), self.step_timeout)
except asyncio.TimeoutError:
self._log_request("rpc_inference.open", None, context, warning="timed out")
return
requested_uids = self._check_uids(request.uid)
self._log_request("rpc_inference.open", requested_uids, context)
try:
metadata = MSGPackSerializer.loads(request.metadata) if request.metadata else {}
requested_backends = tuple(self.module_backends[uid] for uid in requested_uids)
max_length = metadata.get("max_length")
active_adapter = self._get_active_adapter(metadata)
points = metadata.get("points", 0)
session_id = metadata.get("session_id")
if not requested_uids:
raise ValueError("User must specify at least one block for inference, but got none")
assert isinstance(
max_length, int
), f"rpc_inference metadata must contain int max_length, got {max_length}"
assert isinstance(
points, (float, int)
), f"rpc_inference should have number of points as a number or None, got {points}"
if not 0 <= max_length <= self.inference_max_length:
raise ValueError(
f"Cannot allocate KV cache for {max_length} tokens, max = {self.inference_max_length}"
)
point_per_piece = points / max_length if max_length > 0 else 0.0
batch_size = request.tensors[0].size[0] if request.tensors else 1
prefix_length = 0
async with self._allocate_cache(requested_backends, batch_size, max_length) as cache_handles:
assert len(cache_handles) == len(requested_backends)
first_request = request
background_tasks = set()
async for request, metadata in self._iterate_inference_steps(
first_request, requests, session_id, requested_uids, context
):
hidden_states, prompts, hypo_ids = map(deserialize_torch_tensor, request.tensors)
# Cast inputs to backend dtype
hidden_states = hidden_states.to(requested_backends[0].dtype)
assert hypo_ids.dtype == torch.int64, f"hypo ids must be int64, got {hypo_ids.dtype}"
# parse deep prompts (optional argument)
has_prompts = prompts is not None and not is_dummy(prompts)
if not has_prompts:
prompts = [None] * len(requested_backends)
else:
prompts = [p.squeeze(0) for p in prompts.to(requested_backends[0].dtype).split(1, dim=0)]
prompts = [prompt if not is_dummy(prompt) else None for prompt in prompts]
if not (len(requested_backends) == len(prompts)):
raise ValueError(f"Received {len(prompts)} prompts for {len(requested_backends)} backends")
length_increment = hidden_states.shape[1] # how many tokens are added this step (in each seq)
if prefix_length + length_increment > max_length:
raise ValueError(
f"Maximum length exceeded: prefix {prefix_length} + current {length_increment}"
f" exceeds pre-allocated maximum {max_length}"
)
priority = self._prioritizer.prioritize(
hidden_states,
hypo_ids,
points=point_per_piece,
requested_uids=requested_uids,
type="inference",
)
inference_infos = tuple(
InferenceMetadata(uid, prefix_length, tuple(handles), active_adapter)
for uid, handles in zip(requested_uids, cache_handles)
)
if hidden_states.numel() == 0:
pass # user passed a tensor with 0 tokens. This is a special case that occurs, e.g.
# when user wants to pre-allocate cache or check that server *can* allocate that cache
else:
assert hidden_states.ndim == 3, "hidden states must be a single 3d tensor"
(hidden_states,) = await self.module_backends[requested_uids[0]].inference_pool.submit_task(
hidden_states, hypo_ids, inference_infos, *prompts, priority=priority
)
# serialize and send last layer outputs
output_tensors = [
serialize_torch_tensor(result.to(proto.dtype), proto.compression, allow_inplace=True)
for result, proto in zip(
(hidden_states,), nested_flatten(requested_backends[-1].outputs_schema)
)
]
if not has_prompts:
task = asyncio.create_task(self._push_outputs(request, output_tensors[0], metadata))
background_tasks.add(task) # Keep reference until it is done to save it from GC
task.add_done_callback(background_tasks.discard)
yield runtime_pb2.ExpertResponse(tensors=output_tensors)
# prepare for next step
prefix_length += length_increment
finally:
self._log_request("rpc_inference.close", requested_uids, context)
@contextlib.contextmanager
def _managed_session(self, session_id: str):
assert session_id not in self._session_queues, f"session id {session_id} is not unique"
try:
self._session_queues[session_id] = asyncio.Queue()
self._session_handlers[session_id] = self._handler_index
for other_index, other_queue in enumerate(self._handler_event_queues):
if other_index != self._handler_index:
other_queue.put_nowait((Event.NEW_SESSION, session_id, self._handler_index))
yield
finally:
self._session_queues.pop(session_id).put_nowait(None) # put None so that the get task will not hang
del self._session_handlers[session_id]
for other_index, other_queue in enumerate(self._handler_event_queues):
if other_index != self._handler_index:
other_queue.put_nowait((Event.END_SESSION, session_id, self._handler_index))
def _put_into_session_queue(self, session_id: str, request: runtime_pb2.ExpertRequest):
handler_index = self._session_handlers.get(session_id)
if handler_index is None:
logger.debug(f"Ignored rpc_push to unknown session ID: {session_id}")
elif handler_index == self._handler_index:
self._session_queues[session_id].put_nowait(request)
else:
self._handler_event_queues[handler_index].put_nowait((Event.PUSH, session_id, request))
async def _get_from_session_queue(self, session_id: str) -> Optional[runtime_pb2.ExpertRequest]:
assert self._session_handlers[session_id] == self._handler_index, "session belongs to another handler"
return await self._session_queues[session_id].get()
async def _listen_to_event_queue(self):
loop = asyncio.get_event_loop()
while True:
try:
event, session_id, payload = await loop.run_in_executor(None, self._own_event_queue.get)
if event == Event.SHUTDOWN:
break
elif event == Event.NEW_SESSION:
self._session_handlers[session_id] = payload # index of the handler that owns that session
elif event == Event.END_SESSION:
self._session_handlers.pop(session_id, None)
elif event == Event.PUSH:
maybe_session_queue = self._session_queues.get(session_id)
if maybe_session_queue is not None:
maybe_session_queue.put_nowait(payload)
else:
raise RuntimeError(f"Unexpected event: {event}")
except Exception as e:
logger.exception(e)
async def _iterate_inference_steps(
self,
first_request: runtime_pb2.ExpertRequest,
requests: AsyncIterator[runtime_pb2.ExpertRequest],
session_id: Optional[str],
requested_uids: Sequence[str],
context: P2PContext,
) -> AsyncIterator[Tuple[runtime_pb2.ExpertRequest, dict]]:
processed_step_ids = set()
n_pushes = n_late_pushes = 0
request = first_request
anext_task = get_push_task = None
try:
with self._managed_session(session_id) if session_id is not None else contextlib.nullcontext():
while request.tensors: # iterate while user is willing to supply tensors
metadata = MSGPackSerializer.loads(request.metadata) if request.metadata else {}
step_id = metadata.get("step_id")
pushed = metadata.get("pushed")
if pushed:
n_pushes += 1
self._log_request("rpc_inference.push", requested_uids, context, debug="session received push")
if step_id is None or step_id not in processed_step_ids:
yield request, metadata
if step_id is not None:
processed_step_ids.add(step_id)
elif pushed:
n_late_pushes += 1
self._log_request(
"rpc_inference.push",
requested_uids,
context,
warning=f"arrived late {n_late_pushes / n_pushes * 100:.1f}% of the time",
)
# Wait for the next request, coming either from the `requests` iterator or `push_queue`
if anext_task is None:
anext_task = asyncio.create_task(anext(requests))
if get_push_task is None:
if session_id is not None:
get_push_task = asyncio.create_task(self._get_from_session_queue(session_id))
else:
get_push_task = asyncio.create_task(asyncio.Event().wait()) # Dummy never-ending task
done, _ = await asyncio.wait(
[anext_task, get_push_task], timeout=self.step_timeout, return_when=asyncio.FIRST_COMPLETED
)
if anext_task in done:
request = await anext_task
anext_task = None
elif get_push_task in done:
request = await get_push_task
get_push_task = None
else:
self._log_request("rpc_inference.step", requested_uids, context, warning="timed out")
anext_task.cancel()
get_push_task.cancel()
return
except Exception:
logger.warning("rpc_inference._iterate_inference_steps() exception:", exc_info=True)
raise
async def rpc_push(self, request: runtime_pb2.ExpertRequest, context: P2PContext) -> runtime_pb2.ExpertResponse:
"""Directly push activation tensors from one server to another"""
requested_uids = self._check_uids(request.uid)
metadata = MSGPackSerializer.loads(request.metadata)
session_id = metadata["session_id"]
self._log_request("rpc_push", requested_uids, context, debug=f"session_id={session_id}")
self._put_into_session_queue(session_id, request)
return runtime_pb2.ExpertResponse()
async def _push_outputs(
self, request: runtime_pb2.ExpertRequest, serialized_outputs: runtime_pb2.Tensor, metadata: dict
) -> None:
try:
next_servers = metadata.get("next_servers")
if not next_servers:
return
next_peer_id, next_session_id, next_start, next_end = next_servers[0]
next_peer_id = PeerID.from_base58(next_peer_id)
next_uid = CHAIN_DELIMITER.join(f"{self.dht_prefix}{UID_DELIMITER}{i}" for i in range(next_start, next_end))
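            # For example (hypothetical values, assuming UID_DELIMITER == "." and CHAIN_DELIMITER == " "):
            # dht_prefix "mymodel" with next_start=4, next_end=6 yields next_uid == "mymodel.4 mymodel.5".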
# Sending hidden states serialized with output_schema to avoid double serialization
next_tensors = [serialized_outputs] + request.tensors[1:]
next_metadata = metadata.copy()
next_metadata.update(session_id=next_session_id, next_servers=next_servers[1:], pushed=True)
stub = self.get_stub(self._p2p, next_peer_id)
await stub.rpc_push(
runtime_pb2.ExpertRequest(
uid=next_uid,
tensors=next_tensors,
metadata=MSGPackSerializer.dumps(next_metadata),
),
timeout=self.request_timeout,
)
except Exception:
logger.debug(
f"Failed to push outputs to peer_id={next_peer_id}, session_id={next_session_id}, blocks={next_start}:{next_end}:",
exc_info=True,
)
async def rpc_forward(self, request: runtime_pb2.ExpertRequest, context: P2PContext) -> runtime_pb2.ExpertResponse:
async with timeout(self.request_timeout):
# Parse request and prepare backends
flat_inputs = [deserialize_torch_tensor(tensor) for tensor in request.tensors]
requested_uids = self._check_uids(request.uid)
self._log_request("rpc_forward", requested_uids, context)
requested_backends = tuple(self.module_backends[uid] for uid in requested_uids)
metadata = MSGPackSerializer.loads(request.metadata) if request.metadata else {}
active_adapter = self._get_active_adapter(metadata)
points = metadata.get("points", 0)
assert isinstance(
points, (float, int)
), f"rpc_forward should have number of points as number or None, got {points}"
hidden_states = await _rpc_forward(
*flat_inputs,
requested_backends=requested_backends,
prioritizer=self._prioritizer,
active_adapter=active_adapter,
points=points,
)
return runtime_pb2.ExpertResponse(
tensors=self._serialize_outputs(hidden_states, requested_backends, metadata)
)
async def rpc_forward_stream(
self, requests: AsyncIterator[runtime_pb2.ExpertRequest], context: P2PContext
) -> AsyncIterator[runtime_pb2.ExpertRequest]:
async with timeout(self.request_timeout):
# Parse requests and prepare backends
uid_str, flat_inputs, metadata = await self._gather_inputs(requests, context)
requested_uids = self._check_uids(uid_str)
self._log_request("rpc_forward_stream", requested_uids, context)
requested_backends = tuple(self.module_backends[uid] for uid in requested_uids)
active_adapter = self._get_active_adapter(metadata)
points = metadata.get("points", 0)
assert isinstance(
points, (float, int)
), f"rpc_forward_stream should have number of points as number or None, got {points}"
hidden_states = await _rpc_forward(
*flat_inputs,
requested_backends=requested_backends,
prioritizer=self._prioritizer,
active_adapter=active_adapter,
points=points,
)
# Split the serialized_output for streaming and respond to client
for tensor in self._serialize_outputs(hidden_states, requested_backends, metadata):
for part in split_for_streaming(tensor, DEFAULT_MAX_MSG_SIZE):
yield runtime_pb2.ExpertResponse(tensors=[part])
def _serialize_outputs(
self,
hidden_states: torch.Tensor,
requested_backends: Sequence[TransformerBackend],
metadata: Dict[str, Any],
) -> Sequence[runtime_pb2.Tensor]:
"""Serialize forward outputs using either outputs_schema or custom user-specified schema"""
assert isinstance(hidden_states, torch.Tensor) and hidden_states.ndim == 3, "hidden_states must be a 3d tensor"
outputs_schema = requested_backends[-1].outputs_schema
if metadata.get("output_compression") is not None:
assert isinstance(metadata["output_compression"], (list, tuple)), "output_compression must be a tuple/list"
output_compression = tuple(metadata["output_compression"])
assert all(isinstance(c, int) for c in output_compression), "output_compression must contain integers"
assert len(output_compression) == 1, "output_compression tuple should have 1 element"
else:
output_compression = tuple(tensor.compression for tensor in outputs_schema)
return [
serialize_torch_tensor(result.to(proto.dtype), compression, allow_inplace=True)
for result, proto, compression in zip([hidden_states], outputs_schema, output_compression)
]
async def rpc_backward(self, request: runtime_pb2.ExpertRequest, context: P2PContext) -> runtime_pb2.ExpertResponse:
async with timeout(self.request_timeout):
# Parse requests and prepare backends
flat_tensors = [deserialize_torch_tensor(tensor) for tensor in request.tensors]
requested_uids = self._check_uids(request.uid)
self._log_request("rpc_backward", requested_uids, context)
requested_backends = tuple(self.module_backends[uid] for uid in requested_uids)
metadata = MSGPackSerializer.loads(request.metadata) if request.metadata else {}
active_adapter = self._get_active_adapter(metadata)
points = metadata.get("points", 0)
assert isinstance(
points, (float, int)
), f"rpc_backward should have number of points as number or None, got {points}"
grads = await _rpc_backward(
*flat_tensors,
requested_backends=requested_backends,
prioritizer=self._prioritizer,
active_adapter=active_adapter,
points=points,
)
return runtime_pb2.ExpertResponse(tensors=self._serialize_grads(grads, requested_backends, metadata))
async def rpc_backward_stream(
self, requests: AsyncIterator[runtime_pb2.ExpertRequest], context: P2PContext
) -> AsyncIterator[runtime_pb2.ExpertResponse]:
async with timeout(self.request_timeout):
uids_header, flat_tensors, metadata = await self._gather_inputs(requests, context)
requested_uids = self._check_uids(uids_header)
self._log_request("rpc_backward_stream", requested_uids, context)
requested_backends = tuple(self.module_backends[uid] for uid in requested_uids)
active_adapter = self._get_active_adapter(metadata)
points = metadata.get("points", 0)
assert isinstance(
points, (float, int)
), f"rpc_backward_stream should have number of points as number or None, got {points}"
grads = await _rpc_backward(
*flat_tensors,
requested_backends=requested_backends,
prioritizer=self._prioritizer,
active_adapter=active_adapter,
points=points,
)
# Split the serialized_grad_inputs for streaming and respond
for tensor in self._serialize_grads(grads, requested_backends, metadata):
for part in split_for_streaming(tensor, DEFAULT_MAX_MSG_SIZE):
yield runtime_pb2.ExpertResponse(tensors=[part])
def _get_active_adapter(self, metadata: dict) -> str:
active_adapter = metadata.get("active_adapter", "")
if active_adapter and (active_adapter not in self.adapters):
raise KeyError(f"adapter {active_adapter} not found")
return active_adapter
def _serialize_grads(
self,
grads: Sequence[torch.Tensor],
requested_backends: Sequence[TransformerBackend],
metadata: Dict[str, Any],
) -> Sequence[runtime_pb2.Tensor]:
"""Serialize backward gradients w.r.t. inputs using either default schema or custom user-specified schema"""
# Modify grad_inputs_schema to support grad_prompts
assert len(requested_backends[0].args_schema) == 1 and len(grads) in (1, 2) # TODO generalize
flat_grads_schema = tuple(
nested_flatten((requested_backends[0].args_schema * len(grads), requested_backends[0].kwargs_schema))
) # TODO generalize
if metadata.get("output_compression") is not None:
assert isinstance(metadata["output_compression"], (list, tuple)), "output_compression must be a tuple/list"
output_compression = tuple(metadata["output_compression"])
assert all(isinstance(c, int) for c in output_compression), "output_compression must contain integers"
assert len(output_compression) == len(grads), f"output_compression should have {len(grads)} elements"
else:
output_compression = tuple(tensor.compression for tensor in flat_grads_schema)
return [
serialize_torch_tensor(result.to(proto.dtype), compression, allow_inplace=True)
for result, proto, compression in zip(grads, flat_grads_schema, output_compression)
]
def _check_uids(self, uids: str) -> Tuple[ModuleUID, ...]:
"""Check that the first request to rpc_inference is valid"""
uids = (uids or "").split(CHAIN_DELIMITER)
if not uids:
raise RuntimeError("User did not provide any uids")
for uid in uids:
if uid not in self.module_backends:
raise RuntimeError(f"Remote peer does not serve {uid}")
return tuple(uids)
@contextlib.asynccontextmanager
async def _allocate_cache(
self, backends: Sequence[TransformerBackend], batch_size: int, max_length: int
) -> Sequence[Sequence[Handle]]:
"""
Allocate memory cache for all transformer blocks, return cache handle
:returns: a list of {len(backends)} elements, where i-th element is a tuple of cache handles for i-th backend
"""
descriptors = [backend.get_inference_cache_descriptors(batch_size, max_length) for backend in backends]
async with backends[0].memory_cache.allocate_cache(*chain(*descriptors)) as handles:
yield nested_pack(handles, descriptors)
def _log_request(
self,
method: str,
uids: Optional[Sequence[ModuleUID]],
context: P2PContext,
*,
debug: Optional[str] = None,
warning: Optional[str] = None,
) -> None:
if uids is not None:
friendly_uids = [uid.split(".")[-1] for uid in uids if "." in uid]
friendly_uids = [int(uid) for uid in friendly_uids if uid.isdigit()]
friendly_uids = f"{min(friendly_uids)}:{max(friendly_uids) + 1}" if friendly_uids else uids
else:
friendly_uids = "n/a"
friendly_remote_id = "..." + str(context.remote_id)[-6:]
message = f"{method}(blocks={friendly_uids}, remote_peer={friendly_remote_id})"
if warning is not None:
logger.warning(f"{message}: {warning}")
elif debug is not None:
logger.debug(f"{message}: {debug}")
else:
logger.info(message)
async def rpc_info(self, request: runtime_pb2.ExpertUID, context: P2PContext) -> runtime_pb2.ExpertInfo:
"""Return metadata about stored block uids and current load"""
backend = self.module_backends[request.uid] if request.uid else next(iter(self.module_backends.values()))
result = {
"version": grid.__version__,
"dht_client_mode": self.dht.client_mode,
CACHE_TOKENS_AVAILABLE: backend.memory_cache.bytes_left // max(backend.cache_bytes_per_token.values()),
}
if request.uid:
block_info = self.module_backends[request.uid].get_info()
common_keys = set(result.keys()) & set(block_info.keys())
if common_keys:
raise RuntimeError(f"The block's rpc_info has keys reserved for the server's rpc_info: {common_keys}")
result.update(block_info)
return runtime_pb2.ExpertInfo(serialized_info=MSGPackSerializer.dumps(result))
async def _rpc_forward(
*flat_tensors: torch.Tensor,
requested_backends: Sequence[TransformerBackend],
active_adapter: str = "",
prioritizer: TaskPrioritizerBase,
points: int = 0,
) -> torch.Tensor:
"""
Run forward pass on deserialized inputs and prompts, used by rpc_forward and rpc_forward_stream
:param flat_tensors: a list of tensors that includes first layer inputs, optional prompts and extra tensors
:note: some input tensors can be missing, in which case they will be replaced with dummy tensors (see is_dummy)
:param requested_backends: a sequence of transformer blocks in the same order as they appear in forward pass
:returns: hidden states after the last layer [batch_size, seq_length, hid_size]
"""
hidden_states, prompts = flat_tensors
dtype = requested_backends[0].dtype
    # Check and parse input tensors, then cast them to the backend dtype
hidden_states = hidden_states.to(dtype)
assert hidden_states.ndim == 3
if prompts is None or is_dummy(prompts):
prompts = [DUMMY] * len(requested_backends)
else:
prompts = [p.squeeze(0) for p in prompts.to(requested_backends[0].dtype).split(1, dim=0)]
# Run a chain of requested backends
for backend, prompt in zip(requested_backends, prompts):
if not is_dummy(prompt):
hidden_states[:, : prompt.shape[1]] += prompt
assert isinstance(backend.inference_pool, PrioritizedTaskPool), "grid support only prioritized pools"
priority = prioritizer.prioritize(
hidden_states, points=points / len(requested_backends), backend=backend, type="forward"
)
(hidden_states,) = await backend.forward_pool.submit_task(
hidden_states,
active_adapter,
priority=priority,
)
assert isinstance(hidden_states, torch.Tensor)
assert (
hidden_states.ndim == 3
), f"inputs to {type(backend)} must be a list with a single 3d tensor of hidden states"
return hidden_states
async def _rpc_backward(
*flat_tensors: torch.Tensor,
requested_backends: Sequence[TransformerBackend],
active_adapter: str = "",
prioritizer: TaskPrioritizerBase,
points: int = 0,
) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
inputs, grad_outputs, prompts = flat_tensors
# Cast inputs & grad outputs to backend dtype
inputs = inputs.to(requested_backends[0].dtype)
grad_outputs = grad_outputs.to(requested_backends[-1].dtype)
if prompts is None or is_dummy(prompts):
prompts = [DUMMY] * len(requested_backends)
else:
prompts = [p.squeeze(0) for p in prompts.to(requested_backends[0].dtype).split(1, dim=0)]
# Run a forward chain to collect intermediate inputs
# Note that we do not forward for the last module since we do not need its output
inter_inputs = []
for backend, prompt in zip(requested_backends[:-1], prompts[:-1]):
assert inputs.ndim == 3, f"inputs to {type(backend)} must be a single 3d tensor of hidden states"
if not is_dummy(prompt):
inputs[:, : prompt.shape[1]] += prompt
inter_inputs.append(inputs)
assert isinstance(backend.inference_pool, PrioritizedTaskPool), "grid support only prioritized pools"
priority = prioritizer.prioritize(
inputs, points=points / len(requested_backends), backend=backend, type="forward_in_backward"
)
(inputs,) = await backend.forward_pool.submit_task(inputs, active_adapter, priority=priority)
assert isinstance(inputs, torch.Tensor)
if not is_dummy(prompts[-1]):
inputs[:, : prompts[-1].shape[1]] += prompts[-1]
inter_inputs.append(inputs)
assert len(inter_inputs) == len(prompts) == len(requested_backends), "internal shape error during backward"
grad_prompts_reversed = []
# Run a chain of requested backends
for inp, prompt, backend in zip(*map(reversed, (inter_inputs, prompts, requested_backends))):
assert isinstance(backend.inference_pool, PrioritizedTaskPool), "grid support only prioritized pools"
priority = prioritizer.prioritize(
inp, grad_outputs, points=points / len(requested_backends), backend=backend, type="backward"
)
(grad_outputs,) = await backend.backward_pool.submit_task(inp, grad_outputs, active_adapter, priority=priority)
assert isinstance(grad_outputs, torch.Tensor)
if not is_dummy(prompt):
grad_prompts_reversed.append(grad_outputs[:, : prompt.shape[1]].unsqueeze(0))
grad_prompts = torch.cat(grad_prompts_reversed[::-1], dim=0) if grad_prompts_reversed else DUMMY
return [grad_outputs] if is_dummy(grad_prompts) else [grad_outputs, grad_prompts] # TODO un-duct-tape
|
TheGrid-main
|
grid/server/handler.py
|
import fcntl
import json
import math
import multiprocessing as mp
import os
import time
from collections import Counter
from pathlib import Path
from typing import Dict, Optional, Sequence, Union
import torch
from hivemind.utils.logging import get_logger
from transformers import PretrainedConfig
from grid.server.block_utils import resolve_block_dtype
from grid.utils.convert_block import QuantType, convert_block
from grid.utils.disk_cache import DEFAULT_CACHE_DIR
logger = get_logger(__name__)
try:
import speedtest
except ImportError:
raise ImportError("Please `pip install speedtest-cli==2.1.3`")
if not hasattr(speedtest, "Speedtest"):
raise ImportError(
"You are using the wrong speedtest module. Please replace speedtest with speedtest-cli.\n"
"To do that, run `pip uninstall -y speedtest`. Depending on your python environment, "
"you may need to run uninstall speedtest two or more times, until it says 'not installed'.\n"
"After that, please `pip install speedtest-cli==2.1.3` to install the correct version."
)
def get_server_throughput(
model_name: str,
config: PretrainedConfig,
device: torch.device,
dtype: Union[str, torch.dtype],
*,
num_blocks: int,
quant_type: QuantType,
tensor_parallel_devices: Sequence[torch.device],
force_eval: bool = False,
cache_dir: Optional[str] = None,
) -> Dict[str, float]:
dtype = resolve_block_dtype(config, dtype)
if cache_dir is None:
cache_dir = DEFAULT_CACHE_DIR
lock_path = Path(cache_dir, "throughput.lock")
cache_path = Path(cache_dir, "throughput_v4.json")
# We use the system-wide lock since only one process at a time can measure the host throughput
os.makedirs(lock_path.parent, exist_ok=True)
with open(lock_path, "wb") as lock_fd:
logger.info("Loading throughput info")
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX)
# The OS will release the lock when lock_fd is closed or the process is killed
cache_key = f"model_{model_name}"
cache_key += f"_device_{get_device_name(device).replace(' ', '_')}"
cache_key += f"_dtype_{get_dtype_name(dtype, quant_type)}"
if len(tensor_parallel_devices) > 1:
for i, device_i in enumerate(tensor_parallel_devices):
cache_key += f"_tp{i}_{get_device_name(device_i).replace(' ', '_')}"
cache = {}
try:
if not force_eval and os.path.exists(cache_path):
with open(cache_path) as cache_fd:
cache = json.load(cache_fd)
assert isinstance(cache, dict)
except Exception:
logger.exception(f"Failed to read throughput info from {cache_path}")
cache = {}
if cache_key not in cache:
cache[cache_key] = measure_throughput_info(
config, device, dtype, quant_type=quant_type, tensor_parallel_devices=tensor_parallel_devices
)
try:
os.makedirs(cache_path.parent, exist_ok=True)
with open(cache_path, "w") as cache_fd:
json.dump(cache, cache_fd)
except Exception:
logger.exception(f"Failed to save throughput info in {cache_path}")
throughput_info = cache[cache_key]
# Most requests start at some block hosted by a server, then use all next blocks hosted on this server.
# Assuming the start block index is distributed uniformly, the average number of blocks used per request is
# E[Uniform{1, 2, ..., num_blocks}] = (num_blocks + 1) / 2
average_blocks_used = (num_blocks + 1) / 2
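    # Worked example (hypothetical numbers): with num_blocks = 8, a request touches (8 + 1) / 2 = 4.5 blocks
    # on average, so a measured forward_rps of 2000 tokens/sec per block is reported as about 2000 / 4.5 ~ 444
    # tokens/sec, unless the network throughput measured below is an even tighter bottleneck.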
throughput = throughput_info["forward_rps"] / average_blocks_used
throughput = min(throughput, throughput_info.get("network_rps", math.inf))
throughput_info["throughput"] = throughput
logger.info(f"Reporting throughput: {throughput:.1f} tokens/sec for {num_blocks} blocks")
return throughput_info
def measure_throughput_info(
config: PretrainedConfig,
device: torch.device,
dtype: torch.dtype,
*,
quant_type: QuantType,
tensor_parallel_devices: Sequence[torch.device],
) -> Dict[str, float]:
logger.info(
"Measuring network and compute throughput. This takes about a minute and will be cached for future runs"
)
return {
"inference_rps": measure_compute_rps(
config,
device,
dtype,
quant_type=quant_type,
tensor_parallel_devices=tensor_parallel_devices,
n_tokens=1,
n_steps=100,
inference=True,
),
"forward_rps": measure_compute_rps(
config,
device,
dtype,
quant_type=quant_type,
tensor_parallel_devices=tensor_parallel_devices,
n_tokens=1024,
n_steps=10,
inference=False,
),
"network_rps": measure_network_rps(config),
}
def measure_network_rps(
config: PretrainedConfig, *, timeout: float = 60, default_speed: float = 100e6 # 100 Mbit/s
) -> Optional[float]:
bits_per_request = config.hidden_size * 16 # Clients usually send 16-bit tensors for forward/backward
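    # Rough scale (hypothetical numbers): for hidden_size = 8192, each token costs 8192 * 16 = 131072 bits
    # (~16 KiB), so a symmetric 100 Mbit/s link gives network_rps of roughly 100e6 / 131072, i.e. ~763 tokens/sec.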
try:
pipe_recv, pipe_send = mp.Pipe(duplex=False)
process = mp.Process(target=_measure_bits_per_second, args=(pipe_send,))
process.start()
if not pipe_recv.poll(timeout):
process.terminate()
raise RuntimeError(f"speedtest did not finish in {timeout} seconds")
network_info = pipe_recv.recv()
if "exception" in network_info:
raise RuntimeError(f"speedtest failed: {network_info['exception']}")
network_rps = min(network_info["download"], network_info["upload"]) / bits_per_request
if network_rps == 0:
raise RuntimeError("speedtest has returned network_rps == 0")
logger.info(
f"Network throughput: {network_rps:.1f} tokens/sec "
f"({network_info['download'] / 1e6:.2f} Mbit/s on download, "
f"{network_info['upload'] / 1e6:.2f} Mbit/s on upload)"
)
return network_rps
except RuntimeError as e:
logger.info(f"Network throughput is not available: {e}. Using default of {default_speed / 1e6:.2f} Mbit/s")
return default_speed / bits_per_request
def _measure_bits_per_second(pipe_send: mp.Pipe):
try:
s = speedtest.Speedtest()
s.get_servers()
s.get_best_server()
s.download()
s.upload()
pipe_send.send(s.results.dict())
except Exception as e:
pipe_send.send({"exception": repr(e)})
def measure_compute_rps(
config: PretrainedConfig,
device: torch.device,
dtype: torch.dtype,
*,
quant_type: QuantType,
tensor_parallel_devices: Sequence[torch.device],
n_tokens: int,
n_steps: int,
inference: bool,
) -> float:
if not tensor_parallel_devices:
tensor_parallel_devices = (device,)
with torch.inference_mode():
block = config.block_class(config).to(dtype)
block = convert_block(block, 0, config, tensor_parallel_devices, device, quant_type=quant_type, freeze=True)
cache = None
elapsed = 0
for step in range(n_steps + 1):
dummy_input = torch.randn(n_tokens, 1, config.hidden_size, device=device, dtype=dtype)
start_time = time.perf_counter()
_, cache = block.forward(dummy_input, use_cache=True, layer_past=cache if inference else None)
if step >= 1: # Skip the 1st step to exclude the initialization time
elapsed += time.perf_counter() - start_time
device_rps = n_steps * n_tokens / elapsed
devices_repr = get_device_name(device)
if len(tensor_parallel_devices) > 1:
device_names = tuple(map(get_device_name, map(torch.device, tensor_parallel_devices)))
devices_repr = ", ".join(f"{count}x {name}" for name, count in Counter(device_names).most_common())
logger.info(
f"{'Inference' if inference else 'Forward pass'} throughput: {device_rps:.1f} tokens/sec per block "
f"({n_tokens} tokens/batch, {devices_repr}, {get_dtype_name(dtype, quant_type)})"
)
return device_rps
def get_device_name(device: torch.device) -> str:
return f"{torch.cuda.get_device_name(device)} GPU" if device.type == "cuda" else "CPU"
def get_dtype_name(dtype: torch.dtype, quant_type: QuantType) -> str:
name = str(dtype).replace("torch.", "")
if quant_type != QuantType.NONE:
name += f", quantized to {quant_type.name.lower()}"
return name
|
TheGrid-main
|
grid/server/throughput.py
|
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import numpy as np
from hivemind import PeerID, get_logger
from grid.data_structures import RemoteModuleInfo, ServerState
__all__ = ["choose_best_blocks", "should_choose_other_blocks"]
logger = get_logger(__name__)
@dataclass
class Span:
start: int
end: int
throughput: float
state: ServerState
@property
def length(self):
return self.end - self.start
def move_to(self, new_start: int) -> None:
self.start, self.end = new_start, new_start + self.length
def compute_spans(module_infos: List[Optional[RemoteModuleInfo]]) -> Tuple[Dict[PeerID, Span], np.ndarray]:
spans = {}
throughputs = np.zeros(len(module_infos))
for block, module in enumerate(module_infos):
if module is None:
continue
# We sort servers here to ensure that we get exactly the same throughputs for a given set of servers.
# If the order were not defined, we would get slightly different values due to floating point errors,
# which may cause excess block replacements.
for peer_id, server in sorted(module.servers.items()):
if server.state == ServerState.OFFLINE:
continue
if peer_id in spans:
spans[peer_id].start = min(spans[peer_id].start, block)
                spans[peer_id].end = max(spans[peer_id].end, block + 1)
else:
spans[peer_id] = Span(start=block, end=block + 1, throughput=server.throughput, state=server.state)
throughputs[block] += server.throughput
return spans, throughputs
def _choose_best_start(throughputs: np.ndarray, num_blocks: int) -> int:
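    # Pick the window of `num_blocks` consecutive blocks whose sorted per-block throughputs are
    # lexicographically smallest, i.e. prefer covering the least-served (lowest-throughput) blocks first;
    # ties are broken by the next-smallest throughput in each window, and finally by the lowest start index.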
options = ((sorted(throughputs[i : i + num_blocks]), i) for i in range(0, len(throughputs) - num_blocks + 1))
return min(options)[-1]
def choose_best_blocks(num_blocks: int, module_infos: List[Optional[RemoteModuleInfo]]) -> List[int]:
_, throughputs = compute_spans(module_infos)
start = _choose_best_start(throughputs, num_blocks)
return list(range(start, start + num_blocks))
def should_choose_other_blocks(
local_peer_id: PeerID, module_infos: List[Optional[RemoteModuleInfo]], balance_quality: float
) -> bool:
if balance_quality > 1.0:
return True # Forces rebalancing on each check (may be used for debugging purposes)
spans, throughputs = compute_spans(module_infos)
initial_throughput = throughputs.min()
eps = 1e-3
assert local_peer_id in spans, "Span served by this server is not present in the DHT"
local_span = spans[local_peer_id]
throughputs[local_span.start : local_span.end] -= local_span.throughput * (1 + eps)
# Without (1 + eps) here, we would sometimes subtract a value slightly less than local_span.throughput
# due to the floating point error, which would cause excess block replacements.
# Also, subtracting local_span.throughput * (1 + eps) makes _choose_best_start() prefer
# the previous server position in case of other things being almost equal.
if initial_throughput > eps and throughputs.min() <= 0:
return False # Switching blocks would make the swarm disjoint
new_start = _choose_best_start(throughputs, local_span.length)
if local_span.start == new_start:
return False # This server is on its best place already
throughputs[local_span.start : local_span.end] += local_span.throughput * eps
local_span.move_to(new_start)
throughputs[local_span.start : local_span.end] += local_span.throughput
moved = True
while moved:
servers = list(spans.keys())
np.random.shuffle(servers)
moved = False
for peer_id in servers:
span = spans[peer_id]
throughputs[span.start : span.end] -= span.throughput * (1 + eps)
new_start = _choose_best_start(throughputs, span.length)
throughputs[span.start : span.end] += span.throughput * eps
if span.start != new_start:
span.move_to(new_start)
moved = True
throughputs[span.start : span.end] += span.throughput
new_throughput = throughputs.min()
if new_throughput < initial_throughput or new_throughput < eps:
return False
actual_quality = initial_throughput / new_throughput
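    # Example of the decision rule (hypothetical numbers): if the swarm's bottleneck block would go from
    # 8 to 10 tokens/sec after the simulated rebalancing, actual_quality = 8 / 10 = 0.8; with
    # balance_quality = 0.75 the server stays put, while with balance_quality = 0.85 it decides to move.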
logger.info(f"Swarm balance quality: {actual_quality * 100:.1f}%")
return actual_quality < balance_quality - eps
|
TheGrid-main
|
grid/server/block_selection.py
|
from typing import Optional, Union
import torch
from accelerate import init_empty_weights
from transformers import PretrainedConfig
from grid.utils.convert_block import QuantType
def resolve_block_dtype(config: PretrainedConfig, dtype: Union[str, torch.dtype]) -> torch.dtype:
"""If dtype is "auto", resolves it using BloomConfig. Returns `dtype` intact otherwise."""
if dtype not in ("auto", None):
return dtype
if config.torch_dtype not in ("auto", None):
return config.torch_dtype
return torch.bfloat16
def get_block_size(
config: PretrainedConfig,
location: str,
*,
dtype: Optional[Union[str, torch.dtype]] = None,
quant_type: QuantType = QuantType.NONE,
eps: float = 0.01, # eps accounts for ~1% of metainfo for tensor descriptions, quantization tables, etc.
) -> int:
if location == "memory":
assert (
dtype is not None and quant_type is not None
), 'get_block_size(..., location="memory") requires to specify dtype and quant_type for calculations'
with init_empty_weights(include_buffers=True):
block = config.block_class(config)
n_params = sum(param.numel() for param in block.parameters())
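    # Ballpark example (hypothetical numbers): a block with 1e9 parameters kept in bfloat16 (2 bytes each)
    # is estimated at 1e9 * 2 * 1.01 ~ 2.02 GB once the ~1% metainfo overhead (eps) is included.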
if location == "memory":
if quant_type == QuantType.NONE:
dtype = resolve_block_dtype(config, dtype)
bytes_per_value = torch.finfo(dtype).bits // 8
elif quant_type == QuantType.INT8:
bytes_per_value = 1
elif quant_type == QuantType.NF4:
bytes_per_value = 4.25 / 8 # Bitness of NF4 with this config (measured empirically)
else:
raise ValueError(f"Unsupported quant_type={quant_type}")
elif location == "disk":
dtype = resolve_block_dtype(config, "auto")
bytes_per_value = torch.finfo(dtype).bits // 8
return round(n_params * bytes_per_value * (1 + eps))
|
TheGrid-main
|
grid/server/block_utils.py
|
TheGrid-main
|
grid/server/__init__.py
|
|
import asyncio
import math
import threading
import time
from concurrent.futures import Future
from contextlib import asynccontextmanager
from functools import partial
from typing import Optional
import requests
from hivemind.dht import DHT, DHTNode
from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
from hivemind.p2p import P2P, P2PContext, PeerID, ServicerBase
from hivemind.proto import dht_pb2
from hivemind.utils import get_logger
from grid.constants import REACHABILITY_API_URL
logger = get_logger(__name__)
def validate_reachability(peer_id, wait_time: float = 7 * 60, retry_delay: float = 15) -> None:
"""verify that your peer is reachable from a (centralized) validator, whether directly or through a relay"""
for attempt_no in range(math.floor(wait_time / retry_delay) + 1):
try:
r = requests.get(f"{REACHABILITY_API_URL}/api/v1/is_reachable/{peer_id}", timeout=10)
r.raise_for_status()
response = r.json()
if response["success"]:
logger.info("Server is reachable from the Internet. It will appear at https://health.grid.dev soon")
return
if attempt_no == 0:
# Usually, libp2p manages to set up relays before we finish loading blocks.
# In other cases, we may need to wait for up to `wait_time` seconds before it's done.
logger.info("Detected a NAT or a firewall, connecting to libp2p relays. This takes a few minutes")
time.sleep(retry_delay)
except Exception as e:
logger.warning(f"Skipping reachability check because health.grid.dev is down: {repr(e)}")
return
raise RuntimeError(
f"Server has not become reachable from the Internet:\n\n"
f"{response['message']}\n\n"
f"You need to fix your port forwarding and/or firewall settings. How to do that:\n\n"
f" 1. Choose a specific port for the Grid server, for example, 31337.\n"
f" 2. Ensure that this port is accessible from the Internet and not blocked by your firewall.\n"
f" 3. Add these arguments to explicitly announce your IP address and port to other peers:\n"
f" python -m grid.cli.run_server ... --public_ip {response['your_ip']} --port 31337\n"
f" 4. If it does not help, ask for help in our Discord: https://discord.gg/Wuk8BnrEPH\n"
)
def check_direct_reachability(max_peers: int = 5, threshold: float = 0.5, **kwargs) -> Optional[bool]:
"""test if your peer is accessible by others in the swarm with the specified network options in **kwargs"""
async def _check_direct_reachability():
target_dht = await DHTNode.create(client_mode=True, **kwargs)
try:
protocol = ReachabilityProtocol(probe=target_dht.protocol.p2p)
async with protocol.serve(target_dht.protocol.p2p):
successes = requests = 0
for remote_peer in list(target_dht.protocol.routing_table.peer_id_to_uid.keys()):
probe_available = await protocol.call_check(remote_peer=remote_peer, check_peer=target_dht.peer_id)
if probe_available is None:
continue # remote peer failed to check probe
successes += probe_available
requests += 1
if requests >= max_peers:
break
logger.debug(f"Direct reachability: {successes}/{requests}")
return (successes / requests) >= threshold if requests > 0 else None
finally:
await target_dht.shutdown()
return RemoteExpertWorker.run_coroutine(_check_direct_reachability())
STRIPPED_PROBE_ARGS = dict(
dht_mode="client", use_relay=False, auto_nat=False, nat_port_map=False, no_listen=True, startup_timeout=60
)
class ReachabilityProtocol(ServicerBase):
"""Mini protocol to test if a locally running peer is accessible by other devices in the swarm"""
def __init__(self, *, probe: Optional[P2P] = None, wait_timeout: float = 5.0):
self.probe = probe
self.wait_timeout = wait_timeout
self._event_loop = self._stop = None
async def call_check(self, remote_peer: PeerID, *, check_peer: PeerID) -> Optional[bool]:
"""Returns True if remote_peer can reach check_peer, False if it cannot, None if it did not respond"""
try:
request = dht_pb2.PingRequest(peer=dht_pb2.NodeInfo(node_id=check_peer.to_bytes()))
timeout = self.wait_timeout if check_peer == remote_peer else self.wait_timeout * 2
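            # When remote_peer is asked to check some third peer, it must issue its own nested rpc_check
            # (see rpc_check below), so we allow twice the usual wait_timeout in that case.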
response = await self.get_stub(self.probe, remote_peer).rpc_check(request, timeout=timeout)
logger.debug(f"call_check(remote_peer={remote_peer}, check_peer={check_peer}) -> {response.available}")
return response.available
except Exception:
logger.debug(f"Requested {remote_peer} to check {check_peer}, but got:", exc_info=True)
return None
async def rpc_check(self, request: dht_pb2.PingRequest, context: P2PContext) -> dht_pb2.PingResponse:
"""Help another peer to check its reachability"""
response = dht_pb2.PingResponse(available=True)
check_peer = PeerID(request.peer.node_id)
if check_peer != context.local_id: # remote peer wants us to check someone other than ourselves
response.available = await self.call_check(check_peer, check_peer=check_peer) is True
logger.info(
f"reachability.rpc_check(remote_peer=...{str(context.remote_id)[-6:]}, "
f"check_peer=...{str(check_peer)[-6:]}) -> {response.available}"
)
return response
@asynccontextmanager
async def serve(self, p2p: P2P):
try:
await self.add_p2p_handlers(p2p)
yield self
finally:
await self.remove_p2p_handlers(p2p)
@classmethod
def attach_to_dht(cls, dht: DHT, await_ready: bool = False, **kwargs) -> Optional["ReachabilityProtocol"]:
protocol = cls(**kwargs)
ready = Future()
async def _serve_with_probe():
try:
common_p2p = await dht.replicate_p2p()
protocol._event_loop = asyncio.get_event_loop()
protocol._stop = asyncio.Event()
initial_peers = [str(addr) for addr in await common_p2p.get_visible_maddrs(latest=True)]
for info in await common_p2p.list_peers():
initial_peers.extend(f"{addr}/p2p/{info.peer_id}" for addr in info.addrs)
protocol.probe = await P2P.create(initial_peers, **STRIPPED_PROBE_ARGS)
ready.set_result(True)
logger.info("Reachability service started")
async with protocol.serve(common_p2p):
await protocol._stop.wait()
except Exception as e:
logger.debug("Reachability service failed:", exc_info=True)
if not ready.done():
ready.set_exception(e)
finally:
if protocol is not None and protocol.probe is not None:
await protocol.probe.shutdown()
logger.debug("Reachability service shut down")
threading.Thread(target=partial(asyncio.run, _serve_with_probe()), daemon=True).start()
if await_ready:
ready.result() # Propagates startup exceptions, if any
return protocol
def shutdown(self):
if self._event_loop is not None and self._stop is not None:
self._event_loop.call_soon_threadsafe(self._stop.set)
|
TheGrid-main
|
grid/server/reachability.py
|
"""
Utils for fetching pretrained model parts. Currently, this relies on huggingface transformers' from_pretrained code.
If necessary, one can rewrite this to implement a different behavior, such as:
- loading files from a local data source (e.g. S3)
- load files via BitTorrent ( https://pypi.org/project/libtorrent/ ) or IPFS( https://docs.ipfs.io/how-to )
- fetch the weights over IPoAC, using a fleet of trained pigeons ( http://www.faqs.org/rfcs/rfc1149.html )
"""
import json
import time
from typing import Dict, Optional, Union
import torch
import torch.nn as nn
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device
from hivemind.utils.logging import get_logger
from huggingface_hub import get_hf_file_metadata, hf_hub_url
from transformers import PretrainedConfig
from transformers.utils import get_file_from_repo
from grid.constants import DTYPE_MAP
from grid.server.block_utils import resolve_block_dtype
from grid.utils.auto_config import AutoDistributedConfig
from grid.utils.disk_cache import DEFAULT_CACHE_DIR, allow_cache_reads, allow_cache_writes, free_disk_space_for
from grid.utils.hf_auth import always_needs_auth
logger = get_logger(__name__)
def load_pretrained_block(
model_name: str,
block_index: int,
*,
config: Optional[PretrainedConfig] = None,
torch_dtype: Union[torch.dtype, str] = "auto",
revision: Optional[str] = None,
token: Optional[Union[str, bool]] = None,
cache_dir: Optional[str] = None,
max_disk_space: Optional[int] = None,
) -> nn.Module:
if config is None:
config = AutoDistributedConfig.from_pretrained(model_name, use_auth_token=token)
if cache_dir is None:
cache_dir = DEFAULT_CACHE_DIR
assert torch_dtype in DTYPE_MAP.values(), f"torch_dtype must be one of {list(DTYPE_MAP.values())}"
torch_dtype = resolve_block_dtype(config, torch_dtype)
with init_empty_weights():
block = config.block_class(config)
block_prefix = f"{config.block_prefix}.{block_index}."
state_dict = _load_state_dict_from_repo(
model_name,
block_prefix,
revision=revision,
token=token,
cache_dir=cache_dir,
max_disk_space=max_disk_space,
)
# dummy load, check that keys match
report = block.load_state_dict(state_dict, strict=True)
assert not report.missing_keys, f"Some block weights are missing: {report.missing_keys}"
for param_name, _ in block.named_parameters():
assert param_name in state_dict, f"{param_name} not in state dict"
param = state_dict[param_name]
if not str(param.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
param = param.to(torch_dtype)
set_module_tensor_to_device(block, param_name, "cpu", value=param, dtype=param.dtype)
logger.info(f"Loaded {model_name} block {block_index}, {report}")
return block
StateDict = Dict[str, torch.Tensor]
def _load_state_dict_from_repo(
model_name: str,
block_prefix: str,
*,
revision: Optional[str] = None,
token: Optional[Union[str, bool]] = None,
cache_dir: str,
max_disk_space: Optional[int] = None,
) -> StateDict:
if always_needs_auth(model_name) and token is None:
token = True
index_file = get_file_from_repo(
model_name, filename="pytorch_model.bin.index.json", use_auth_token=token, cache_dir=cache_dir
)
if index_file is not None: # Sharded model
with open(index_file) as f:
index = json.load(f)
filenames = {
filename for param_name, filename in index["weight_map"].items() if param_name.startswith(block_prefix)
}
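        # weight_map maps each parameter name to the shard file that stores it, e.g. (hypothetical entry)
        # "transformer.h.3.self_attention.query_key_value.weight" -> "pytorch_model-00002-of-00007.bin";
        # only the shards containing parameters of the requested block are downloaded below.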
if not filenames:
raise RuntimeError(f"Block {block_prefix}* not found in the index: {index['weight_map']}")
else: # Non-sharded model
filenames = {"pytorch_model.bin"}
logger.debug(f"Loading {block_prefix}* from {filenames}")
state_dict = {}
for filename in filenames:
shard_state_dict = _load_state_dict_from_file(
model_name,
filename,
revision=revision,
token=token,
cache_dir=cache_dir,
max_disk_space=max_disk_space,
)
shard_state_dict = {
param_name[len(block_prefix) :]: param
for param_name, param in shard_state_dict.items()
if param_name.startswith(block_prefix)
} # Remove unused parameters from memory
state_dict.update(shard_state_dict)
return state_dict
def _load_state_dict_from_file(
model_name: str,
filename: str,
*,
revision: Optional[str] = None,
token: Optional[Union[str, bool]] = None,
cache_dir: str,
max_disk_space: Optional[int] = None,
delay: float = 30,
) -> StateDict:
# First, try to find the weights locally
try:
with allow_cache_reads(cache_dir):
path = get_file_from_repo(
model_name,
filename,
revision=revision,
use_auth_token=token,
cache_dir=cache_dir,
local_files_only=True,
)
if path is not None:
return torch.load(path, map_location="cpu")
except Exception:
logger.warning(f"Cache for file {filename} is corrupted, it will be downloaded again", exc_info=True)
# If not found, ensure that we have enough disk space to download them (maybe remove something)
while True:
try:
with allow_cache_writes(cache_dir):
url = hf_hub_url(model_name, filename, revision=revision)
file_size = get_hf_file_metadata(url, token=token).size
if file_size is not None:
free_disk_space_for(file_size, cache_dir=cache_dir, max_disk_space=max_disk_space)
else:
logger.warning(f"Failed to fetch size of file {filename} from repo {model_name}")
path = get_file_from_repo(
model_name,
filename,
revision=revision,
use_auth_token=token,
cache_dir=cache_dir,
local_files_only=False,
)
if path is None:
raise RuntimeError(f"File {filename} does not exist in repo {model_name}")
return torch.load(path, map_location="cpu")
except Exception:
logger.warning(f"Failed to load file {filename} from HF Hub (retry in {delay:.0f} sec)", exc_info=True)
time.sleep(delay)
|
TheGrid-main
|
grid/server/from_pretrained.py
|
import os
from hivemind.utils import logging as hm_logging
def initialize_logs():
"""Initialize Grid logging tweaks. This function is called when you import the `grid` module."""
    # The env var GRID_LOGGING=False prevents Grid from doing anything with logs
if os.getenv("GRID_LOGGING", "True").lower() in ("false", "0"):
return
hm_logging.use_hivemind_log_handler("in_root_logger")
# We suppress asyncio error logs by default since they are mostly not relevant for the end user,
    # unless the env var GRID_ASYNCIO_LOGLEVEL overrides this behavior
asyncio_loglevel = os.getenv("GRID_ASYNCIO_LOGLEVEL", "FATAL" if hm_logging.loglevel != "DEBUG" else "DEBUG")
hm_logging.get_logger("asyncio").setLevel(asyncio_loglevel)
|
TheGrid-main
|
grid/utils/logging.py
|
import torch
DUMMY = torch.empty(0) # dummy tensor that replaces empty prompt or adapter parameters
def is_dummy(tensor: torch.Tensor):
return tensor.numel() == 0
|
TheGrid-main
|
grid/utils/misc.py
|