python_code (string, 0–1.02M chars) | repo_name (string, 9–48 chars) | file_path (string, 5–114 chars)
---|---|---|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Passage, query, answers and answer classification with explanations."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@unpublished{eraser2019,
title = {ERASER: A Benchmark to Evaluate Rationalized NLP Models},
author = {Jay DeYoung and Sarthak Jain and Nazneen Fatema Rajani and Eric Lehman and Caiming Xiong and Richard Socher and Byron C. Wallace}
}
@inproceedings{MultiRC2018,
author = {Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth},
title = {Looking Beyond the Surface: A Challenge Set for Reading Comprehension over Multiple Sentences},
booktitle = {NAACL},
year = {2018}
}
"""
_DESCRIPTION = """
Eraser Multi RC is a dataset for queries over multi-line passages, along with
answers and a rationale. Each example in this dataset has the following 5 parts:
1. A Multi-line Passage
2. A Query about the passage
3. An Answer to the query
4. A Classification as to whether the answer is right or wrong
5. An Explanation justifying the classification
"""
_DOWNLOAD_URL = "http://www.eraserbenchmark.com/zipped/multirc.tar.gz"
class EraserMultiRc(nlp.GeneratorBasedBuilder):
"""Multi Sentence Reasoning with Explanations (Eraser Benchmark)."""
VERSION = nlp.Version("0.1.1")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"passage": nlp.Value("string"),
"query_and_answer": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["False", "True"]),
"evidences": nlp.features.Sequence(nlp.Value("string")),
}
),
supervised_keys=None,
homepage="https://cogcomp.seas.upenn.edu/multirc/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
data_dir = os.path.join(dl_dir, "multirc")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "val.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "test.jsonl")},
),
]
def _generate_examples(self, data_dir, filepath):
"""Yields examples."""
multirc_dir = os.path.join(data_dir, "docs")
with open(filepath) as f:
for line in f:
row = json.loads(line)
evidences = []
for evidence in row["evidences"][0]:
docid = evidence["docid"]
evidences.append(evidence["text"])
passage_file = os.path.join(multirc_dir, docid)
with open(passage_file) as f1:
passage_text = f1.read()
yield row["annotation_id"], {
"passage": passage_text,
"query_and_answer": row["query"],
"label": row["classification"],
"evidences": evidences,
}
|
nlp-master
|
datasets/eraser_multi_rc/eraser_multi_rc.py
|
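A minimal usage sketch for the loader above (not part of the original script), assuming the legacy HuggingFace `nlp` library is installed and the builder is registered under the directory name shown in the file path:
import nlp

# Load the train split defined by EraserMultiRc._split_generators above.
eraser = nlp.load_dataset("eraser_multi_rc", split="train")
example = eraser[0]
print(example["passage"][:200])                 # multi-line passage text
print(example["query_and_answer"])              # query plus candidate answer
print(example["label"], example["evidences"])   # 0/1 classification and rationale sentences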
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
_CITATION = """\
@article{go2009twitter,
title={Twitter sentiment classification using distant supervision},
author={Go, Alec and Bhayani, Richa and Huang, Lei},
journal={CS224N project report, Stanford},
volume={1},
number={12},
pages={2009},
year={2009}
}
"""
_DESCRIPTION = """\
Sentiment140 consists of Twitter messages with emoticons, which are used as noisy labels for
sentiment classification. For more detailed information please refer to the paper.
"""
_URL = "http://help.sentiment140.com/home"
_DATA_URL = "http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip"
_TEST_FILE_NAME = "testdata.manual.2009.06.14.csv"
_TRAIN_FILE_NAME = "training.1600000.processed.noemoticon.csv"
class Sentiment140Config(nlp.BuilderConfig):
"""BuilderConfig for Break"""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for BlogAuthorship
Args:
data_url: `string`, url to the dataset (word or raw level)
**kwargs: keyword arguments forwarded to super.
"""
super(Sentiment140Config, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.data_url = data_url
class Sentiment140(nlp.GeneratorBasedBuilder):
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
Sentiment140Config(
name="sentiment140",
data_url=_DATA_URL,
description="sentiment classification dataset. Twitter messages are classified as either 'positive'=0, 'neutral'=1 or 'negative'=2.",
)
]
def _info(self):
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"text": nlp.Value("string"),
"date": nlp.Value("string"),
"user": nlp.Value("string"),
"sentiment": nlp.Value("int32"),
"query": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_DATA_URL)
test_csv_file = os.path.join(data_dir, _TEST_FILE_NAME)
train_csv_file = os.path.join(data_dir, _TRAIN_FILE_NAME)
if self.config.name == "sentiment140":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"file_path": train_csv_file},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"file_path": test_csv_file},
),
]
else:
raise NotImplementedError("{} does not exist".format(self.config.name))
def _generate_examples(self, file_path):
"""Yields examples."""
with open(file_path, encoding="ISO-8859-1") as f:
data = csv.reader(f, delimiter=",", quotechar='"')
for row_id, row in enumerate(data):
sentiment, tweet_id, date, query, user_name, message = row
yield "{}_{}".format(row_id, tweet_id), {
"text": message,
"date": date,
"user": user_name,
"sentiment": int(sentiment),
"query": query,
}
|
nlp-master
|
datasets/sentiment140/sentiment140.py
|
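A short usage sketch for the single `sentiment140` config above (an addition, assuming the legacy `nlp` library); the test CSV carries the manually annotated labels:
import nlp

sent140_test = nlp.load_dataset("sentiment140", split="test")
row = sent140_test[0]
print(row["sentiment"], row["user"], row["text"])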
"""TODO(wiqa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(wiqa): BibTeX citation
_CITATION = """\
@article{wiqa,
author = {Niket Tandon and Bhavana Dalvi Mishra and Keisuke Sakaguchi and Antoine Bosselut and Peter Clark},
title = {WIQA: A dataset for "What if..." reasoning over procedural text},
journal = {arXiv:1909.04739v1},
year = {2019},
}
"""
# TODO(wiqa):
_DESCRIPTION = """\
The WIQA dataset V1 has 39705 questions containing a perturbation and a possible effect in the context of a paragraph.
The dataset is split into 29808 train questions, 6894 dev questions and 3003 test questions.
"""
_URL = "https://public-aristo-processes.s3-us-west-2.amazonaws.com/wiqa_dataset_no_explanation_v2/wiqa-dataset-v2-october-2019.zip"
URl = "s3://ai2-s2-research-public/open-corpus/2020-04-10/"
class Wiqa(nlp.GeneratorBasedBuilder):
"""TODO(wiqa): Short description of my dataset."""
# TODO(wiqa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(wiqa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"question_stem": nlp.Value("string"),
"question_para_step": nlp.features.Sequence({"steps": nlp.Value("string")}),
"answer_label": nlp.Value("string"),
"answer_label_as_choice": nlp.Value("string"),
"choices": nlp.features.Sequence({"text": nlp.Value("string"), "label": nlp.Value("string")}),
"metadata_question_id": nlp.Value("string"),
"metadata_graph_id": nlp.Value("string"),
"metadata_para_id": nlp.Value("string"),
"metadata_question_type": nlp.Value("string"),
"metadata_path_len": nlp.Value("int32"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/wiqa",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(wiqa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "dev.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(wiqa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"question_stem": data["question"]["stem"],
"question_para_step": {"steps": data["question"]["para_steps"]},
"answer_label": data["question"]["answer_label"],
"answer_label_as_choice": data["question"]["answer_label_as_choice"],
"choices": {
"text": [choice["text"] for choice in data["question"]["choices"]],
"label": [choice["label"] for choice in data["question"]["choices"]],
},
"metadata_question_id": data["metadata"]["ques_id"],
"metadata_graph_id": data["metadata"]["graph_id"],
"metadata_para_id": data["metadata"]["para_id"],
"metadata_question_type": data["metadata"]["question_type"],
"metadata_path_len": data["metadata"]["path_len"],
}
|
nlp-master
|
datasets/wiqa/wiqa.py
|
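A minimal loading sketch for the WIQA builder above (assumed usage of the legacy `nlp` library, not part of the original file); choices decode as parallel "text"/"label" lists:
import nlp

wiqa_dev = nlp.load_dataset("wiqa", split="validation")
q = wiqa_dev[0]
print(q["question_stem"])
print(list(zip(q["choices"]["label"], q["choices"]["text"])), "->", q["answer_label_as_choice"])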
"""TODO(cornell_movie_dialog): Add a description here."""
from __future__ import absolute_import, division, print_function
import ast
import csv
import os
import nlp
# TODO(cornell_movie_dialog): BibTeX citation
_CITATION = """\
@InProceedings{Danescu-Niculescu-Mizil+Lee:11a,
author={Cristian Danescu-Niculescu-Mizil and Lillian Lee},
title={Chameleons in imagined conversations:
A new approach to understanding coordination of linguistic style in dialogs.},
booktitle={Proceedings of the
Workshop on Cognitive Modeling and Computational Linguistics, ACL 2011},
year={2011}
}
"""
# TODO(cornell_movie_dialog):
_DESCRIPTION = """\
This corpus contains a large metadata-rich collection of fictional conversations extracted from raw movie scripts:
- 220,579 conversational exchanges between 10,292 pairs of movie characters
- involves 9,035 characters from 617 movies
- in total 304,713 utterances
- movie metadata included:
- genres
- release year
- IMDB rating
- number of IMDB votes
- character metadata included:
- gender (for 3,774 characters)
- position on movie credits (3,321 characters)
"""
_URL = "https://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip"
class CornellMovieDialog(nlp.GeneratorBasedBuilder):
"""TODO(cornell_movie_dialog): Short description of my dataset."""
# TODO(cornell_movie_dialog): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(cornell_movie_dialog): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"movieID": nlp.Value("string"),
"movieTitle": nlp.Value("string"),
"movieYear": nlp.Value("string"),
"movieIMDBRating": nlp.Value("string"),
"movieNoIMDBVotes": nlp.Value("string"),
"movieGenres": nlp.features.Sequence({"genre": nlp.Value("string")}),
"characterID1": nlp.Value("string"),
"characterID2": nlp.Value("string"),
"characterName1": nlp.Value("string"),
"characterName2": nlp.Value("string"),
"utterance": nlp.features.Sequence({"text": nlp.Value("string"), "LineID": nlp.Value("string")})
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(cornell_movie_dialog): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepaths": os.path.join(dl_dir, "cornell movie-dialogs corpus")},
),
]
def _generate_examples(self, filepaths):
"""Yields examples."""
# TODO(cornell_movie_dialog): Yields (key, example) tuples from the dataset
movie_char_file = os.path.join(filepaths, "movie_characters_metadata.txt")
movie_conv_file = os.path.join(filepaths, "movie_conversations.txt")
movie_lines_file = os.path.join(filepaths, "movie_lines.txt")
movie_titles_file = os.path.join(filepaths, "movie_titles_metadata.txt")
with open(movie_char_file, "rb") as f:
movie_char_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]
with open(movie_conv_file, "rb") as f:
movie_conv_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]
with open(movie_lines_file, "rb") as f:
movie_lines_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]
with open(movie_titles_file, "rb") as f:
movie_titles_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]
## looping over movie conversation file
for id_, conv in enumerate(movie_conv_data):
char_id_1 = conv[0]
char_id_2 = conv[1]
movie_id = conv[2]
line_ids = conv[-1].replace("\n", "")
line_ids = ast.literal_eval(line_ids.strip())
lines_texts = []
## searching text corresponding to each lineID in line_ids in movie lines file
for line_id in line_ids:
i = 0
while i < len(movie_lines_data) and movie_lines_data[i][0].strip() != line_id:
i += 1
lines_texts.append(movie_lines_data[i][-1])  # the utterance text is the last "+++$+++" field, not the line ID
## look for char names in movie character file
j = 0
while j < len(movie_char_data) and movie_char_data[j][0].strip() != char_id_1.strip():
j += 1
char_name_1 = movie_char_data[j][1] # if j < len(movie_char_data) else ''
movie_title = movie_char_data[j][3] # if j < len(movie_char_data) else ''
k = 0
while k < len(movie_char_data) and movie_char_data[k][0].strip() != char_id_2.strip():
k += 1
char_name_2 = movie_char_data[k][1]
##look for movie year, IMDBRating, genre, no_imdb_voting in movie tiles file
l = 0
while l < len(movie_titles_data) and movie_titles_data[l][0].strip() != movie_id.strip():
l += 1
movie_year = movie_titles_data[l][2]
imdb_rating = movie_titles_data[l][3]
no_imdb_vote = movie_titles_data[l][4]
genre = movie_titles_data[l][5].replace("\n", "").strip()
movie_genres = ast.literal_eval(genre)
yield id_, {
"movieID": movie_id,
"movieTitle": movie_title,
"movieYear": movie_year,
"movieIMDBRating": imdb_rating,
"movieNoIMDBVotes": no_imdb_vote,
"movieGenres": {"genre": movie_genres},
"characterID1": char_id_1,
"characterID2": char_id_2,
"characterName1": char_name_1,
"characterName2": char_name_2,
"utterance": {"text": lines_texts, "LineID": line_ids},
}
|
nlp-master
|
datasets/cornell_movie_dialog/cornell_movie_dialog.py
|
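A usage sketch for the Cornell Movie-Dialogs builder above (added here, assuming the legacy `nlp` library); only a train split is defined and each example is one conversation between two characters:
import nlp

dialogs = nlp.load_dataset("cornell_movie_dialog", split="train")
conv = dialogs[0]
print(conv["movieTitle"], conv["movieYear"], conv["movieGenres"]["genre"])
for line_id, text in zip(conv["utterance"]["LineID"], conv["utterance"]["text"]):
    print(line_id, text)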
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""IMDB movie reviews dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_DESCRIPTION = """\
Large Movie Review Dataset.
This is a dataset for binary sentiment classification containing substantially \
more data than previous benchmark datasets. We provide a set of 25,000 highly \
polar movie reviews for training, and 25,000 for testing. There is additional \
unlabeled data for use as well.\
"""
_CITATION = """\
@InProceedings{maas-EtAl:2011:ACL-HLT2011,
author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
title = {Learning Word Vectors for Sentiment Analysis},
booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
month = {June},
year = {2011},
address = {Portland, Oregon, USA},
publisher = {Association for Computational Linguistics},
pages = {142--150},
url = {http://www.aclweb.org/anthology/P11-1015}
}
"""
_DOWNLOAD_URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
class IMDBReviewsConfig(nlp.BuilderConfig):
"""BuilderConfig for IMDBReviews."""
def __init__(self, **kwargs):
"""BuilderConfig for IMDBReviews.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(IMDBReviewsConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Imdb(nlp.GeneratorBasedBuilder):
"""IMDB movie reviews dataset."""
BUILDER_CONFIGS = [IMDBReviewsConfig(name="plain_text", description="Plain text",)]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{"text": nlp.Value("string"), "label": nlp.features.ClassLabel(names=["neg", "pos"])}
),
supervised_keys=None,
homepage="http://ai.stanford.edu/~amaas/data/sentiment/",
citation=_CITATION,
)
def _vocab_text_gen(self, archive):
for _, ex in self._generate_examples(os.path.join(archive, "aclImdb", "train")):
yield ex["text"]
def _split_generators(self, dl_manager):
arch_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
data_dir = os.path.join(arch_path, "aclImdb")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "train")}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"directory": os.path.join(data_dir, "test")}),
nlp.SplitGenerator(
name=nlp.Split("unsupervised"),
gen_kwargs={"directory": os.path.join(data_dir, "train"), "labeled": False},
),
]
def _generate_examples(self, directory, labeled=True):
"""Generate IMDB examples."""
# For labeled examples, extract the label from the path.
if labeled:
files = {
"pos": sorted(os.listdir(os.path.join(directory, "pos"))),
"neg": sorted(os.listdir(os.path.join(directory, "neg"))),
}
for key in files:
for id_, file in enumerate(files[key]):
filepath = os.path.join(directory, key, file)
with open(filepath) as f:
yield key + "_" + str(id_), {"text": f.read(), "label": key}
else:
unsup_files = sorted(os.listdir(os.path.join(directory, "unsup")))
for id_, file in enumerate(unsup_files):
filepath = os.path.join(directory, "unsup", file)
with open(filepath) as f:
yield id_, {"text": f.read(), "label": -1}
|
nlp-master
|
datasets/imdb/imdb.py
|
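A minimal sketch of loading the IMDB builder above (assumes the legacy `nlp` library); the "plain_text" config exposes train, test and an extra "unsupervised" split whose label is -1:
import nlp

imdb_train = nlp.load_dataset("imdb", split="train")
print(imdb_train[0]["label"], imdb_train[0]["text"][:120])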
"""TODO(mlqa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(mlqa): BibTeX citation
_CITATION = """\
@article{lewis2019mlqa,
title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
journal={arXiv preprint arXiv:1910.07475},
year={2019}
}
"""
# TODO(mlqa):
_DESCRIPTION = """\
MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
4 different languages on average.
"""
_URL = "https://dl.fbaipublicfiles.com/MLQA/"
_DEV_TEST_URL = "MLQA_V1.zip"
_TRANSLATE_TEST_URL = "mlqa-translate-test.tar.gz"
_TRANSLATE_TRAIN_URL = "mlqa-translate-train.tar.gz"
_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"]
_TRANSLATE_LANG = ["ar", "de", "vi", "zh", "es", "hi"]
class MlqaConfig(nlp.BuilderConfig):
def __init__(self, data_url, **kwargs):
"""BuilderConfig for MLQA
Args:
data_url: `string`, url to the dataset
**kwargs: keyword arguments forwarded to super.
"""
super(MlqaConfig, self).__init__(version=nlp.Version("1.0.0",), **kwargs)
self.data_url = data_url
class Mlqa(nlp.GeneratorBasedBuilder):
"""TODO(mlqa): Short description of my dataset."""
# TODO(mlqa): Set up version.
VERSION = nlp.Version("1.0.0")
BUILDER_CONFIGS = (
[
MlqaConfig(
name="mlqa-translate-train." + lang,
data_url=_URL + _TRANSLATE_TRAIN_URL,
description="Machine-translated data for Translate-train (SQuAD Train and Dev sets machine-translated into "
"Arabic, German, Hindi, Vietnamese, Simplified Chinese and Spanish)",
)
for lang in _LANG
if lang != "en"
]
+ [
MlqaConfig(
name="mlqa-translate-test." + lang,
data_url=_URL + _TRANSLATE_TEST_URL,
description="Machine-translated data for Translate-Test (MLQA-test set machine-translated into English) ",
)
for lang in _LANG
if lang != "en"
]
+ [
MlqaConfig(
name="mlqa." + lang1 + "." + lang2,
data_url=_URL + _DEV_TEST_URL,
description="development and test splits",
)
for lang1 in _LANG
for lang2 in _LANG
]
)
def _info(self):
# TODO(mlqa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"context": nlp.Value("string"),
"questions": nlp.features.Sequence({"question": nlp.Value("string")}),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
"ids": nlp.features.Sequence({"idx": nlp.Value("string")})
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/facebookresearch/MLQA",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(mlqa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
if self.config.name.startswith("mlqa-translate-train"):
dl_file = dl_manager.download_and_extract(self.config.data_url)
lang = self.config.name.split(".")[-1]
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(dl_file, "mlqa-translate-train"),
"{}_squad-translate-train-train-v1.1.json".format(lang),
),
"lang": lang,
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(dl_file, "mlqa-translate-train"),
"{}_squad-translate-train-dev-v1.1.json".format(lang),
),
"lang": lang,
},
),
]
else:
if self.config.name.startswith("mlqa."):
dl_file = dl_manager.download_and_extract(self.config.data_url)
name = self.config.name.split(".")
l1, l2 = name[1:]
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(dl_file, "MLQA_V1/test"),
"test-context-{}-question-{}.json".format(l1, l2),
),
"lang": (l1, l2),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(dl_file, "MLQA_V1/dev"), "dev-context-{}-question-{}.json".format(l1, l2)
),
"lang": (l1, l2),
},
),
]
else:
if self.config.name.startswith("mlqa-translate-test"):
dl_file = dl_manager.download_and_extract(self.config.data_url)
lang = self.config.name.split(".")[-1]
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(dl_file, "mlqa-translate-test"),
"translate-test-context-{}-question-{}.json".format(lang, lang),
),
"lang": lang,
},
),
]
def _generate_examples(self, filepath, lang):
"""Yields examples."""
# TODO(mlqa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id1, examples in enumerate(data["data"]):
for id2, example in enumerate(examples["paragraphs"]):
context = example["context"]
questions = [qa["question"] for qa in example["qas"]]
answers = [qa["answers"] for qa in example["qas"]]
ids = [qa["id"] for qa in example["qas"]]
answers_start = [answer[0]["answer_start"] for answer in answers]
answers_text = [answer[0]["text"] for answer in answers]
yield str(id1) + "-" + str(id2), {
"context": context,
"questions": {"question": questions,},
"answers": {"answer_start": answers_start, "text": answers_text},
"ids": {"idx": ids},
}
|
nlp-master
|
datasets/mlqa/mlqa.py
|
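A usage sketch for the MLQA builder above (an illustrative addition, assuming the legacy `nlp` library); config names follow the patterns defined in BUILDER_CONFIGS, e.g. "mlqa.<context_lang>.<question_lang>" for dev/test and "mlqa-translate-train.<lang>" for machine-translated training data:
import nlp

mlqa_en_de = nlp.load_dataset("mlqa", "mlqa.en.de", split="validation")
sample = mlqa_en_de[0]
print(sample["context"][:150])
print(sample["questions"]["question"][0], sample["answers"]["text"][0])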
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reddit dataset using tldr as summaries."""
import json
import os
import nlp
_CITATION = """
@inproceedings{volske-etal-2017-tl,
title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},
author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},
booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
month = {sep},
year = {2017},
address = {Copenhagen, Denmark},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W17-4508},
doi = {10.18653/v1/W17-4508},
pages = {59--63},
abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},
}
"""
_DESCRIPTION = """
This corpus contains preprocessed posts from the Reddit dataset.
The dataset consists of 3,848,330 posts with an average length of 270 words for content,
and 28 words for the summary.
Features include strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.
Content is used as document and summary is used as summary.
"""
_URL = "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1"
_DOCUMENT = "content"
_SUMMARY = "summary"
_ADDITIONAL_FEATURES = ["author", "body", "normalizedBody", "subreddit", "subreddit_id", "id"]
class Reddit(nlp.GeneratorBasedBuilder):
"""Reddit Dataset."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({k: nlp.Value("string") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}),
supervised_keys=None,
homepage="https://github.com/webis-de/webis-tldr-17-corpus",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"path": os.path.join(dl_path, "corpus-webis-tldr-17.json")},
)
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path, "rb") as f:
for i, line in enumerate(f):
# possible keys are:
# author: string (nullable = true)
# body: string (nullable = true)
# normalizedBody: string (nullable = true)
# content: string (nullable = true)
# content_len: long (nullable = true)
# summary: string (nullable = true)
# summary_len: long (nullable = true)
# id: string (nullable = true)
# subreddit: string (nullable = true)
# subreddit_id: string (nullable = true)
# title: string (nullable = true)
d = json.loads(line)
if _SUMMARY in d and _DOCUMENT in d:
yield i, {k: d.get(k, "") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
|
nlp-master
|
datasets/reddit/reddit.py
|
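A minimal loading sketch for the Reddit TL;DR builder above (assumed usage of the legacy `nlp` library); there is a single train split, with "content" as the document and "summary" as the author-provided TL;DR:
import nlp

reddit = nlp.load_dataset("reddit", split="train")
post = reddit[0]
print(post["subreddit"], "|", post["summary"])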
"""TODO(race): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(race): BibTeX citation
_CITATION = """\
@article{lai2017large,
title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
journal={arXiv preprint arXiv:1704.04683},
year={2017}
}
"""
# TODO(race):
_DESCRIPTION = """\
Race is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
dataset is collected from English examinations in China, which are designed for middle school and high school students.
The dataset can serve as the training and test sets for machine comprehension.
"""
_URL = "http://www.cs.cmu.edu/~glai1/data/race/RACE.tar.gz"
class Race(nlp.GeneratorBasedBuilder):
"""TODO(race): Short description of my dataset."""
# TODO(race): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(race): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"article": nlp.Value("string"),
"answer": nlp.Value("string"),
"question": nlp.Value("string"),
"options": nlp.features.Sequence({"option": nlp.Value("string")})
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://www.cs.cmu.edu/~glai1/data/race/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(race): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"files": sorted(os.listdir(os.path.join(dl_dir, "RACE/test/high"))),
"filespath": os.path.join(dl_dir, "RACE/test/high"),
},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"files": sorted(os.listdir(os.path.join(dl_dir, "RACE/train/high"))),
"filespath": os.path.join(dl_dir, "RACE/train/high"),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"files": sorted(os.listdir(os.path.join(dl_dir, "RACE/dev/high"))),
"filespath": os.path.join(dl_dir, "RACE/dev/high"),
},
),
]
def _generate_examples(self, files, filespath):
"""Yields examples."""
# TODO(race): Yields (key, example) tuples from the dataset
for file in files:
filepath = os.path.join(filespath, file)
with open(filepath) as f:
data = json.load(f)
questions = data["questions"]
answers = data["answers"]
options = data["options"]
for i in range(len(questions)):
question = questions[i]
answer = answers[i]
option = options[i]
yield file + "_" + str(i), {  # prefix with the filename so keys stay unique across files
"article": data["article"],
"question": question,
"answer": answer,
"options": {"option": option},
}
|
nlp-master
|
datasets/race/race.py
|
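A usage sketch for the RACE builder above (not part of the original script, assuming the legacy `nlp` library); note this builder only reads the "high" subfolders, and options decode as an {"option": [...]} dict:
import nlp

race_test = nlp.load_dataset("race", split="test")
item = race_test[0]
print(item["question"], item["options"]["option"], "->", item["answer"])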
"""TODO(coqa): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import nlp
# TODO(coqa): BibTeX citation
_CITATION = """\
@article{reddy2018coqa,
author = {Reddy, Siva and Chen, Danqi and Manning, Christopher D.},
title = {CoQA: A Conversational Question Answering Challenge},
journal = {arXiv},
year = {2018},
}
"""
# TODO(coqa):
_DESCRIPTION = """\
CoQA: A Conversational Question Answering Challenge
"""
_TRAIN_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json"
_DEV_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json"
#
# class CoqaConfig(nlp.BuilderConfig):
# """BuilderConfig for Coqa."""
#
# def __init__(self,
# story,
# source,
# questions,
# answers,
# citation,
# description,
# additional_answers=None,
# **kwargs):
# """BuilderConfig for Coca.
#
# Args:
# story: `text`, context
# source: `text`, source of the story
# questions `Sequence` set of questions
# answers: `Sequence` set of answers to the questions
# data_url: `string`, url to download the file from
# citation: `string`, citation for the data set
# additional_answers: `Sequence`, in the dev set questions have also set of additional answers
# **kwargs: keyword arguments forwarded to super.
# """
# super(CoqaConfig, self).__init__(
# version=nlp.Version(
# "1.0.0",
# "New split API (https://tensorflow.org/datasets/splits)"),
# **kwargs)
# self.story = story
# self.source = source
# self.questions = questions
# self.answers = answers
# self.additional_answers = additional_answers
# self.citation = citation
# self.description = description
class Coqa(nlp.GeneratorBasedBuilder):
"""TODO(coqa): Short description of my dataset."""
# TODO(coqa): Set up version.
VERSION = nlp.Version("1.0.0")
# BUILDER_CONFIGS = CoqaConfig(
# story= 'story',
# source='source',
# questions='questions',
# answers='answers',
# additional_answers='additional_answers',
# description= _DESCRIPTION,
# citation= _CITATION
#
# )
def _info(self):
# TODO(coqa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"source": nlp.Value("string"),
"story": nlp.Value("string"),
"questions": nlp.features.Sequence({"input_text": nlp.Value("string"),}),
"answers": nlp.features.Sequence(
{
"input_text": nlp.Value("string"),
"answer_start": nlp.Value("int32"),
"answer_end": nlp.Value("int32"),
}
),
# ## the following feature allows taking into account additional answers in the validation set
# 'additional_answers': nlp.features.Sequence({
# "input_texts": nlp.Value('int32'),
# "answers_start": nlp.Value('int32'),
# "answers_end": nlp.Value('int32')
# }),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://stanfordnlp.github.io/coqa/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(coqa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {"train": _TRAIN_DATA_URL, "dev": _DEV_DATA_URL}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "split": "train"}
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"}
),
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
# TODO(coqa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for row in data["data"]:
questions = [question["input_text"] for question in row["questions"]]
story = row["story"]
source = row["source"]
answers_start = [answer["span_start"] for answer in row["answers"]]
answers_end = [answer["span_end"] for answer in row["answers"]]
answers = [answer["input_text"] for answer in row["answers"]]
# add_answers = row['additional_answers']
# add_input_tests = []
# add_start_answers = []
# add_end_answers = []
# for key in add_answers:
# add_answers_key = add_answers[key]
# add_input_tests.append([add_answer['input_text'] for add_answer in add_answers_key])
# add_start_answers.append([add_answer['span_start'] for add_answer in add_answers_key])
# add_end_answers.append([add_answer['span_end'] for add_answer in add_answers_key])
yield row["id"], {
"source": source,
"story": story,
"questions": {"input_text": questions,},
"answers": {"input_text": answers, "answer_start": answers_start, "answer_end": answers_end}
# 'additional_answers': {
# "input_texts": add_input_tests ,
# "answers_start": add_start_answers,
# "answers_end": add_end_answers,
# }
}
|
nlp-master
|
datasets/coqa/coqa.py
|
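A minimal sketch for the CoQA builder above (an added example, assuming the legacy `nlp` library); questions and answers are aligned sequences over the same story:
import nlp

coqa_dev = nlp.load_dataset("coqa", split="validation")
dialog = coqa_dev[0]
for q, a in zip(dialog["questions"]["input_text"], dialog["answers"]["input_text"]):
    print("Q:", q, "| A:", a)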
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Definite Pronoun Resolution Dataset."""
from __future__ import absolute_import, division, print_function
import nlp
_CITATION = """\
@inproceedings{rahman2012resolving,
title={Resolving complex cases of definite pronouns: the winograd schema challenge},
author={Rahman, Altaf and Ng, Vincent},
booktitle={Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning},
pages={777--789},
year={2012},
organization={Association for Computational Linguistics}
}"""
_DESCRIPTION = """\
Composed by 30 students from one of the author's undergraduate classes. These
sentence pairs cover topics ranging from real events (e.g., Iran's plan to
attack the Saudi ambassador to the U.S.) to events/characters in movies (e.g.,
Batman) and purely imaginary situations, largely reflecting the pop culture as
perceived by the American kids born in the early 90s. Each annotated example
spans four lines: the first line contains the sentence, the second line contains
the target pronoun, the third line contains the two candidate antecedents, and
the fourth line contains the correct antecedent. If the target pronoun appears
more than once in the sentence, its first occurrence is the one to be resolved.
"""
_DATA_URL_PATTERN = "http://www.hlt.utdallas.edu/~vince/data/emnlp12/{}.c.txt"
class DefinitePronounResolution(nlp.GeneratorBasedBuilder):
"""The Definite Pronoun Resolution Dataset."""
BUILDER_CONFIGS = [
nlp.BuilderConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text import of the Definite Pronoun Resolution Dataset.", # pylint: disable=line-too-long
)
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"sentence": nlp.Value("string"),
"pronoun": nlp.Value("string"),
"candidates": nlp.features.Sequence(nlp.Value("string"), length=2),
"label": nlp.features.ClassLabel(num_classes=2),
}
),
supervised_keys=("sentence", "label"),
homepage="http://www.hlt.utdallas.edu/~vince/data/emnlp12/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
files = dl_manager.download_and_extract(
{"train": _DATA_URL_PATTERN.format("train"), "test": _DATA_URL_PATTERN.format("test"),}
)
return [
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": files["test"]}),
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": files["train"]}),
]
def _generate_examples(self, filepath):
with open(filepath) as f:
line_num = -1
while True:
line_num += 1
sentence = f.readline().strip()
pronoun = f.readline().strip()
candidates = [c.strip() for c in f.readline().strip().split(",")]
correct = f.readline().strip()
f.readline()
if not sentence:
break
yield line_num, {
"sentence": sentence,
"pronoun": pronoun,
"candidates": candidates,
"label": candidates.index(correct),
}
|
nlp-master
|
datasets/definite_pronoun_resolution/definite_pronoun_resolution.py
|
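A usage sketch for the Definite Pronoun Resolution builder above (assumed, using the legacy `nlp` library); "label" is a ClassLabel index into the two-element "candidates" sequence:
import nlp

dpr = nlp.load_dataset("definite_pronoun_resolution", "plain_text", split="train")
ex = dpr[0]
print(ex["sentence"], "| pronoun:", ex["pronoun"], "| antecedent:", ex["candidates"][ex["label"]])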
"""TODO(fquad): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(fquad): BibTeX citation
_CITATION = """\
@ARTICLE{2020arXiv200206071,
author = {d'Hoffschmidt, Martin and Vidal, Maxime and
Belblidia, Wacim and Brendlé, Tom},
title = "{FQuAD: French Question Answering Dataset}",
journal = {arXiv e-prints},
keywords = {Computer Science - Computation and Language},
year = "2020",
month = "Feb",
eid = {arXiv:2002.06071},
pages = {arXiv:2002.06071},
archivePrefix = {arXiv},
eprint = {2002.06071},
primaryClass = {cs.CL}
}
"""
# TODO(fquad):
_DESCRIPTION = """\
FQuAD: French Question Answering Dataset
We introduce FQuAD, a native French Question Answering Dataset. FQuAD contains 25,000+ question and answer pairs.
Finetuning CamemBERT on FQuAD yields an F1 score of 88% and an exact match of 77.9%.
"""
_URL = "https://storage.googleapis.com/illuin/fquad"
_TRAIN_DATA = "train.json.zip"
_VALID_DATA = "valid.json.zip"
class Fquad(nlp.GeneratorBasedBuilder):
"""TODO(fquad): Short description of my dataset."""
# TODO(fquad): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(fquad): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"context": nlp.Value("string"),
"questions": nlp.features.Sequence({"question": nlp.Value("string"),}),
"answers": nlp.features.Sequence(
{"texts": nlp.Value("string"), "answers_starts": nlp.Value("int32")}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://fquad.illuin.tech/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(fquad): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
download_urls = {"train": os.path.join(_URL, _TRAIN_DATA), "valid": os.path.join(_URL, _VALID_DATA)}
dl_dir = dl_manager.download_and_extract(download_urls)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir["train"], "train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir["valid"], "valid.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(fquad): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id1, examples in enumerate(data["data"]):
for id2, example in enumerate(examples["paragraphs"]):
questions = [question["question"] for question in example["qas"]]
answers = [answer["answers"] for answer in example["qas"]]
texts = [answer[0]["text"] for answer in answers]
answers_starts = [answer[0]["answer_start"] for answer in answers]
yield str(id1) + "_" + str(id2), {
"context": example["context"],
"questions": {"question": questions,},
"answers": {"texts": texts, "answers_starts": answers_starts},
}
|
nlp-master
|
datasets/fquad/fquad.py
|
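A minimal loading sketch for the FQuAD builder above (an addition, assuming the legacy `nlp` library); answers decode as parallel "texts"/"answers_starts" lists per context:
import nlp

fquad_valid = nlp.load_dataset("fquad", split="validation")
para = fquad_valid[0]
print(para["questions"]["question"][0], "->", para["answers"]["texts"][0])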
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TED talk multilingual data set."""
from __future__ import absolute_import, division, print_function
import csv
import os
import six
import nlp
_DESCRIPTION = """\
Massively multilingual (60 language) data set derived from TED Talk transcripts.
Each record consists of parallel arrays of language and text. Missing and
incomplete translations will be filtered out.
"""
_CITATION = """\
@InProceedings{qi-EtAl:2018:N18-2,
author = {Qi, Ye and Sachan, Devendra and Felix, Matthieu and Padmanabhan, Sarguna and Neubig, Graham},
title = {When and Why Are Pre-Trained Word Embeddings Useful for Neural Machine Translation?},
booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)},
month = {June},
year = {2018},
address = {New Orleans, Louisiana},
publisher = {Association for Computational Linguistics},
pages = {529--535},
abstract = {The performance of Neural Machine Translation (NMT) systems often suffers in low-resource scenarios where sufficiently large-scale parallel corpora cannot be obtained. Pre-trained word embeddings have proven to be invaluable for improving performance in natural language analysis tasks, which often suffer from paucity of data. However, their utility for NMT has not been extensively explored. In this work, we perform five sets of experiments that analyze when we can expect pre-trained word embeddings to help in NMT tasks. We show that such embeddings can be surprisingly effective in some cases -- providing gains of up to 20 BLEU points in the most favorable setting.},
url = {http://www.aclweb.org/anthology/N18-2084}
}
"""
_DATA_URL = "http://phontron.com/data/ted_talks.tar.gz"
_LANGUAGES = (
"en",
"es",
"pt-br",
"fr",
"ru",
"he",
"ar",
"ko",
"zh-cn",
"it",
"ja",
"zh-tw",
"nl",
"ro",
"tr",
"de",
"vi",
"pl",
"pt",
"bg",
"el",
"fa",
"sr",
"hu",
"hr",
"uk",
"cs",
"id",
"th",
"sv",
"sk",
"sq",
"lt",
"da",
"calv",
"my",
"sl",
"mk",
"fr-ca",
"fi",
"hy",
"hi",
"nb",
"ka",
"mn",
"et",
"ku",
"gl",
"mr",
"zh",
"ur",
"eo",
"ms",
"az",
"ta",
"bn",
"kk",
"be",
"eu",
"bs",
)
class TedMultiTranslate(nlp.GeneratorBasedBuilder):
"""TED talk multilingual data set."""
BUILDER_CONFIGS = [
nlp.BuilderConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text import of multilingual TED talk translations",
)
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"translations": nlp.features.TranslationVariableLanguages(languages=_LANGUAGES),
"talk_name": nlp.Value("string"),
}
),
homepage="https://github.com/neulab/word-embeddings-for-nmt",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"data_file": os.path.join(dl_dir, "all_talks_train.tsv")}
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"data_file": os.path.join(dl_dir, "all_talks_dev.tsv")}
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"data_file": os.path.join(dl_dir, "all_talks_test.tsv")}
),
]
def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
with open(data_file) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for idx, row in enumerate(reader):
# Everything in the row except for 'talk_name' will be a translation.
# Missing/incomplete translations will contain the string "__NULL__" or
# "_ _ NULL _ _".
yield idx, {
"translations": {
lang: text
for lang, text in six.iteritems(row)
if lang != "talk_name" and _is_translation_complete(text)
},
"talk_name": row["talk_name"],
}
def _is_translation_complete(text):
return text and "__NULL__" not in text and "_ _ NULL _ _" not in text
|
nlp-master
|
datasets/ted_multi/ted_multi.py
|
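A usage sketch for the TED multilingual builder above (illustrative only, assuming the legacy `nlp` library); each example carries a variable set of aligned translations for one talk:
import nlp

ted = nlp.load_dataset("ted_multi", split="validation")
talk = ted[0]
translations = talk["translations"]  # assumed to decode to parallel "language"/"translation" lists
print(talk["talk_name"], dict(zip(translations["language"], translations["translation"])).get("en", "")[:120])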
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WikiHow Datasets."""
from __future__ import absolute_import, division, print_function
import csv
import os
import re
import nlp
_CITATION = """
@misc{koupaee2018wikihow,
title={WikiHow: A Large Scale Text Summarization Dataset},
author={Mahnaz Koupaee and William Yang Wang},
year={2018},
eprint={1810.09305},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
WikiHow is a new large-scale dataset using the online WikiHow
(http://www.wikihow.com/) knowledge base.
There are two features:
- text: wikihow answers texts.
- headline: bold lines as summary.
There are two separate versions:
- all: consisting of the concatenation of all paragraphs as the articles and
the bold lines as the reference summaries.
- sep: consisting of each paragraph and its summary.
Download "wikihowAll.csv" and "wikihowSep.csv" from
https://github.com/mahnazkoupaee/WikiHow-Dataset and place them in manual folder
https://www.tensorflow.org/datasets/api_docs/python/tfds/download/DownloadConfig.
Train/validation/test splits are provided by the authors.
Preprocessing is applied to remove short articles
(abstract length < 0.75 article length) and clean up extra commas.
"""
_DOCUMENT = "text"
_SUMMARY = "headline"
_URLS = {
"train": "https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_train.txt",
"validation": "https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_val.txt",
"test": "https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_test.txt",
}
class WikihowConfig(nlp.BuilderConfig):
"""BuilderConfig for Wikihow."""
def __init__(self, filename=None, **kwargs):
"""BuilderConfig for Wikihow.
Args:
filename: filename of different configs for the dataset.
**kwargs: keyword arguments forwarded to super.
"""
# Version 1.1.0 remove empty document and summary strings.
# Version 1.2.0 add train validation test split, add cleaning & filtering.
super(WikihowConfig, self).__init__(version=nlp.Version("1.2.0"), **kwargs)
self.filename = filename
class Wikihow(nlp.GeneratorBasedBuilder):
"""WikiHow: A Large Scale Text Summarization Dataset."""
BUILDER_CONFIGS = [
WikihowConfig(
name="all",
filename="wikihowAll.csv",
description="Use the concatenation of all paragraphs as the articles"
" and the bold lines as the reference summaries",
),
WikihowConfig(name="sep", filename="wikihowSep.csv", description="use each paragraph and its summary."),
]
@property
def manual_download_instructions(self):
return """\
You need to manually download two wikihow files. An overview of which files to download can be seen at https://github.com/mahnazkoupaee/WikiHow-Dataset.
You need to download the following two files manually:
1) https://ucsb.app.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358 and save the file under <path/to/folder>/wikihowAll.csv
2) https://ucsb.app.box.com/s/7yq601ijl1lzvlfu4rjdbbxforzd2oag and save the file under <path/to/folder>/wikihowSep.csv
The <path/to/folder> can e.g. be "~/manual_wikihow_data".
Wikihow can then be loaded using the following command `nlp.load_dataset("wikihow", data_dir="<path/to/folder>")`.
"""
def _info(self):
feature_names = [_DOCUMENT, _SUMMARY, "title"]
if self.config.name == "sep":
feature_names.extend(["overview", "sectionLabel"])
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({k: nlp.Value("string") for k in feature_names}),
supervised_keys=None,
homepage="https://github.com/mahnazkoupaee/WikiHow-Dataset",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URLS)
titles = {k: set() for k in dl_path}
for k, path in dl_path.items():
with open(path) as f:
for line in f:
titles[k].add(line.strip())
path_to_manual_file = os.path.join(
os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), self.config.filename
)
if not os.path.exists(path_to_manual_file):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('wikihow', data_dir=...)` that includes a file name {}. Manual download instructions: {})".format(
path_to_manual_file, self.config.filename, self.manual_download_instructions
)
)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"path": path_to_manual_file, "title_set": titles["train"],},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={"path": path_to_manual_file, "title_set": titles["validation"],},
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"path": path_to_manual_file, "title_set": titles["test"],},
),
]
def _generate_examples(self, path=None, title_set=None):
"""Yields examples."""
with open(path) as f:
reader = csv.reader(f)
headers = next(reader)
if self.config.name == "all" and headers != ["headline", "title", "text"]:
raise ValueError("Mismatched header in WikiAll.txt")
if self.config.name == "sep" and headers != ["overview", "headline", "text", "sectionLabel", "title"]:
raise ValueError("Mismatched header in WikiSep.txt")
key2id = {key: i for i, key in enumerate(headers)}
for i, line in enumerate(reader):
# skip empty line or insufficient line.
if len(line) == len(key2id):
summary = line[key2id[_SUMMARY]].strip()
document = line[key2id[_DOCUMENT]].strip()
summary, document = _filter_and_clean(summary, document)
if summary and document:
if line[key2id["title"]].strip().replace(" ", "") in title_set:
d = {k: line[v].strip() for k, v in key2id.items() if k not in [_SUMMARY, _DOCUMENT]}
d[_DOCUMENT] = document
d[_SUMMARY] = summary
yield i, d
# This function follows the data processing according to the original paper at
# https://github.com/mahnazkoupaee/WikiHow-Dataset/blob/master/process.py
def _filter_and_clean(abstract, article):
"""Remove short article and clean up commas in abstract and article."""
# a threshold is used to remove short articles with long summaries
# as well as articles with no summary
if len(abstract) < (0.75 * len(article)):
# remove extra commas in abstracts
abstract = abstract.replace(".,", ".")
# remove extra commas in articles
article = re.sub(r"[.]+[\n]+[,]", ".\n", article)
return abstract, article
else:
return "", ""
|
nlp-master
|
datasets/wikihow/wikihow.py
|
# coding=utf-8
# Copyright 2020 HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The WNUT 17 Emerging Entities Dataset."""
from __future__ import absolute_import, division, print_function
import csv
import glob
import logging
import os
from pathlib import Path
import nlp
_CITATION = """\
@inproceedings{derczynski-etal-2017-results,
title = "Results of the {WNUT}2017 Shared Task on Novel and Emerging Entity Recognition",
author = "Derczynski, Leon and
Nichols, Eric and
van Erp, Marieke and
Limsopatham, Nut",
booktitle = "Proceedings of the 3rd Workshop on Noisy User-generated Text",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W17-4418",
doi = "10.18653/v1/W17-4418",
pages = "140--147",
abstract = "This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
Named entities form the basis of many modern approaches to other tasks (like event clustering and summarization),
but recall on them is a real problem in noisy text - even among annotators.
This drop tends to be due to novel entities and surface forms.
Take for example the tweet {``}so.. kktny in 30 mins?!{''} {--} even human experts find the entity {`}kktny{'}
hard to detect and resolve. The goal of this task is to provide a definition of emerging and of rare entities,
and based on that, also datasets for detecting these entities. The task as described in this paper evaluated the
ability of participating entries to detect and classify novel and emerging named entities in noisy text.",
}
"""
_DESCRIPTION = """\
WNUT 17: Emerging and Rare entity recognition
This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
Named entities form the basis of many modern approaches to other tasks (like event clustering and summarisation),
but recall on them is a real problem in noisy text - even among annotators. This drop tends to be due to novel entities and surface forms.
Take for example the tweet “so.. kktny in 30 mins?” - even human experts find entity kktny hard to detect and resolve.
This task will evaluate the ability to detect and classify novel, emerging, singleton named entities in noisy text.
The goal of this task is to provide a definition of emerging and of rare entities, and based on that, also datasets for detecting these entities.
"""
_URL = "https://raw.githubusercontent.com/leondz/emerging_entities_17/master/"
_TRAINING_FILE = "wnut17train.conll"
_DEV_FILE = "emerging.dev.conll"
_TEST_FILE = "emerging.test.annotated"
class WNUT_17Config(nlp.BuilderConfig):
"""The WNUT 17 Emerging Entities Dataset."""
def __init__(self, **kwargs):
"""BuilderConfig for WNUT 17.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(WNUT_17Config, self).__init__(**kwargs)
class WNUT_17(nlp.GeneratorBasedBuilder):
"""The WNUT 17 Emerging Entities Dataset."""
BUILDER_CONFIGS = [
WNUT_17Config(
name="wnut_17", version=nlp.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"tokens": nlp.Sequence(nlp.Value("string")),
"labels": nlp.Sequence(nlp.Value("string")),
}
),
supervised_keys=None,
homepage="http://noisy-text.github.io/2017/emerging-rare-entities.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
"dev": f"{_URL}{_DEV_FILE}",
"test": f"{_URL}{_TEST_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
logging.info("⏳ Generating examples from = %s", filepath)
with open(filepath) as f:
current_tokens = []
current_labels = []
sentence_counter = 0
for row in f:
row = row.rstrip()
if row:
token, label = row.split("\t")
current_tokens.append(token)
current_labels.append(label)
else:
# New sentence
if not current_tokens:
# Consecutive empty lines will cause empty sentences
continue
assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
sentence = (
sentence_counter,
{"id": str(sentence_counter), "tokens": current_tokens, "labels": current_labels,},
)
sentence_counter += 1
current_tokens = []
current_labels = []
yield sentence
# Don't forget last sentence in dataset 🧐
if current_tokens:
yield sentence_counter, {
"id": str(sentence_counter),
"tokens": current_tokens,
"labels": current_labels,
}
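# The files parsed above are CoNLL-style: one "token<TAB>label" pair per line, with a blank
# line separating sentences. A hypothetical fragment
#
#   Empire	B-location
#   State	I-location
#   Building	I-location
#   (blank line)
#
# would be yielded as
#
#   (0, {"id": "0", "tokens": ["Empire", "State", "Building"],
#        "labels": ["B-location", "I-location", "I-location"]})
#
# The surface forms and labels here are illustrative, not taken from the actual corpus.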
|
nlp-master
|
datasets/wnut_17/wnut_17.py
|
"""TODO(wikitext): Add a description here."""
from __future__ import absolute_import, division, print_function
import os
import nlp
# TODO(wikitext): BibTeX citation
_CITATION = """\
@InProceedings{wikitext,
    author={Merity, Stephen and Xiong, Caiming and Bradbury, James and Socher, Richard},
    title={Pointer Sentinel Mixture Models},
    year={2016}
}
"""
# TODO(wikitext):
_DESCRIPTION = """\
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.
"""
_URL = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
_DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
class WikitextConfig(nlp.BuilderConfig):
"""BuilderConfig for GLUE."""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for Wikitext
Args:
data_url: `string`, url to the dataset (word or raw level)
**kwargs: keyword arguments forwarded to super.
"""
super(WikitextConfig, self).__init__(version=nlp.Version("1.0.0",), **kwargs)
self.data_url = data_url
class Wikitext(nlp.GeneratorBasedBuilder):
"""TODO(wikitext_103): Short description of my dataset."""
# TODO(wikitext_103): Set up version.
VERSION = nlp.Version("0.1.0")
    BUILDER_CONFIGS = [
        WikitextConfig(
            name="wikitext-103-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        WikitextConfig(
            name="wikitext-2-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        WikitextConfig(
            name="wikitext-103-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-2-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
    ]
def _info(self):
# TODO(wikitext): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"text": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_URL,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a nlp.download.DownloadManager that can be used to
        # download and extract URLs.
        # Each config extracts into a different sub-directory and ships either
        # word level (".tokens") or raw/character level (".raw") files.
        config_layout = {
            "wikitext-103-v1": ("wikitext-103", "tokens"),
            "wikitext-103-raw-v1": ("wikitext-103-raw", "raw"),
            "wikitext-2-v1": ("wikitext-2", "tokens"),
            "wikitext-2-raw-v1": ("wikitext-2-raw", "raw"),
        }
        subdir, suffix = config_layout[self.config.name]
        data_file = dl_manager.download_and_extract(self.config.data_url)
        data_dir = os.path.join(data_file, subdir)
        return [
            nlp.SplitGenerator(
                name=nlp.Split.TEST,
                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test." + suffix), "split": "test"},
            ),
            nlp.SplitGenerator(
                name=nlp.Split.TRAIN,
                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train." + suffix), "split": "train"},
            ),
            nlp.SplitGenerator(
                name=nlp.Split.VALIDATION,
                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid." + suffix), "split": "valid"},
            ),
        ]
def _generate_examples(self, data_file, split):
"""Yields examples."""
# TODO(wikitext): Yields (key, example) tuples from the dataset
with open(data_file) as f:
for idx, row in enumerate(f):
if row.strip():
yield idx, {"text": row}
else:
yield idx, {"text": ""}
|
nlp-master
|
datasets/wikitext/wikitext.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Opinosis Opinion Dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """
@inproceedings{ganesan2010opinosis,
title={Opinosis: a graph-based approach to abstractive summarization of highly redundant opinions},
author={Ganesan, Kavita and Zhai, ChengXiang and Han, Jiawei},
booktitle={Proceedings of the 23rd International Conference on Computational Linguistics},
pages={340--348},
year={2010},
organization={Association for Computational Linguistics}
}
"""
_DESCRIPTION = """
The Opinosis Opinion Dataset consists of sentences extracted from reviews for 51 topics.
Topics and opinions are obtained from Tripadvisor, Edmunds.com and Amazon.com.
"""
_URL = "https://github.com/kavgan/opinosis-summarization/raw/master/OpinosisDataset1.0_0.zip"
_REVIEW_SENTS = "review_sents"
_SUMMARIES = "summaries"
class Opinosis(nlp.GeneratorBasedBuilder):
"""Opinosis Opinion Dataset."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{_REVIEW_SENTS: nlp.Value("string"), _SUMMARIES: nlp.features.Sequence(nlp.Value("string"))}
),
supervised_keys=(_REVIEW_SENTS, _SUMMARIES),
homepage="http://kavita-ganesan.com/opinosis/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
extract_path = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": extract_path},),
]
def _generate_examples(self, path=None):
"""Yields examples."""
topics_path = os.path.join(path, "topics")
filenames = sorted(os.listdir(topics_path))
for filename in filenames:
file_path = os.path.join(topics_path, filename)
topic_name = filename.split(".txt")[0]
with open(file_path, "rb") as src_f:
input_data = src_f.read().decode("latin-1")
summaries_path = os.path.join(path, "summaries-gold", topic_name)
summary_lst = []
for summ_filename in sorted(os.listdir(summaries_path)):
file_path = os.path.join(summaries_path, summ_filename)
with open(file_path, "rb") as tgt_f:
data = tgt_f.read().strip().decode("latin-1")
summary_lst.append(data)
summary_data = summary_lst
yield filename, {_REVIEW_SENTS: input_data, _SUMMARIES: summary_data}
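# The extracted archive is expected to look roughly like this (topic names are examples):
#
#   <extract_path>/
#     topics/
#       battery-life_amazon_kindle.txt.data      <- review sentences, one topic per file
#     summaries-gold/
#       battery-life_amazon_kindle/
#         <one or more gold summary files>
#
# so each yielded example pairs a topic's review sentences with its list of gold summaries.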
|
nlp-master
|
datasets/opinosis/opinosis.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""ParaCrawl (Bitextor) parallel open-source machine translation benchmark."""
from __future__ import absolute_import, division, print_function
import collections
import nlp
_DESCRIPTION = "Web-Scale Parallel Corpora for Official European Languages."
_BENCHMARK_URL = "https://paracrawl.eu/releases.html"
_CITATION = """\
@misc {paracrawl,
title = {ParaCrawl},
year = {2018},
  url = {http://paracrawl.eu/download.html}
}
"""
_BASE_DATA_URL_FORMAT_STR = (
"https://s3.amazonaws.com/web-language-models/" "paracrawl/release4/en-{target_lang}.bicleaner07." "txt.gz"
)
def _target_languages():
"""Create the sorted dictionary of language codes, and language names.
Returns:
The sorted dictionary as an instance of `collections.OrderedDict`.
"""
langs = {
"bg": "Bulgarian",
"cs": "Czech",
"da": "Danish",
"de": "German",
"el": "Greek",
"es": "Spanish",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"ga": "Irish",
"hr": "Croatian",
"hu": "Hungarian",
"it": "Italian",
"lt": "Lithuanian",
"lv": "Latvian",
"mt": "Maltese",
"nl": "Dutch",
"pl": "Polish",
"pt": "Portuguese",
"ro": "Romanian",
"sk": "Slovak",
"sl": "Slovenian",
"sv": "Swedish",
}
return collections.OrderedDict(sorted(langs.items()))
class ParaCrawlConfig(nlp.BuilderConfig):
"""BuilderConfig for ParaCrawl."""
def __init__(self, target_language=None, **kwargs):
"""BuilderConfig for ParaCrawl.
Args:
          target_language: Target language that will be used to translate to from
            English, which is always the source language. It has to be a 2-letter
            language code from `_target_languages()`. For example: "sv", "hu".
**kwargs: Keyword arguments forwarded to super.
"""
# Validate the target language.
if target_language not in _target_languages():
raise ValueError("Invalid target language: %s " % target_language)
# Initialize the base class.
name = "en%s" % (target_language)
description = ("Translation dataset from English to %s.") % (target_language)
super(ParaCrawlConfig, self).__init__(name=name, description=description, **kwargs)
# Store the attributes.
self.target_language = target_language
self.data_url = _BASE_DATA_URL_FORMAT_STR.format(target_lang=target_language)
class ParaCrawl(nlp.GeneratorBasedBuilder):
"""ParaCrawl machine translation dataset."""
# Version history:
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
# 0.1.0: Initial version.
BUILDER_CONFIGS = [
# The version below does not refer to the version of the released
# database. It only indicates the version of the TFDS integration.
ParaCrawlConfig( # pylint: disable=g-complex-comprehension
target_language=target_language, version=nlp.Version("1.0.0"),
)
for target_language in _target_languages()
]
def _info(self):
target_language = self.config.target_language
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=("en", target_language))}),
supervised_keys=("en", target_language),
homepage=_BENCHMARK_URL,
citation=_CITATION,
)
def _vocab_text_gen(self, files, language):
for _, ex in self._generate_examples(**files):
yield ex[language]
def _split_generators(self, dl_manager):
# Download the data file.
data_file = dl_manager.download_and_extract({"data_file": self.config.data_url})
# Return the single split of the data.
return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs=data_file)]
def _generate_examples(self, data_file):
"""This function returns the examples in the raw (text) form."""
target_language = self.config.target_language
with open(data_file) as f:
for idx, line in enumerate(f):
line_parts = line.strip().split("\t")
if len(line_parts) != 2:
msg = (
"Wrong data format in line {}. The line '{}' does " "not have exactly one delimiter."
).format(idx, line)
raise ValueError(msg)
source, target = line_parts[0].strip(), line_parts[1].strip()
yield idx, {"translation": {"en": source, target_language: target}}
|
nlp-master
|
datasets/para_crawl/para_crawl.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""C4 dataset based on Common Crawl."""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import nlp
from .c4_utils import (
dedupe_urls,
filter_by_webtextlike,
get_clean_page_fn,
get_counter_inc_fn,
get_hashed_url_filter_fn,
is_language,
is_realnews_domain,
is_valid_length,
normalize_url,
remove_duplicate_text,
split_wet_file,
)
_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on Common Crawl dataset: "https://commoncrawl.org"
Due to the overhead of cleaning the dataset, it is recommended that you prepare it with
a distributed service like Cloud Dataflow. More info at
https://www.tensorflow.org/datasets/beam_datasets.
"""
_CITATION = """
@article{2019t5,
author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
journal = {arXiv e-prints},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.10683},
}
"""
_VERSION = nlp.Version("2.3.0", "Deduplicate lines within a page.")
_DOWNLOAD_HOST = "https://commoncrawl.s3.amazonaws.com"
_WET_PATH_URL = "https://commoncrawl.s3.amazonaws.com/crawl-data/CC-MAIN-{cc_version}/wet.paths.gz"
_REALNEWS_DOMAINS_URL = "https://raw.githubusercontent.com/rowanz/grover/38f7184bd87237ae2d3bc330b99f1e2e246f6d51/realnews/domain_to_allowed_subdomains.json"
_BADWORDS_URL = "https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/25e679f03d96baa721cde20db9944649e8d0a844/{lang}"
_CHECKSUMS_URL = "https://storage.googleapis.com/tfds-data/manual_checksums/c4.txt"
_OPENWEBTEXT_URLS_ZIP = "OpenWebText.zip"
_OPENWEBTEXT_URLS_URL = "https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ"
_OPENWEBTEXT_URLS_FILE_PATTERN = "OpenWebText/Version 1/URLs/*.txt"
_DEFAULT_CC_VERSIONS = ("2019-18",) # April 2019
_DEFAULT_WEBTEXTLIKE_CC_VERSIONS = ( # August 2018 - July 2019
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
"2019-04",
"2019-09",
"2019-13",
"2019-18",
"2019-22",
"2019-26",
"2019-30",
)
class C4Config(nlp.BuilderConfig):
"""BuilderConfig for C4 dataset."""
def __init__(self, language, cc_versions=None, clean=True, realnewslike=False, webtextlike=False, **kwargs):
"""BuilderConfig for C4.
Args:
language: string, the language code, or "all" to disable language
filtering.
cc_versions: tuple(string), a collection of versions of Common Crawl to
use as the raw source text. Set to None to use defaults.
clean: bool, whether to clean the dataset for badwords, duplications, etc.
realnewslike: bool, whether to limit to news domains as compiled by
RealNews.
webtextlike: bool, whether to limit to WebText-like URLs.
**kwargs: keyword arguments forwarded to super.
"""
name_parts = [language]
if cc_versions:
name_parts.append("_".join(cc_versions))
if not clean:
name_parts.append("noclean")
if realnewslike:
name_parts.append("realnewslike")
if webtextlike:
name_parts.append("webtextlike")
name = ".".join(name_parts)
super(C4Config, self).__init__(name=name, version=_VERSION, **kwargs)
self.lang = language
self.cc_versions = cc_versions or (_DEFAULT_WEBTEXTLIKE_CC_VERSIONS if webtextlike else _DEFAULT_CC_VERSIONS)
self.clean = clean
self.realnewslike = realnewslike
self.webtextlike = webtextlike
class C4(nlp.BeamBasedBuilder):
"""C4 dataset based on Common Crawl."""
BUILDER_CONFIGS = [
C4Config(language="en", description="English C4 dataset."),
C4Config(
language="en",
clean=False,
description="Disables all cleaning (deduplication, removal based on bad words, " "etc.)",
),
C4Config(
language="en",
realnewslike=True,
description="Filters from the default config to only include content from the "
"domains used in the 'RealNews' dataset (Zellers et al., 2019).",
),
C4Config(
language="en",
webtextlike=True,
description="Filters from the default config to only include content from the "
"URLs in OpenWebText (https://github.com/jcpeterson/openwebtext).",
),
]
def manual_download_instructions(self):
return """\
For the WebText-like config, you must manually download 'OpenWebText.zip'
(from https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ) and the Common Crawl
WET files from August 2018 to July 2019
(https://commoncrawl.org/the-data/get-started/) and place them in the
`data_dir`.
"""
def _info(self):
features = {
"text": nlp.Value("string"),
"url": nlp.Value("string"),
"content-type": nlp.Value("string"),
"content-length": nlp.Value("string"),
"timestamp": nlp.Value("string"),
}
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(features),
citation=_CITATION,
homepage="https://github.com/google-research/text-to-text-transfer-transformer#datasets",
)
def _split_generators(self, dl_manager, pipeline):
import apache_beam as beam
        # We will automatically download the default CC version(s), but others need to
        # be manually downloaded.
cc_versions = set(self.config.cc_versions)
auto_cc_versions = cc_versions & set(_DEFAULT_CC_VERSIONS)
manual_cc_versions = cc_versions - set(_DEFAULT_CC_VERSIONS)
files_to_download = {}
files_to_download["wet_path_urls"] = [
_WET_PATH_URL.format(cc_version=cc_version) for cc_version in auto_cc_versions
]
if self.config.clean:
files_to_download["badwords"] = _BADWORDS_URL.format(lang=self.config.lang)
if self.config.realnewslike:
files_to_download["realnews_domains"] = _REALNEWS_DOMAINS_URL
file_paths = dl_manager.download_and_extract(files_to_download)
if self.config.webtextlike:
owt_path = os.path.join(dl_manager.manual_dir, _OPENWEBTEXT_URLS_ZIP)
if not os.path.exists(owt_path):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('c4', data_dir=...)` that includes a file name {}. Manual download instructions: {})".format(
owt_path, _OPENWEBTEXT_URLS_ZIP, self.manual_download_instructions
)
)
file_paths["openwebtext_urls_zip"] = dl_manager.extract(owt_path)
wet_urls = []
for wet_path_url in file_paths["wet_path_urls"]:
with open(wet_path_url, "r") as f:
wet_urls.extend(["%s/%s" % (_DOWNLOAD_HOST, l.strip()) for l in f])
file_paths["wet_urls"] = wet_urls
file_paths["wet_files"] = []
        for cc_version in manual_cc_versions:
            cc_dir = os.path.join(dl_manager.manual_dir, cc_version)
            if not os.path.exists(cc_dir):
                raise FileNotFoundError(
                    "{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('c4', data_dir=...)` that includes the files {}. Manual download instructions: {}".format(
                        cc_dir, "*.warc.wet.gz", self.manual_download_instructions()
                    )
                )
            wet_files = beam.io.filesystems.FileSystems.match(os.path.join(cc_dir, "*.warc.wet.gz"))
logging.info("Adding %d WET files for manually downloaded version %s.", len(wet_files), cc_version)
file_paths["wet_files"].extend(wet_files)
page_content_pcollection = self._get_page_content(pipeline, file_paths, dl_manager)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs=dict(
split="train",
page_content=page_content_pcollection,
hashed_url_predicate=lambda x: x % 1000 != 0, # 99.9%
),
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs=dict(
split="validation",
page_content=page_content_pcollection,
                    hashed_url_predicate=lambda x: x % 1000 == 0,  # 0.1%
),
),
]
def _get_page_content(self, pipeline, file_paths, dl_manager):
"""Build PCollection of un-split page content."""
import apache_beam as beam
wet_file_paths = pipeline | "create_wet_files" >> beam.Create(file_paths["wet_files"])
if "wet_urls" in file_paths:
def download_url(url, downloader, pipeline):
path = downloader.download(url)
if not pipeline.is_local():
path = downloader.ship_files_with_pipeline(path, pipeline)
return path
dl_wet_file_paths = (
pipeline
| "create_wet_urls" >> beam.Create(file_paths["wet_urls"])
| beam.Map(download_url, downloader=dl_manager, pipeline=pipeline)
)
wet_file_paths = (wet_file_paths, dl_wet_file_paths) | beam.Flatten()
# Parse WET files and filter by length.
# Output: url, text
page_content = wet_file_paths | beam.FlatMap(split_wet_file) | beam.Filter(is_valid_length)
# Optionally filter for RealNews domains.
# Output: url, text
if self.config.realnewslike:
with open(file_paths["realnews_domains"], "r") as f:
realnews_domains = json.load(f)
page_content = page_content | beam.Filter(is_realnews_domain, realnews_domains)
# Normalize and deduplicate by URL.
# Output: url, text
page_content = (
page_content
| "normalize_url" >> beam.Map(normalize_url)
| "group_url" >> beam.GroupByKey()
| beam.Map(dedupe_urls)
)
# Optionally filter for WebText-like URLs.
# Output: url, text
if self.config.webtextlike:
webtextlike_urls = (
pipeline
| "read_webtextlike_urls"
>> beam.io.ReadFromText(
os.path.join(file_paths["openwebtext_urls_zip"], _OPENWEBTEXT_URLS_FILE_PATTERN)
)
| "add_dummy_page" >> beam.Map(lambda x: (x, ""))
| "normal_webtext_url" >> beam.Map(normalize_url)
)
page_content = (
{"text": page_content, "webtextlike_urls": webtextlike_urls}
| "group_webtextlike_urls" >> beam.CoGroupByKey()
| beam.FlatMap(filter_by_webtextlike)
)
        # Optionally clean pages of badwords, boilerplate text, and duplicate
# spans of sentences.
# Output: url, text
if self.config.clean:
with open(file_paths["badwords"], "r") as f:
badwords = [l.strip() for l in f]
page_content = page_content | "clean_pages" >> beam.FlatMap(get_clean_page_fn(badwords))
page_content = remove_duplicate_text(page_content)
# Optionally filter out non-`language` pages. We do this after cleaning
        # since it may change the predominant language.
if self.config.lang != "all":
page_content |= beam.Filter(is_language, language=self.config.lang)
return page_content
def _build_pcollection(self, unused_pipeline, split, page_content, hashed_url_predicate):
import apache_beam as beam
def _emit_examples(el):
get_counter_inc_fn(split)("examples")
_, features = el
return (
features["url"],
{
"url": features["url"],
"text": features["text"],
"content-type": features["content-type"],
"content-length": features["content-length"],
"timestamp": features["timestamp"],
},
)
return page_content | beam.Filter(get_hashed_url_filter_fn(hashed_url_predicate)) | beam.Map(_emit_examples)
|
nlp-master
|
datasets/c4/c4.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for generating the C4 dataset."""
from __future__ import absolute_import, division, print_function
import functools
import gzip
import hashlib
import io
import re
import threading
# WET file constants
_PAGE_DELIMITER = "WARC/1.0"
_URL_KEY = "WARC-Target-URI:"
_URL_DATE = "WARC-Date:"
_CONTENT_TYPE = "Content-Type:"
_CONTENT_LEN = "Content-Length:"
_METADATA_PREFIXES = ("WARC", "CONTENT-", "Content-")
# Filters
_MIN_WORDS_PER_LINE = 5
_MIN_NUM_SENTENCES = 3
_MAX_WORD_LENGTH = 1000
_END_MARKS = (".", "?", "!", '"')
_ELLIPSIS = "..."
_POLICY_SUBSTRINGS = [
"terms of use",
"privacy policy",
"cookie policy",
"uses cookies",
"use of cookies",
"use cookies",
]
# Memoized sentence tokenizer.
_SENTENCE_TOKENIZER = None
def get_counter_inc_fn(namespace):
import apache_beam as beam
def counter_inc_fn(counter, amt=1):
beam.metrics.Metrics.counter(namespace, counter).inc(amt)
return counter_inc_fn
def get_hashed_url_filter_fn(predicate_fn):
import tensorflow.compat.v2 as tf
def filter_fn(el):
url, _ = el
val = int(hashlib.md5(tf.compat.as_text(url).encode("utf-8")).hexdigest(), 16)
return predicate_fn(val)
return filter_fn
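# Sketch of how the C4 builder uses this: the predicate receives the page URL's md5 hash as
# an integer, and taking it modulo 1000 gives a deterministic, URL-stable train/validation
# assignment.
#
#   to_train = get_hashed_url_filter_fn(lambda v: v % 1000 != 0)   # ~99.9% of pages
#   to_valid = get_hashed_url_filter_fn(lambda v: v % 1000 == 0)   # ~0.1% of pages
#   to_train(("example.com/some-page", {"text": "..."}))           # True or False, stable per URL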
# Lock shared across threads so that only one of them downloads the NLTK data
# (a freshly created local Lock would provide no mutual exclusion).
_NLTK_DOWNLOAD_LOCK = threading.Lock()
def _load_sentence_tokenizer():
    """Returns a sentence tokenization function."""
    with _NLTK_DOWNLOAD_LOCK:
        import nltk
        nltk.download("punkt")
        return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
def _get_sentences(text):
import tensorflow.compat.v2 as tf
global _SENTENCE_TOKENIZER
if not _SENTENCE_TOKENIZER:
_SENTENCE_TOKENIZER = _load_sentence_tokenizer()
return list(_SENTENCE_TOKENIZER.tokenize(tf.compat.as_text(text)))
def _get_sentences_by_line(text, lower=False):
sentences = []
for line in text.splitlines():
sentences.append([s.lower() if lower else s for s in _get_sentences(line)])
return sentences
def is_language(page, language, min_probability=0.99):
"""Returns True iff text is in `language` with at least `min_probability`."""
unused_url, features = page
text = features["text"]
counter_inc_fn = get_counter_inc_fn("detected-lang")
# Make langdetect predictions deterministic.
import langdetect
langdetect.DetectorFactory.seed = 0
try:
predictions = langdetect.detect_langs(text)
except langdetect.lang_detect_exception.LangDetectException:
counter_inc_fn("langdetect-exception")
return False
if not predictions:
counter_inc_fn("page-filtered-nolangpredictions")
return False
best_prediction = predictions[0]
if best_prediction.prob < min_probability:
counter_inc_fn("page-filtered-lowlangdetectconf")
return False
if best_prediction.lang != language:
counter_inc_fn("page-filtered-ignoredlang")
counter_inc_fn("page-filtered-ignoredlang-%s" % (best_prediction.lang))
return False
counter_inc_fn("page-emited-%s" % best_prediction.lang)
return True
def get_clean_page_fn(badwords=None):
"""Returns `clean_page` with pre-compiled badword and citation regexes."""
# Used to filter citation from Wikipedia pages (among others).
citation_regex = re.compile(r"\[\d*\]|\[edit\]|\[citation needed\]")
if badwords:
badwords_regex = re.compile("[^a-z]({})[^a-z]".format("|".join(badwords or [])))
else:
badwords_regex = None
return functools.partial(clean_page, citation_regex=citation_regex, badwords_regex=badwords_regex)
def clean_page(
url_and_features,
citation_regex,
badwords_regex=None,
counter_inc_fn=None,
min_words_per_line=_MIN_WORDS_PER_LINE,
min_num_sentences=_MIN_NUM_SENTENCES,
max_word_length=_MAX_WORD_LENGTH,
):
"""Cleans a CommonCrawl page, yielding nothing if it should be skipped.
Cleaning removes lines with no end marks or with too few words. After line
filtering, pages are filtered out if they have too few sentences based on a
simple count of end marks.
Args:
url_and_features: tuple(string, dict), the url and features of the page.
citation_regex: Regex to use for finding Wikipedia-like citations to filter.
badwords_regex: Regex to use for finding badwords. Default None, which means
don't apply badwords filtering.
counter_inc_fn: function, a function taking the name of a counter to be
incremented and the (optional) amount. Defaults to a beam Metric counter.
min_words_per_line: int, the minimum number of words a line needs to not be
removed.
min_num_sentences: int, the minimum number of sentences a page needs to not
be skipped.
max_word_length: int, the maximum number of characters allowed in a word.
Lines containing a word with too many characters are removed.
Yields:
The url and cleaned text for the page.
"""
url, features = url_and_features
text = features["text"]
if not counter_inc_fn:
counter_inc_fn = get_counter_inc_fn("clean-page")
lines = text.splitlines()
valid_lines = []
num_sentences = 0
def line_has_too_long_word(line):
for word in line.split():
if len(word) > max_word_length:
return True
return False
for line in lines:
line = line.strip()
if line_has_too_long_word(line):
counter_inc_fn("lines-with-too-long-word")
continue
line = citation_regex.sub("", line)
if not line.endswith(_END_MARKS) or line.endswith(_ELLIPSIS):
counter_inc_fn("lines-no-endmark")
continue
if len(line.split()) < min_words_per_line:
counter_inc_fn("lines-too-short")
continue
line_lower = line.lower()
# Remove documents which contain lorem ipsum
if "lorem ipsum" in line_lower:
counter_inc_fn("filtered-page-loremipsum")
return
# Remove "javascript must be enabled" notices
if "javascript" in line_lower:
counter_inc_fn("lines-javascript")
continue
# Remove docs which probably contain javascript code
if "{" in line:
counter_inc_fn("filtered-page-squigglybracket")
return
# Remove policy lines
if any(p in line_lower for p in _POLICY_SUBSTRINGS):
counter_inc_fn("lines-policy")
continue
# If any badword appears on its own in the line, skip this doc
if badwords_regex:
badwords_found = badwords_regex.search(line_lower)
if badwords_found is not None:
counter_inc_fn("filtered-page-badword")
return
num_sentences += len(_get_sentences(line))
valid_lines.append(line)
counter_inc_fn("lines-valid")
if num_sentences < min_num_sentences:
counter_inc_fn("filtered-page-toofewsentences")
return
counter_inc_fn("emitted-clean-pages")
features["text"] = "\n".join(valid_lines).strip()
yield url, features
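# Illustration (hypothetical lines) of the per-line filters above: given a page whose text is
#
#   "Buy now"                                           -> dropped (no end mark)
#   "This site uses cookies to improve your experience."-> dropped (policy substring)
#   "The quick brown fox jumps over the lazy dog."      -> kept
#
# the page itself is then emitted only if the kept lines still contain at least
# `min_num_sentences` sentences.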
def _hash_line(line):
import tensorflow.compat.v2 as tf
m = hashlib.md5()
m.update(tf.compat.as_text(line).encode("utf-8").strip().lower())
return m.hexdigest()
def _emit_url_to_lines(page):
"""Emits url to all (lower-cased, hashed) lines."""
url, features = page
text = features["text"]
for line in text.split("\n"):
yield _hash_line(line), url
def _emit_line_to_urls(el, counter_inc_fn):
"""Emits (hashed) line to all but one url."""
import tensorflow.compat.v2 as tf
line, urls = el
# Materialize urls as a list.
urls = list(urls)
# Hash urls and sort to have a consistent, but unbiased, selection when the
# same urls exist for multiple lines.
skip_url = min(urls, key=lambda x: hashlib.md5(tf.compat.as_text(x).encode("utf-8")).hexdigest())
for url in urls:
if url != skip_url:
yield url, line
counter_inc_fn("emitted-line-duplicate", amt=len(urls) - 1)
def _remove_lines_from_text(el, counter_inc_fn, min_num_sentences=_MIN_NUM_SENTENCES):
"""Removes matching lines from the page.
Process the result of a join containing a single value for 'features' and zero
or more values for 'lines'. Each value in 'lines' is a lower-cased, hashed
line.
    Lines whose hashes appear in 'lines' are removed from the page's text.
Args:
el: `(string, {'features': features_dict, 'lines': [string]})`,
element containing the result of a join on key with both the page text
and lower-cased, hashed lines to remove.
counter_inc_fn: function, a function taking the name of a counter to be
incremented and the (optional) amount.
min_num_sentences: int, the minimum number of sentences a page needs to not
be skipped.
Yields:
url: The URL of the page.
features: The page features with lines removed from text.
"""
url, join_values = el
features = join_values["features"]
assert len(features) == 1, "Invalid page count (%d) for %s" % (len(features), url)
features = features[0]
text = features["text"]
lines_to_remove = set(join_values["lines"])
new_lines = []
hashed_lines = set()
for line in text.split("\n"):
hashed_line = _hash_line(line)
if hashed_line in lines_to_remove:
counter_inc_fn("filtered-lines-duplicate")
elif hashed_line not in hashed_lines:
new_lines.append(line)
hashed_lines.add(hashed_line)
new_text = "\n".join(new_lines)
if len(_get_sentences(new_text)) < min_num_sentences:
counter_inc_fn("filtered-doc-toofewsentences")
return
new_features = features.copy()
new_features["text"] = new_text
yield (url, new_features)
def remove_duplicate_text(pages):
"""Utility to remove duplicate lines across text documents."""
# Output: url, lines
import apache_beam as beam
counter_inc_fn = get_counter_inc_fn("dedupe-lines")
lines_to_remove = (
pages
| beam.FlatMap(_emit_url_to_lines)
| "group_sentences" >> beam.GroupByKey()
| beam.FlatMap(_emit_line_to_urls, counter_inc_fn=counter_inc_fn)
)
# Output: url, text
final_docs = (
{"features": pages, "lines": lines_to_remove}
| "group_features_and_lines_by_url" >> beam.CoGroupByKey()
| beam.FlatMap(_remove_lines_from_text, counter_inc_fn=counter_inc_fn)
)
return final_docs
def split_wet_file(wet_file_path, counter_inc_fn=None):
"""Split a WET file into separate pages."""
from absl import logging
logging.info("Splitting file: %s", wet_file_path)
if not counter_inc_fn:
counter_inc_fn = get_counter_inc_fn("split-wet-file")
counter_inc_fn("wet-file")
import apache_beam as beam
with beam.io.filesystems.FileSystems.open(wet_file_path) as f, gzip.GzipFile(fileobj=f) as g:
url = None
content = None
content_len = None
content_type = None
timestamp = None
def _maybe_get_page():
"""Generate a (url, {features}) page."""
if not url and url is not None:
counter_inc_fn("page-filtered-nourl")
if not content and content is not None:
counter_inc_fn("page-filtered-nocontent")
if not content_type and content_type is not None:
counter_inc_fn("page-nocontenttype")
if not content_len and content_len is not None:
counter_inc_fn("page-nocontentlen")
if not timestamp and timestamp is not None:
counter_inc_fn("page-notimestamp")
if content and url:
counter_inc_fn("page-emitted")
return (
url,
{
"text": "\n".join(content),
"content-type": content_type,
"content-length": content_len,
"timestamp": timestamp,
"url": url,
},
)
return None
for line in io.TextIOWrapper(g, encoding="utf-8"):
line = line.strip()
if not line:
continue
if line == _PAGE_DELIMITER:
page = _maybe_get_page()
if page:
yield page
url = ""
content = []
content_len = ""
content_type = ""
timestamp = ""
if line.startswith(_URL_KEY):
url = line[len(_URL_KEY) :].strip()
if line.startswith(_URL_DATE):
timestamp = line[len(_URL_DATE) :].strip()
if line.startswith(_CONTENT_TYPE):
content_type = line[len(_CONTENT_TYPE) :].strip()
if line.startswith(_CONTENT_LEN):
content_len = line[len(_CONTENT_LEN) :].strip()
if line.startswith(_METADATA_PREFIXES):
continue
content.append(line)
page = _maybe_get_page()
if page:
yield page
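# A minimal sketch (hypothetical values) of the WET record structure parsed above:
#
#   WARC/1.0
#   WARC-Target-URI: http://example.com/article
#   WARC-Date: 2019-04-18T12:00:00Z
#   Content-Type: text/plain
#   Content-Length: 1234
#   First line of page text.
#   Second line of page text.
#
# Lines starting with the metadata prefixes ("WARC", "CONTENT-", "Content-") set or skip
# metadata; everything else between two "WARC/1.0" delimiters becomes the page's "text".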
def dedupe_urls(el):
"""Returns the first value for a given URL."""
counter_inc_fn = get_counter_inc_fn("dedupe-urls")
url, vals = el
cnt = 0
v = None
for v in vals:
cnt += 1
counter_inc_fn("filtered-url-duplicate", cnt - 1)
counter_inc_fn("unique-url")
return url, v
def is_valid_length(el, max_length=1.9e5):
"""Returns False iff page's text is too long."""
counter_inc_fn = get_counter_inc_fn("is-valid-length")
_, page = el
if len(page["text"]) > max_length:
counter_inc_fn("filtered-page-contenttoolong")
return False
counter_inc_fn("valid-length")
return True
def is_realnews_domain(el, realnews_domains):
"""Returns False iff page's (sub)domain is not allowed."""
import tldextract
counter_inc_fn = get_counter_inc_fn("is-realnews-domain")
url, _ = el
ext = tldextract.extract(url)
main_domain = ext.domain + "." + ext.suffix
if main_domain not in realnews_domains:
counter_inc_fn("filtered-url-invaliddomain")
return False
allowed_subdomains = realnews_domains[main_domain]
if isinstance(allowed_subdomains, list) and ext.subdomain not in allowed_subdomains:
counter_inc_fn("filtered-url-invalidsubdomain")
return False
counter_inc_fn("realnews-domain")
return True
def filter_by_webtextlike(el):
"""Yields only pages with a matching WebText-like URL."""
counter_inc_fn = get_counter_inc_fn("filter-by-webtextlike")
url, join_values = el
text = join_values["text"]
webtextlike = join_values["webtextlike_urls"]
if not webtextlike:
counter_inc_fn("filtered-url-notwebtextlike")
return
if not text:
counter_inc_fn("missing-webtextlike")
return
assert len(text) == 1
counter_inc_fn("found-webtextlike")
yield url, text[0]
def normalize_url(el):
import tensorflow.compat.v2 as tf
url, val = el
url = tf.compat.as_text(url)
url = re.sub(r"https?:\/\/(www\.)?", "", url)
url = re.sub(r"\?(utm_|ref|feed).*", "", url)
url = url.rstrip("/")
return url, val
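# Example of the normalization applied above (illustrative URL):
#
#   normalize_url(("https://www.example.com/news/story/?utm_source=feed", {"text": "..."}))
#     -> ("example.com/news/story", {"text": "..."})
#
# The scheme and leading "www." are stripped, tracking query strings (utm_*, ref, feed) are
# removed, and trailing slashes are dropped so duplicate URLs collapse to the same key.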
|
nlp-master
|
datasets/c4/c4_utils.py
|
"""TODO(quartz): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(quartz): BibTeX citation
_CITATION = """\
@InProceedings{quartz,
  author = {Oyvind Tafjord and Matt Gardner and Kevin Lin and Peter Clark},
  title = {QuaRTz: An Open-Domain Dataset of Qualitative Relationship Questions},
  year = {2019},
}
"""
# TODO(quartz):
_DESCRIPTION = """\
QuaRTz is a crowdsourced dataset of 3864 multiple-choice questions about open domain qualitative relationships. Each
question is paired with one of 405 different background sentences (sometimes short paragraphs).
The dataset is split into train (2696), dev (384) and test (784). A background sentence will only appear in a single split.
"""
_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/quartz-dataset-v1-aug2019.zip"
class Quartz(nlp.GeneratorBasedBuilder):
"""TODO(quartz): Short description of my dataset."""
# TODO(quartz): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(quartz): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"id": nlp.Value("string"),
"question": nlp.Value("string"),
"choices": nlp.features.Sequence({"text": nlp.Value("string"), "label": nlp.Value("string")}),
"answerKey": nlp.Value("string"),
"para": nlp.Value("string"),
"para_id": nlp.Value("string"),
"para_anno": {
"effect_prop": nlp.Value("string"),
"cause_dir_str": nlp.Value("string"),
"effect_dir_str": nlp.Value("string"),
"cause_dir_sign": nlp.Value("string"),
"effect_dir_sign": nlp.Value("string"),
"cause_prop": nlp.Value("string"),
},
"question_anno": {
"more_effect_dir": nlp.Value("string"),
"less_effect_dir": nlp.Value("string"),
"less_cause_prop": nlp.Value("string"),
"more_effect_prop": nlp.Value("string"),
"less_effect_prop": nlp.Value("string"),
"less_cause_dir": nlp.Value("string"),
},
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/quartz",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(quartz): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "quartz-dataset-v1-aug2019")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(quartz): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for row in f:
data = json.loads(row)
id_ = data["id"]
question = data["question"]["stem"]
answerKey = data["answerKey"]
choices = data["question"]["choices"]
choice_text = [choice["text"] for choice in choices]
choice_label = [choice["label"] for choice in choices]
para_id = data["para_id"]
para = data["para"]
para_ano = data["para_anno"]
effect_prop = para_ano.get("effect_prop", "")
cause_dir_str = para_ano.get("cause_dir_str", "")
effect_dir_str = para_ano.get("effect_dir_str", "")
cause_dir_sign = para_ano.get("cause_dir_sign", "")
effect_dir_sign = para_ano.get("effect_dir_sign", "")
cause_prop = para_ano.get("cause_prop", "")
question_anno = data["question_anno"]
more_effect_dir = "" if not question_anno else question_anno.get("more_effect_dir", "")
less_effect_dir = "" if not question_anno else question_anno.get("less_effect_dir", "")
less_cause_prop = "" if not question_anno else question_anno.get("less_cause_prop", "")
more_effect_prop = "" if not question_anno else question_anno.get("more_effect_prop", "")
less_effect_prop = "" if not question_anno else question_anno.get("less_effect_prop", "")
                less_cause_dir = "" if not question_anno else question_anno.get("less_cause_dir", "")
yield id_, {
"id": id_,
"question": question,
"choices": {"text": choice_text, "label": choice_label},
"answerKey": answerKey,
"para": para,
"para_id": para_id,
"para_anno": {
"effect_prop": effect_prop,
"cause_dir_str": cause_dir_str,
"effect_dir_str": effect_dir_str,
"cause_dir_sign": cause_dir_sign,
"effect_dir_sign": effect_dir_sign,
"cause_prop": cause_prop,
},
"question_anno": {
"more_effect_dir": more_effect_dir,
"less_effect_dir": less_effect_dir,
"less_cause_prop": less_cause_prop,
"more_effect_prop": more_effect_prop,
"less_effect_prop": less_effect_prop,
"less_cause_dir": less_cause_dir,
},
}
|
nlp-master
|
datasets/quartz/quartz.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Language Model 1 Billion dataset."""
from __future__ import absolute_import, division, print_function
import glob
import logging
import os
import nlp
_CITATION = """\
@article{DBLP:journals/corr/ChelbaMSGBK13,
author = {Ciprian Chelba and
Tomas Mikolov and
Mike Schuster and
Qi Ge and
Thorsten Brants and
Phillipp Koehn},
title = {One Billion Word Benchmark for Measuring Progress in Statistical Language
Modeling},
journal = {CoRR},
volume = {abs/1312.3005},
year = {2013},
url = {http://arxiv.org/abs/1312.3005},
archivePrefix = {arXiv},
eprint = {1312.3005},
timestamp = {Mon, 13 Aug 2018 16:46:16 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/ChelbaMSGBK13},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """\
A benchmark corpus to be used for measuring progress in statistical language \
modeling. This has almost one billion words in the training data.
"""
_DOWNLOAD_URL = "http://www.statmt.org/lm-benchmark/" "1-billion-word-language-modeling-benchmark-r13output.tar.gz"
_TOP_LEVEL_DIR = "1-billion-word-language-modeling-benchmark-r13output"
_TRAIN_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR, "training-monolingual.tokenized.shuffled", "news.en-*")
_HELDOUT_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR, "heldout-monolingual.tokenized.shuffled", "news.en.heldout-*")
class Lm1bConfig(nlp.BuilderConfig):
"""BuilderConfig for Lm1b."""
def __init__(self, **kwargs):
"""BuilderConfig for Lm1b.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Lm1bConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
def _train_data_filenames(tmp_dir):
return sorted(glob.glob(os.path.join(tmp_dir, _TRAIN_FILE_FORMAT)))
def _test_data_filenames(tmp_dir):
return sorted(glob.glob(os.path.join(tmp_dir, _HELDOUT_FILE_FORMAT)))
class Lm1b(nlp.GeneratorBasedBuilder):
"""1 Billion Word Language Model Benchmark dataset."""
BUILDER_CONFIGS = [
Lm1bConfig(name="plain_text", description="Plain text",),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"text": nlp.Value("string")}),
supervised_keys=("text", "text"),
homepage="http://www.statmt.org/lm-benchmark/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
lm1b_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
train_files = _train_data_filenames(lm1b_path)
test_files = _test_data_filenames(lm1b_path)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"files": train_files}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"files": test_files}),
]
def _generate_examples(self, files):
for filepath in files:
logging.info("generating examples from = %s", filepath)
with open(filepath) as f:
for idx, line in enumerate(f):
yield "%s_%d" % (os.path.basename(filepath), idx), {
"text": line.strip(),
}
|
nlp-master
|
datasets/lm1b/lm1b.py
|
"""TODO(hellaswag): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(hellaswag): BibTeX citation
_CITATION = """\
@inproceedings{zellers2019hellaswag,
title={HellaSwag: Can a Machine Really Finish Your Sentence?},
author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin},
booktitle ={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
year={2019}
}
"""
# TODO(hellaswag):
_DESCRIPTION = """
"""
_URL = "https://github.com/rowanz/hellaswag/raw/master/data/"
_TEST_FILE = "hellaswag_test.jsonl"
_TRAIN_FILE = "hellaswag_train.jsonl"
_DEV_FILE = "hellaswag_val.jsonl"
class Hellaswag(nlp.GeneratorBasedBuilder):
"""TODO(hellaswag): Short description of my dataset."""
# TODO(hellaswag): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(hellaswag): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"ind": nlp.Value("int32"),
"activity_label": nlp.Value("string"),
"ctx_a": nlp.Value("string"),
"ctx_b": nlp.Value("string"),
"ctx": nlp.Value("string"),
"endings": nlp.features.Sequence({"ending": nlp.Value("string")}),
"source_id": nlp.Value("string"),
"split": nlp.Value("string"),
"split_type": nlp.Value("string"),
"label": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://rowanzellers.com/hellaswag/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(hellaswag): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {
"train": os.path.join(_URL, _TRAIN_FILE),
"test": os.path.join(_URL, _TEST_FILE),
"dev": os.path.join(_URL, _DEV_FILE),
}
dl_dir = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["train"]},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["test"]},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["dev"]},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(hellaswag): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"ind": int(data["ind"]),
"activity_label": data["activity_label"],
"ctx_a": data.get("ctx_a", ""),
"ctx_b": data.get("ctx_b", ""),
"ctx": data["ctx"],
"endings": {"ending": data.get("endings", [])},
"source_id": data["source_id"],
"split": data["split"],
"split_type": data["split_type"],
"label": str(data.get("label", "")),
}
|
nlp-master
|
datasets/hellaswag/hellaswag.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reddit TIFU dataset using tifu or tldr from subreddit tifu."""
from __future__ import absolute_import, division, print_function
import json
import nlp
_CITATION = """
@misc{kim2018abstractive,
title={Abstractive Summarization of Reddit Posts with Multi-level Memory Networks},
author={Byeongchang Kim and Hyunwoo Kim and Gunhee Kim},
year={2018},
eprint={1811.00783},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
Reddit dataset, where TIFU denotes the name of the subreddit /r/tifu.
As defined in the publication, style "short" uses the title as summary and
"long" uses the tldr as summary.
Features include:
- document: post text without tldr.
- tldr: tldr line.
- title: trimmed title without tldr.
- ups: upvotes.
- score: score.
- num_comments: number of comments.
- upvote_ratio: upvote ratio.
"""
_URL = "https://drive.google.com/uc?export=download&id=1ffWfITKFMJeqjT8loC8aiCLRNJpc_XnF"
_DOCUMENT = "documents"
_TITLE = "title"
_TLDR = "tldr"
_ADDITIONAL_FEATURES = ["ups", "num_comments", "score", "upvote_ratio"]
class RedditTifuConfig(nlp.BuilderConfig):
"""BuilderConfig for RedditTifu."""
def __init__(self, summary_key=None, **kwargs):
"""BuilderConfig for RedditTifu.
Args:
summary_key: key string of summary in downloaded json file.
**kwargs: keyword arguments forwarded to super.
"""
        # Version 1.1.0 removes empty document and summary strings.
super(RedditTifuConfig, self).__init__(version=nlp.Version("1.1.0"), **kwargs)
self.summary_key = summary_key
class RedditTifu(nlp.GeneratorBasedBuilder):
"""Reddit TIFU Dataset."""
BUILDER_CONFIGS = [
RedditTifuConfig(name="short", summary_key=_TITLE, description="Using title as summary.",),
RedditTifuConfig(name="long", summary_key=_TLDR, description="Using TLDR as summary.",),
]
def _info(self):
features = {
"ups": nlp.Value("float32"),
"num_comments": nlp.Value("float32"),
"upvote_ratio": nlp.Value("float32"),
"score": nlp.Value("float32"),
}
features.update({k: nlp.Value("string") for k in [_DOCUMENT, _TLDR, _TITLE]})
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(features),
supervised_keys=(_DOCUMENT, self.config.summary_key),
homepage="https://github.com/ctr4si/MMN",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": dl_path},)]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path, "rb") as f:
for i, line in enumerate(f):
# keys are 'title_tokenized','permalink','title','url','num_comments',
# 'tldr'(optional),'created_utc','trimmed_title_tokenized','ups',
# 'selftext_html','score','upvote_ratio','tldr_tokenized'(optional),
# 'selftext','trimmed_title','selftext_without_tldr_tokenized',
# 'id','selftext_without_tldr'
d = json.loads(line)
r = {
_DOCUMENT: d["selftext_without_tldr"].strip(),
_TITLE: d["trimmed_title"].strip(),
_TLDR: (d["tldr"] or "").strip(),
}
r.update({k: d[k] for k in _ADDITIONAL_FEATURES})
# skip if document or summary is empty
if r[_DOCUMENT] and r[self.config.summary_key]:
yield i, r
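if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # It assumes the Google Drive URL above still serves the corpus; the "long"
    # config name comes from BUILDER_CONFIGS above.
    import nlp
    tifu_long = nlp.load_dataset("reddit_tifu", "long", split="train")
    first = tifu_long[0]
    print(first[_DOCUMENT][:80], "->", first[_TLDR])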
|
nlp-master
|
datasets/reddit_tifu/reddit_tifu.py
|
import json
import logging
import math
import nlp
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Foundation},
title = {Wikimedia Downloads},
url = {https://dumps.wikimedia.org}
}
"""
_DESCRIPTION = """\
Wikipedia version split into plain text snippets for dense semantic indexing.
"""
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
def wiki40b_article_snippets(article, passage_len=100, overlap=0):
paragraphs = article["text"].split("\n")
    article_idx = paragraphs.index("_START_ARTICLE_") + 1
    article_title = paragraphs[article_idx] if article_idx < len(paragraphs) else ""
section_indices = [i + 1 for i, par in enumerate(paragraphs[:-1]) if par == "_START_SECTION_"]
par_tabs = [par.split(" ") for par in paragraphs]
word_map = [
(i, len(" ".join(par[:j])), w)
for i, par in enumerate(par_tabs)
if not par[0].startswith("_START_")
for j, w in enumerate(par)
if i > 0
]
step_size = passage_len - overlap
passages = []
for i in range(math.ceil(len(word_map) / step_size)):
pre_toks = word_map[i * step_size : i * step_size + passage_len]
start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
section_ids = [j for j in section_indices if j >= start_section_id and j <= pre_toks[-1][0]]
section_ids = section_ids if len(section_ids) > 0 else [0]
passage_text = " ".join([w for p_id, s_id, w in pre_toks])
passages += [
{
"article_title": article_title,
"section_title": " & ".join([paragraphs[j] for j in section_ids]),
"wiki_id": article["wikidata_id"],
"start_paragraph": pre_toks[0][0],
"start_character": pre_toks[0][1],
"end_paragraph": pre_toks[-1][0],
"end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
"passage_text": passage_text.replace("_NEWLINE_", "\n"),
}
]
return passages
def wikipedia_article_snippets(article, passage_len=100, overlap=0):
paragraphs = [par for par in article["text"].split("\n") if not par.startswith("Category:")]
if "References" in paragraphs:
paragraphs = paragraphs[: paragraphs.index("References")]
article_title = article["title"]
section_indices = [
i + 1
for i, par in enumerate(paragraphs[:-2])
if paragraphs[i] == "" and paragraphs[i + 1] != "" and paragraphs[i + 2] != ""
]
par_tabs = [par.split(" ") for par in paragraphs]
word_map = [(i, len(" ".join(par[:j])), w) for i, par in enumerate(par_tabs) for j, w in enumerate(par)]
step_size = passage_len - overlap
passages = []
for i in range(math.ceil(len(word_map) / step_size)):
pre_toks = word_map[i * step_size : i * step_size + passage_len]
start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
section_ids = [j for j in section_indices if j >= start_section_id and j <= pre_toks[-1][0]]
section_ids = section_ids if len(section_ids) > 0 else [-1]
passage_text = " ".join([w for p_id, s_id, w in pre_toks])
passages += [
{
"article_title": article_title,
"section_title": " & ".join(["Start" if j == -1 else paragraphs[j].strip() for j in section_ids]),
"wiki_id": article_title.replace(" ", "_"),
"start_paragraph": pre_toks[0][0],
"start_character": pre_toks[0][1],
"end_paragraph": pre_toks[-1][0],
"end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
"passage_text": passage_text,
}
]
return passages
_SPLIT_FUNCTION_MAP = {
"wikipedia": wikipedia_article_snippets,
"wiki40b": wiki40b_article_snippets,
}
def generate_snippets(wikipedia, split_function, passage_len=100, overlap=0):
for i, article in enumerate(wikipedia):
        for doc in split_function(article, passage_len, overlap):
part_id = json.dumps(
{
"nlp_id": i,
"wiki_id": doc["wiki_id"],
"sp": doc["start_paragraph"],
"sc": doc["start_character"],
"ep": doc["end_paragraph"],
"ec": doc["end_character"],
}
)
doc["_id"] = part_id
doc["nlp_id"] = i
yield doc
class WikiSnippetsConfig(nlp.BuilderConfig):
"""BuilderConfig for WikiSnippets."""
def __init__(
self, wikipedia_name="wiki40b", wikipedia_version_name="en", snippets_length=100, snippets_overlap=0, **kwargs
):
"""BuilderConfig for WikiSnippets.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(WikiSnippetsConfig, self).__init__(**kwargs)
self.wikipedia_name = wikipedia_name
self.wikipedia_version_name = wikipedia_version_name
self.snippets_length = snippets_length
self.snippets_overlap = snippets_overlap
class WikiSnippets(nlp.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = WikiSnippetsConfig
BUILDER_CONFIGS = [
WikiSnippetsConfig(
name="wiki40b_en_100_0",
version=nlp.Version("1.0.0"),
wikipedia_name="wiki40b",
wikipedia_version_name="en",
snippets_length=100,
snippets_overlap=0,
),
WikiSnippetsConfig(
name="wikipedia_en_100_0",
version=nlp.Version("1.0.0"),
wikipedia_name="wikipedia",
wikipedia_version_name="20200501.en",
snippets_length=100,
snippets_overlap=0,
),
]
test_dummy_data = False
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"_id": nlp.Value("string"),
"nlp_id": nlp.Value("int32"),
"wiki_id": nlp.Value("string"),
"start_paragraph": nlp.Value("int32"),
"start_character": nlp.Value("int32"),
"end_paragraph": nlp.Value("int32"),
"end_character": nlp.Value("int32"),
"article_title": nlp.Value("string"),
"section_title": nlp.Value("string"),
"passage_text": nlp.Value("string"),
}
),
supervised_keys=None,
homepage="https://dumps.wikimedia.org",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
wikipedia = nlp.load_dataset(path=self.config.wikipedia_name, name=self.config.wikipedia_version_name,)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"wikipedia": wikipedia}),
]
def _generate_examples(self, wikipedia):
logging.info(
"generating examples from = {} {}".format(self.config.wikipedia_name, self.config.wikipedia_version_name)
)
for split in wikipedia:
dset = wikipedia[split]
            split_function = _SPLIT_FUNCTION_MAP[self.config.wikipedia_name]
for doc in generate_snippets(
dset, split_function, passage_len=self.config.snippets_length, overlap=self.config.snippets_overlap
):
id_ = doc["_id"]
yield id_, doc
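if __name__ == "__main__":
    # Sketch added for illustration; not part of the original script.
    # It exercises the wiki40b snippet splitter on a tiny hand-written article
    # using the _START_* markers the function expects, so no download is needed.
    toy_article = {
        "wikidata_id": "Q1",
        "text": "_START_ARTICLE_\nToy article\n_START_SECTION_\nIntro\n_START_PARAGRAPH_\none two three four five",
    }
    for snippet in wiki40b_article_snippets(toy_article, passage_len=3, overlap=1):
        print(snippet["section_title"], "->", snippet["passage_text"])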
|
nlp-master
|
datasets/wiki_snippets/wiki_snippets.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Mathematics database."""
from __future__ import absolute_import, division, print_function
import logging
import os
import nlp
_CITATION = """
@article{2019arXiv,
author = {Saxton, Grefenstette, Hill, Kohli},
title = {Analysing Mathematical Reasoning Abilities of Neural Models},
year = {2019},
journal = {arXiv:1904.01557}
}
"""
_DESCRIPTION = """
Mathematics database.
This dataset code generates mathematical question and answer pairs,
from a range of question types at roughly school-level difficulty.
This is designed to test the mathematical learning and algebraic
reasoning skills of learning models.
Original paper: Analysing Mathematical Reasoning Abilities of Neural Models
(Saxton, Grefenstette, Hill, Kohli).
Example usage:
train_examples, val_examples = nlp.load_dataset(
'math_dataset/arithmetic__mul',
split=['train', 'test'],
as_supervised=True)
"""
_DATA_URL = "https://storage.googleapis.com/mathematics-dataset/mathematics_dataset-v1.0.tar.gz"
_TRAIN_CATEGORY = [
"train-easy",
"train-medium",
"train-hard",
]
_INTERPOLATE_CATEGORY = [
"interpolate",
]
_MODULES = [
# extrapolate
"measurement__conversion",
# interpolate
"algebra__linear_1d",
"algebra__linear_1d_composed",
"algebra__linear_2d",
"algebra__linear_2d_composed",
"algebra__polynomial_roots",
"algebra__polynomial_roots_composed",
"algebra__sequence_next_term",
"algebra__sequence_nth_term",
"arithmetic__add_or_sub",
"arithmetic__add_or_sub_in_base",
"arithmetic__add_sub_multiple",
"arithmetic__div",
"arithmetic__mixed",
"arithmetic__mul",
"arithmetic__mul_div_multiple",
"arithmetic__nearest_integer_root",
"arithmetic__simplify_surd",
"calculus__differentiate",
"calculus__differentiate_composed",
"comparison__closest",
"comparison__closest_composed",
"comparison__kth_biggest",
"comparison__kth_biggest_composed",
"comparison__pair",
"comparison__pair_composed",
"comparison__sort",
"comparison__sort_composed",
"measurement__conversion",
"measurement__time",
"numbers__base_conversion",
"numbers__div_remainder",
"numbers__div_remainder_composed",
"numbers__gcd",
"numbers__gcd_composed",
"numbers__is_factor",
"numbers__is_factor_composed",
"numbers__is_prime",
"numbers__is_prime_composed",
"numbers__lcm",
"numbers__lcm_composed",
"numbers__list_prime_factors",
"numbers__list_prime_factors_composed",
"numbers__place_value",
"numbers__place_value_composed",
"numbers__round_number",
"numbers__round_number_composed",
"polynomials__add",
"polynomials__coefficient_named",
"polynomials__collect",
"polynomials__compose",
"polynomials__evaluate",
"polynomials__evaluate_composed",
"polynomials__expand",
"polynomials__simplify_power",
"probability__swr_p_level_set",
"probability__swr_p_sequence",
# train-easy train-medium train-hard
"algebra__linear_1d",
"algebra__linear_1d_composed",
"algebra__linear_2d",
"algebra__linear_2d_composed",
"algebra__polynomial_roots",
"algebra__polynomial_roots_composed",
"algebra__sequence_next_term",
"algebra__sequence_nth_term",
"arithmetic__add_or_sub",
"arithmetic__add_or_sub_in_base",
"arithmetic__add_sub_multiple",
"arithmetic__div",
"arithmetic__mixed",
"arithmetic__mul",
"arithmetic__mul_div_multiple",
"arithmetic__nearest_integer_root",
"arithmetic__simplify_surd",
"calculus__differentiate",
"calculus__differentiate_composed",
"comparison__closest",
"comparison__closest_composed",
"comparison__kth_biggest",
"comparison__kth_biggest_composed",
"comparison__pair",
"comparison__pair_composed",
"comparison__sort",
"comparison__sort_composed",
"measurement__conversion",
"measurement__time",
"numbers__base_conversion",
"numbers__div_remainder",
"numbers__div_remainder_composed",
"numbers__gcd",
"numbers__gcd_composed",
"numbers__is_factor",
"numbers__is_factor_composed",
"numbers__is_prime",
"numbers__is_prime_composed",
"numbers__lcm",
"numbers__lcm_composed",
"numbers__list_prime_factors",
"numbers__list_prime_factors_composed",
"numbers__place_value",
"numbers__place_value_composed",
"numbers__round_number",
"numbers__round_number_composed",
"polynomials__add",
"polynomials__coefficient_named",
"polynomials__collect",
"polynomials__compose",
"polynomials__evaluate",
"polynomials__evaluate_composed",
"polynomials__expand",
"polynomials__simplify_power",
"probability__swr_p_level_set",
"probability__swr_p_sequence",
]
_QUESTION = "question"
_ANSWER = "answer"
_DATASET_VERSION = "mathematics_dataset-v1.0"
def _generate_builder_configs():
"""Generate configs with different subsets of mathematics dataset."""
configs = []
for module in sorted(set(_MODULES)):
configs.append(nlp.BuilderConfig(name=module, version=nlp.Version("1.0.0"), description=_DESCRIPTION,))
return configs
class MathDataset(nlp.GeneratorBasedBuilder):
"""Math Dataset."""
BUILDER_CONFIGS = _generate_builder_configs()
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_QUESTION: nlp.Value("string"), _ANSWER: nlp.Value("string"),}),
supervised_keys=(_QUESTION, _ANSWER),
homepage="https://github.com/deepmind/mathematics_dataset",
citation=_CITATION,
)
def _read_data_from_all_categories(self, directory, config, categories):
lines = []
for category in categories:
data_file = os.path.join(directory, _DATASET_VERSION, category, config)
if os.path.exists(data_file):
with open(data_file) as f:
ls = f.read().split("\n")
                    # drop empty lines left over from the trailing newline
                    ls = [l for l in ls if l]
lines.extend(ls)
return lines
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
directory = dl_manager.download_and_extract(_DATA_URL)
config = self.config.name + ".txt"
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"directory": directory, "config": config, "categories": _TRAIN_CATEGORY,},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"directory": directory, "config": config, "categories": _INTERPOLATE_CATEGORY,},
),
]
def _generate_examples(self, directory, config, categories):
"""Yields examples based on directory, module file.."""
lines = self._read_data_from_all_categories(directory, config, categories)
logging.info("%s: %s contains total: %d", categories, config, len(lines))
questions = lines[::2]
answers = lines[1::2]
assert len(answers) == len(questions), "answers: %d do not match questions: %d" % (
len(answers),
len(questions),
)
for idx, (q, a) in enumerate(zip(questions, answers)):
result = {_QUESTION: q, _ANSWER: a}
if all(result.values()):
yield idx, result
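if __name__ == "__main__":
    # Sketch added for illustration; not part of the original script.
    # The raw module files alternate question and answer lines, which is why
    # _generate_examples pairs lines[::2] with lines[1::2]; a toy list stands
    # in for a downloaded file here.
    toy_lines = ["What is 2*3?", "6", "Solve 4 + x = 7 for x.", "3"]
    for idx, (q, a) in enumerate(zip(toy_lines[::2], toy_lines[1::2])):
        print(idx, {_QUESTION: q, _ANSWER: a})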
|
nlp-master
|
datasets/math_dataset/math_dataset.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Multi-News dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """
@misc{alex2019multinews,
title={Multi-News: a Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model},
author={Alexander R. Fabbri and Irene Li and Tianwei She and Suyi Li and Dragomir R. Radev},
year={2019},
eprint={1906.01749},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
Multi-News consists of news articles and human-written summaries
of these articles from the site newser.com.
Each summary is professionally written by editors and
includes links to the original articles cited.
There are two features:
- document: text of news articles separated by special token "|||||".
- summary: news summary.
"""
_URL = "https://drive.google.com/uc?export=download&id=1vRY2wM6rlOZrf9exGTm5pXj5ExlVwJ0C"
_DOCUMENT = "document"
_SUMMARY = "summary"
class MultiNews(nlp.GeneratorBasedBuilder):
"""Multi-News dataset."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_DOCUMENT: nlp.Value("string"), _SUMMARY: nlp.Value("string")}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/Alex-Fabbri/Multi-News",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
extract_path = os.path.join(dl_manager.download_and_extract(_URL), "multi-news-original")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": os.path.join(extract_path, "train")},),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"path": os.path.join(extract_path, "val")},),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"path": os.path.join(extract_path, "test")},),
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(os.path.join(path + ".src")) as src_f, open(os.path.join(path + ".tgt")) as tgt_f:
for i, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
yield i, {
# In original file, each line has one example and natural newline
# tokens "\n" are being replaced with "NEWLINE_CHAR". Here restore
# the natural newline token to avoid special vocab "NEWLINE_CHAR".
_DOCUMENT: src_line.strip().replace("NEWLINE_CHAR", "\n"),
# Remove the starting token "- " for every target sequence.
_SUMMARY: tgt_line.strip().lstrip("- "),
}
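if __name__ == "__main__":
    # Sketch added for illustration; not part of the original script.
    # It applies the same per-line clean-up as _generate_examples to a toy
    # source/target pair, so no download is needed.
    toy_src = "First article. NEWLINE_CHAR NEWLINE_CHAR Second article. |||||\n"
    toy_tgt = "- A short human-written summary.\n"
    print(toy_src.strip().replace("NEWLINE_CHAR", "\n"))
    print(toy_tgt.strip().lstrip("- "))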
|
nlp-master
|
datasets/multi_news/multi_news.py
|
"""TODO(hansards): Add a description here."""
from __future__ import absolute_import, division, print_function
import glob
import os
import nlp
# TODO(hansards): BibTeX citation
_CITATION = """
"""
# TODO(hansards):
_DESCRIPTION = """
This release contains 1.3 million pairs of aligned text chunks (sentences or smaller fragments)
from the official records (Hansards) of the 36th Canadian Parliament.
The complete Hansards of the debates in the House and Senate of the 36th Canadian Parliament,
as far as available, were aligned. The corpus was then split into 5 sets of sentence pairs:
training (80% of the sentence pairs), two sets of sentence pairs for testing (5% each), and
two sets of sentence pairs for final evaluation (5% each). The current release consists of the
training and testing sets. The evaluation sets are reserved for future MT evaluation purposes
and currently not available.
Caveats
1. This release contains only sentence pairs. Even though the order of the sentences is the same
as in the original, there may be gaps resulting from many-to-one, many-to-many, or one-to-many
alignments that were filtered out. Therefore, this release may not be suitable for
discourse-related research.
2. Neither the sentence splitting nor the alignments are perfect. In particular, watch out for
pairs that differ considerably in length. You may want to filter these out before you do
any statistical training.
The alignment of the Hansards was performed as part of the ReWrite project under funding
from the DARPA TIDES program.
"""
_URL = "https://www.isi.edu/natural-language/download/hansard/"
_DATA_URL = "http://www.isi.edu/natural-language/download/hansard/"
_HOUSE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.house.debates.training.tar"
_HOUSE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.house.debates.testing.tar"
_SENATE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.senate.debates.training.tar"
_SENATE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.senate.debates.testing.tar"
class HansardsConfig(nlp.BuilderConfig):
"""BuilderConfig for Hansards."""
def __init__(self, **kwargs):
"""BuilderConfig for Hansards.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(HansardsConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Hansards(nlp.GeneratorBasedBuilder):
"""TODO(hansards): Short description of my dataset."""
# TODO(hansards): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
HansardsConfig(
name="house",
description="""\
Alignment of debates in the House of the 36th Canadian Parliament: 1,070K sentence pairs.
""",
),
HansardsConfig(
name="senate",
description="""\
Alignment of debates in the Senate of the 36th Canadian Parliament: 208K sentence pairs.
""",
),
]
def _info(self):
# TODO(hansards): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"fr": nlp.Value("string"),
"en": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(hansards): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
name = self.config.name
if name == "house":
urls_to_download = {
"train": os.path.join(_DATA_URL, _HOUSE_DEBATES_TRAIN_SET_FILE),
"test": os.path.join(_DATA_URL, _HOUSE_DEBATES_TEST_SET_FILE),
}
elif name == "senate":
urls_to_download = {
"train": os.path.join(_DATA_URL, _SENATE_DEBATES_TRAIN_SET_FILE),
"test": os.path.join(_DATA_URL, _SENATE_DEBATES_TEST_SET_FILE),
}
else:
raise ValueError("Wrong builder config name '{}', it has to be either 'house' or 'senate'.".format(name))
downloaded_files = dl_manager.download_and_extract(urls_to_download)
if type(downloaded_files) == str:
downloaded_files = {k: downloaded_files for k in urls_to_download.keys()}
fr_files = {}
en_files = {}
for split_name in downloaded_files.keys():
archive_dir = "hansard.36/Release-2001.1a/sentence-pairs/{}/debates/development/{}".format(
name, split_name + "ing"
)
data_dir = os.path.join(downloaded_files[split_name], archive_dir)
split_compress_files = list(sorted(glob.glob(os.path.join(data_dir, "*.gz"))))
split_compress_files += list(sorted(glob.glob(os.path.join(data_dir, "**/*.gz"))))
fr_split_compress_files = sorted([f for f in split_compress_files if f.endswith(".f.gz")])
en_split_compress_files = sorted([f for f in split_compress_files if f.endswith(".e.gz")])
fr_files[split_name] = dl_manager.extract(fr_split_compress_files)
en_files[split_name] = dl_manager.extract(en_split_compress_files)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"fr_files": fr_files["train"], "en_files": en_files["train"]},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"fr_files": fr_files["test"], "en_files": en_files["test"]},
),
]
def _generate_examples(self, fr_files, en_files):
"""Yields examples."""
# TODO(hansards): Yields (key, example) tuples from the dataset
for fr_file, en_file in zip(fr_files, en_files):
with open(fr_file, "rb") as fr:
with open(en_file, "rb") as en:
for j, (fr_line, en_line) in enumerate(zip(fr, en)):
line_id = "{}:{}".format(fr_file, j)
rec = {"fr": fr_line.decode("ISO-8859-1").strip(), "en": en_line.decode("ISO-8859-1").strip()}
yield line_id, rec
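if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # It assumes the ISI download URLs above are still reachable; "senate" is
    # one of the two builder configs defined above.
    import nlp
    senate = nlp.load_dataset("hansards", "senate", split="train")
    pair = senate[0]
    print(pair["fr"], "|||", pair["en"])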
|
nlp-master
|
datasets/hansards/hansards.py
|
"""TODO(squad_es): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(squad_es): BibTeX citation
_CITATION = """\
@article{2016arXiv160605250R,
author = {Casimiro Pio , Carrino and Marta R. , Costa-jussa and Jose A. R. , Fonollosa},
title = "{Automatic Spanish Translation of the SQuAD Dataset for Multilingual
Question Answering}",
journal = {arXiv e-prints},
year = 2019,
eid = {arXiv:1912.05200v1},
pages = {arXiv:1912.05200v1},
archivePrefix = {arXiv},
eprint = {1912.05200v2},
}
"""
# TODO(squad_es_v1):
_DESCRIPTION = """\
Automatic translation of the Stanford Question Answering Dataset (SQuAD), versions 1.1 and 2.0, into Spanish
"""
_URL = "https://raw.githubusercontent.com/ccasimiro88/TranslateAlignRetrieve/master/"
class SquadEsConfig(nlp.BuilderConfig):
"""BuilderConfig for SQUADEsV2."""
def __init__(self, **kwargs):
"""BuilderConfig for SQUADEsV2.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(SquadEsConfig, self).__init__(**kwargs)
class SquadEs(nlp.GeneratorBasedBuilder):
"""TODO(squad_es): Short description of my dataset."""
# TODO(squad_es): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
SquadEsConfig(
name="v1.1.0",
version=nlp.Version("1.1.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text Spanish squad version 1",
),
SquadEsConfig(
name="v2.0.0",
version=nlp.Version("2.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text Spanish squad version 2",
),
]
def _info(self):
# TODO(squad_es): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/ccasimiro88/TranslateAlignRetrieve",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(squad_es): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
v1_urls = {
"train": os.path.join(_URL, "SQuAD-es-v1.1/train-v1.1-es.json"),
"dev": os.path.join(_URL, "SQuAD-es-v1.1/dev-v1.1-es.json"),
}
v2_urls = {
"train": os.path.join(_URL, "SQuAD-es-v2.0/train-v2.0-es.json"),
"dev": os.path.join(_URL, "SQuAD-es-v2.0/dev-v2.0-es.json"),
}
if self.config.name == "v1.1.0":
dl_dir = dl_manager.download_and_extract(v1_urls)
elif self.config.name == "v2.0.0":
dl_dir = dl_manager.download_and_extract(v2_urls)
else:
raise Exception("version does not match any existing one")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["train"]},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["dev"]},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(squad_es): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for example in data["data"]:
title = example.get("title", "").strip()
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
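if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # The config name picks between the v1.1 and v2.0 translations defined above.
    import nlp
    squad_es_v2 = nlp.load_dataset("squad_es", "v2.0.0", split="validation")
    sample = squad_es_v2[0]
    print(sample["question"])
    # "answers" is a Sequence feature, so texts and start offsets are parallel lists.
    print(sample["answers"]["text"], sample["answers"]["answer_start"])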
|
nlp-master
|
datasets/squad_es/squad_es.py
|
"""TODO(scifact): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(scifact): BibTeX citation
_CITATION = """\
@inproceedings{scifact2020,
title={ Fact or Fiction: Verifying Scientific Claims},
author={David, Wadden and Kyle, Lo and Lucy Lu, Wang and Shanchuan, Lin and Madeleine van, Zuylen and Arman, Cohan and Hannaneh, Hajishirzi},
booktitle={2011 AAAI Spring Symposium Series},
year={2020},
}
"""
# TODO(scifact):
_DESCRIPTION = """\
SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales
"""
_URL = "https://ai2-s2-scifact.s3-us-west-2.amazonaws.com/release/2020-05-01/data.tar.gz"
class ScifactConfig(nlp.BuilderConfig):
"""BuilderConfig for Scifact"""
def __init__(self, **kwargs):
"""
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(ScifactConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Scifact(nlp.GeneratorBasedBuilder):
"""TODO(scifact): Short description of my dataset."""
# TODO(scifact): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
ScifactConfig(name="corpus", description=" The corpus of evidence documents"),
ScifactConfig(name="claims", description=" The claims are split into train, test, dev"),
]
def _info(self):
# TODO(scifact): Specifies the nlp.DatasetInfo object
if self.config.name == "corpus":
features = {
"doc_id": nlp.Value("int32"), # The document's S2ORC ID.
"title": nlp.Value("string"), # The title.
"abstract": nlp.features.Sequence(
{"sentence": nlp.Value("string")}
), # The abstract, written as a list of sentences.
"structured": nlp.Value("bool"), # Indicator for whether this is a structured abstract.
}
else:
features = {
"id": nlp.Value("int32"), # An integer claim ID.
"claim": nlp.Value("string"), # The text of the claim.
"evidence_doc_id": nlp.Value("string"),
"evidence_label": nlp.Value("string"), # Label for the rationale.
"evidence_sentences": nlp.features.Sequence({"sentence": nlp.Value("int32")}), # Rationale sentences.
"cited_doc_ids": nlp.features.Sequence(
{"doc_id": nlp.Value("int32")}
), # The claim's "cited documents".
}
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
features
# These are the features of your dataset like images, labels ...
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://scifact.apps.allenai.org/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(scifact): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
if self.config.name == "corpus":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "data", "corpus.jsonl"), "split": "train"},
),
]
else:
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_train.jsonl"), "split": "train"},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_test.jsonl"), "split": "test"},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_dev.jsonl"), "split": "dev"},
),
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
# TODO(scifact): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
if self.config.name == "corpus":
yield id_, {
"doc_id": int(data["doc_id"]),
"title": data["title"],
"abstract": {"sentence": data["abstract"]},
"structured": data["structured"],
}
else:
if split == "test":
yield id_, {
"id": data["id"],
"claim": data["claim"],
"evidence_doc_id": "",
"evidence_label": "",
"evidence_sentences": {"sentence": []},
"cited_doc_ids": {"doc_id": []},
}
else:
evidences = data["evidence"]
if evidences:
for id1, doc_id in enumerate(evidences):
for id2, evidence in enumerate(evidences[doc_id]):
yield str(id_) + "_" + str(id1) + "_" + str(id2), {
"id": data["id"],
"claim": data["claim"],
"evidence_doc_id": doc_id,
"evidence_label": evidence["label"],
"evidence_sentences": {"sentence": evidence["sentences"]},
"cited_doc_ids": {"doc_id": data.get("cited_doc_ids", [])},
}
else:
yield id_, {
"id": data["id"],
"claim": data["claim"],
"evidence_doc_id": "",
"evidence_label": "",
"evidence_sentences": {"sentence": []},
"cited_doc_ids": {"doc_id": data.get("cited_doc_ids", [])},
}
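if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # The "corpus" and "claims" configs defined above expose different schemas,
    # so they are loaded separately.
    import nlp
    corpus = nlp.load_dataset("scifact", "corpus", split="train")
    claims = nlp.load_dataset("scifact", "claims", split="validation")
    print(corpus[0]["title"])
    print(claims[0]["claim"], "->", claims[0]["evidence_label"])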
|
nlp-master
|
datasets/scifact/scifact.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Annotated Enron Subject Line Corpus Dataset."""
from __future__ import absolute_import, division, print_function
import glob
import os
import nlp
_CITATION = """
@misc{zhang2019email,
title={This Email Could Save Your Life: Introducing the Task of Email Subject Line Generation},
author={Rui Zhang and Joel Tetreault},
year={2019},
eprint={1906.03497},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
A collection of email messages of employees in the Enron Corporation.
There are two features:
- email_body: email body text.
- subject_line: email subject text.
"""
_URL = "https://github.com/ryanzhumich/AESLC/archive/master.zip"
_DOCUMENT = "email_body"
_SUMMARY = "subject_line"
class Aeslc(nlp.GeneratorBasedBuilder):
"""Annotated Enron Subject Line Corpus Dataset."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_DOCUMENT: nlp.Value("string"), _SUMMARY: nlp.Value("string")}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/ryanzhumich/AESLC",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
input_path = os.path.join(dl_path, "AESLC-master", "enron_subject_line")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"pattern": os.path.join(input_path, "train", "*.subject")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"pattern": os.path.join(input_path, "dev", "*.subject")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"pattern": os.path.join(input_path, "test", "*.subject")},
),
]
def _generate_examples(self, pattern=None):
"""Yields examples."""
for filename in sorted(glob.glob(pattern)):
email_body, subject_line = _parse_email_file(filename)
            # str.rstrip strips a character set, not a suffix, so slice off ".subject" instead
            key = os.path.basename(filename)[: -len(".subject")]
yield key, {_DOCUMENT: email_body, _SUMMARY: subject_line}
def _parse_email_file(filename):
"""Parse email file text for email body and subject."""
with open(filename) as f:
email_body = ""
for line in f:
if line == "\n":
break
email_body += line
line = next(f)
subject = ""
for line in f:
if line == "\n":
break
subject += line
return email_body, subject
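if __name__ == "__main__":
    # Sketch added for illustration; not part of the original script.
    # It exercises _parse_email_file on a temporary file laid out the way the
    # parser expects: body, a blank line, one marker line (skipped), then the
    # subject. The "@subject" marker text is only a guess for this toy file.
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".subject", delete=False) as tmp:
        tmp.write("Please review the attached draft.\n\n@subject\nDraft for review\n")
        toy_path = tmp.name
    body, subject = _parse_email_file(toy_path)
    print(repr(body), repr(subject))
    os.remove(toy_path)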
|
nlp-master
|
datasets/aeslc/aeslc.py
|
from __future__ import absolute_import, division, print_function
import nlp
_DESCRIPTION = """\
"""
_URL = "https://www.gutenberg.org/files/2554/2554-h/2554-h.htm"
_DATA_URL = "https://raw.githubusercontent.com/patrickvonplaten/datasets/master/crime_and_punishment.txt"
class CrimeAndPunishConfig(nlp.BuilderConfig):
"""BuilderConfig for Crime and Punish."""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for BlogAuthorship
Args:
data_url: `string`, url to the dataset (word or raw level)
**kwargs: keyword arguments forwarded to super.
"""
super(CrimeAndPunishConfig, self).__init__(version=nlp.Version("1.0.0",), **kwargs)
self.data_url = data_url
class CrimeAndPunish(nlp.GeneratorBasedBuilder):
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
CrimeAndPunishConfig(
name="crime-and-punish",
data_url=_DATA_URL,
description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
),
]
def _info(self):
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features({"line": nlp.Value("string"),}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
homepage=_URL,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if self.config.name == "crime-and-punish":
data = dl_manager.download_and_extract(self.config.data_url)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"data_file": data, "split": "train"},),
]
else:
raise ValueError("{} does not exist".format(self.config.name))
def _generate_examples(self, data_file, split):
with open(data_file, "rb") as f:
id_counter = 0
add_text = False
crime_and_punishment_occ_counter = 0
for line in f:
line = line.decode("UTF-8")
if "CRIME AND PUNISHMENT" in line:
crime_and_punishment_occ_counter += 1
add_text = crime_and_punishment_occ_counter == 3
if "End of Project" in line:
add_text = False
if add_text is True:
result = {"line": line}
id_counter += 1
yield id_counter, result
|
nlp-master
|
datasets/crime_and_punish/crime_and_punish.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BillSum Dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@misc{kornilova2019billsum,
title={BillSum: A Corpus for Automatic Summarization of US Legislation},
author={Anastassia Kornilova and Vlad Eidelman},
year={2019},
eprint={1910.00523},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
BillSum, summarization of US Congressional and California state bills.
There are several features:
- text: bill text.
- summary: summary of the bills.
- title: title of the bills.
The following features are present for US bills only; CA bills do not have them:
- text_len: number of chars in text.
- sum_len: number of chars in summary.
"""
_URL = "https://drive.google.com/uc?export=download&id=1g89WgFHMRbr4QrvA0ngh26PY081Nv3lx"
_DOCUMENT = "text"
_SUMMARY = "summary"
class Billsum(nlp.GeneratorBasedBuilder):
"""BillSum Dataset."""
# 2.0.0 data source updated to filter near duplicates.
# 3.0.0 none of the test examples are 'near duplicates' of an example in the
    # train set AND they don't have the same title, regardless of similarity.
VERSION = nlp.Version("3.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{_DOCUMENT: nlp.Value("string"), _SUMMARY: nlp.Value("string"), "title": nlp.Value("string"),}
),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/FiscalNote/BillSum",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"path": os.path.join(dl_path, "us_train_data_final_OFFICIAL.jsonl"), "key": "bill_id"},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"path": os.path.join(dl_path, "us_test_data_final_OFFICIAL.jsonl"), "key": "bill_id"},
),
nlp.SplitGenerator(
name="ca_test",
gen_kwargs={"path": os.path.join(dl_path, "ca_test_data_final_OFFICIAL.jsonl"), "key": "external_id"},
),
]
def _generate_examples(self, path=None, key=None):
"""Yields examples."""
with open(path) as f:
for line in f:
# in us bills, json has fields:
# text, summary, title, bill_id, text_len, sum_len
# in ca bills, json has fields:
# text, summary, title, external_id
d = json.loads(line)
yield d[key], {k: d[k] for k in [_DOCUMENT, _SUMMARY, "title"]}
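if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # Besides train/test, the builder defines a "ca_test" split for California
    # bills; requesting it by name like this assumes the usual split-name
    # handling in `nlp.load_dataset`.
    import nlp
    ca_bills = nlp.load_dataset("billsum", split="ca_test")
    print(ca_bills[0]["title"])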
|
nlp-master
|
datasets/billsum/billsum.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""e-SNLI: Natural Language Inference with Natural Language Explanations."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
_CITATION = """
@incollection{NIPS2018_8163,
title = {e-SNLI: Natural Language Inference with Natural Language Explanations},
author = {Camburu, Oana-Maria and Rockt\"{a}schel, Tim and Lukasiewicz, Thomas and Blunsom, Phil},
booktitle = {Advances in Neural Information Processing Systems 31},
editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. Cesa-Bianchi and R. Garnett},
pages = {9539--9549},
year = {2018},
publisher = {Curran Associates, Inc.},
url = {http://papers.nips.cc/paper/8163-e-snli-natural-language-inference-with-natural-language-explanations.pdf}
}
"""
_DESCRIPTION = """
The e-SNLI dataset extends the Stanford Natural Language Inference Dataset to
include human-annotated natural language explanations of the entailment
relations.
"""
_URL = "https://raw.githubusercontent.com/OanaMariaCamburu/e-SNLI/master/dataset/"
class Esnli(nlp.GeneratorBasedBuilder):
"""e-SNLI: Natural Language Inference with Natural Language Explanations corpus."""
# Version History
# 0.0.2 Added explanation_2, explanation_3 fields which exist in the dev/test
# splits only.
# 0.0.1 Initial version
BUILDER_CONFIGS = [
nlp.BuilderConfig(name="plain_text", version=nlp.Version("0.0.2"), description="Plain text import of e-SNLI",)
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"premise": nlp.Value("string"),
"hypothesis": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
"explanation_1": nlp.Value("string"),
"explanation_2": nlp.Value("string"),
"explanation_3": nlp.Value("string"),
}
),
supervised_keys=None,
homepage="https://github.com/OanaMariaCamburu/e-SNLI",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
files = dl_manager.download_and_extract(
{
"train": [os.path.join(_URL, "esnli_train_1.csv"), os.path.join(_URL, "esnli_train_2.csv")],
"validation": [os.path.join(_URL, "esnli_dev.csv")],
"test": [os.path.join(_URL, "esnli_test.csv")],
}
)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"files": files["train"]},),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"files": files["validation"]},),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"files": files["test"]},),
]
def _generate_examples(self, files):
"""Yields examples."""
for filepath in files:
with open(filepath) as f:
reader = csv.DictReader(f)
for _, row in enumerate(reader):
yield row["pairID"], {
"premise": row["Sentence1"],
"hypothesis": row["Sentence2"],
"label": row["gold_label"],
"explanation_1": row["Explanation_1"],
"explanation_2": row.get("Explanation_2", ""),
"explanation_3": row.get("Explanation_3", ""),
}
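if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # explanation_2 and explanation_3 only exist in the dev/test CSVs, so they
    # come back as empty strings on the train split.
    import nlp
    esnli_dev = nlp.load_dataset("esnli", "plain_text", split="validation")
    row = esnli_dev[0]
    print(row["premise"], "/", row["hypothesis"], "->", row["label"])
    print(row["explanation_1"], "|", row["explanation_2"])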
|
nlp-master
|
datasets/esnli/esnli.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CFQ (Compositional Freebase Question) dataset."""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import re
import nlp
_CITATION = """
@inproceedings{Keysers2020,
title={Measuring Compositional Generalization: A Comprehensive Method on
Realistic Data},
author={Daniel Keysers and Nathanael Sch\"{a}rli and Nathan Scales and
Hylke Buisman and Daniel Furrer and Sergii Kashubin and
Nikola Momchev and Danila Sinopalnikov and Lukasz Stafiniak and
Tibor Tihon and Dmitry Tsarkov and Xiao Wang and Marc van Zee and
Olivier Bousquet},
booktitle={ICLR},
year={2020},
url={https://arxiv.org/abs/1912.09713.pdf},
}
"""
_DESCRIPTION = """
The CFQ dataset (and its splits) for measuring compositional generalization.
See https://arxiv.org/abs/1912.09713.pdf for background.
Example usage:
data = nlp.load_dataset('cfq/mcd1')
"""
_DATA_URL = "https://storage.googleapis.com/cfq_dataset/cfq.tar.gz"
class CfqConfig(nlp.BuilderConfig):
"""BuilderConfig for CFQ splits."""
def __init__(self, name, directory="splits", **kwargs):
"""BuilderConfig for CFQ.
Args:
name: Unique name of the split.
directory: Which subdirectory to read the split from.
**kwargs: keyword arguments forwarded to super.
"""
# Version history:
super(CfqConfig, self).__init__(name=name, version=nlp.Version("1.0.1"), description=_DESCRIPTION, **kwargs)
self.split_file = os.path.join(directory, name + ".json")
_QUESTION = "question"
_QUERY = "query"
_QUESTION_FIELD = "questionPatternModEntities"
_QUERY_FIELD = "sparqlPatternModEntities"
class Cfq(nlp.GeneratorBasedBuilder):
"""CFQ task / splits."""
BUILDER_CONFIGS = [
CfqConfig(name="mcd1"),
CfqConfig(name="mcd2"),
CfqConfig(name="mcd3"),
CfqConfig(name="question_complexity_split"),
CfqConfig(name="question_pattern_split"),
CfqConfig(name="query_complexity_split"),
CfqConfig(name="query_pattern_split"),
CfqConfig(name="random_split"),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_QUESTION: nlp.Value("string"), _QUERY: nlp.Value("string"),}),
supervised_keys=(_QUESTION, _QUERY),
homepage="https://github.com/google-research/google-research/tree/master/cfq",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_DATA_URL)
data_dir = os.path.join(data_dir, "cfq")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"base_directory": data_dir,
"splits_file": self.config.split_file,
"split_id": "trainIdxs",
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"base_directory": data_dir, "splits_file": self.config.split_file, "split_id": "testIdxs"},
),
]
def _scrub_json(self, content):
"""Reduce JSON by filtering out only the fields of interest."""
# Loading of json data with the standard Python library is very inefficient:
# For the 4GB dataset file it requires more than 40GB of RAM and takes 3min.
# There are more efficient libraries but in order to avoid additional
# dependencies we use a simple (perhaps somewhat brittle) regexp to reduce
# the content to only what is needed. This takes 1min to execute but
        # afterwards loading requires only 500MB of RAM and is done in 2s.
regex = re.compile(r'("%s":\s*"[^"]*").*?("%s":\s*"[^"]*")' % (_QUESTION_FIELD, _QUERY_FIELD), re.DOTALL)
return "[" + ",".join(["{" + m.group(1) + "," + m.group(2) + "}" for m in regex.finditer(content)]) + "]"
def _generate_examples(self, base_directory, splits_file, split_id):
"""Yields examples."""
samples_path = os.path.join(base_directory, "dataset.json")
splits_path = os.path.join(base_directory, splits_file)
with open(samples_path) as samples_file:
with open(splits_path) as splits_file:
logging.info("Reading json from %s into memory...", samples_path)
samples = json.loads(self._scrub_json(samples_file.read()))
logging.info("%d samples loaded", len(samples))
logging.info("Loaded json data from %s.", samples_path)
splits = json.load(splits_file)
for idx in splits[split_id]:
sample = samples[idx]
yield idx, {_QUESTION: sample[_QUESTION_FIELD], _QUERY: sample[_QUERY_FIELD]}
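if __name__ == "__main__":
    # Sketch added for illustration; not part of the original script.
    # It runs the same field-scrubbing regexp used in Cfq._scrub_json on a tiny
    # in-memory JSON string instead of the multi-gigabyte dataset file.
    toy = (
        '{"questionPatternModEntities": "Who directed M0", "other": "x", '
        '"sparqlPatternModEntities": "SELECT ?x WHERE ..."}'
    )
    regex = re.compile(r'("%s":\s*"[^"]*").*?("%s":\s*"[^"]*")' % (_QUESTION_FIELD, _QUERY_FIELD), re.DOTALL)
    scrubbed = "[" + ",".join("{" + m.group(1) + "," + m.group(2) + "}" for m in regex.finditer(toy)) + "]"
    print(json.loads(scrubbed))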
|
nlp-master
|
datasets/cfq/cfq.py
|
"""TODO(commonsense_qa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(commonsense_qa): BibTeX citation
_CITATION = """\
@InProceedings{commonsense_QA,
title={COMMONSENSEQA: A Question Answering Challenge Targeting Commonsense Knowledge},
author={Alon, Talmor and Jonathan, Herzig and Nicholas, Lourie and Jonathan ,Berant},
journal={arXiv preprint arXiv:1811.00937v2},
year={2019}
}
"""
# TODO(commonsense_qa):
_DESCRIPTION = """\
CommonsenseQA is a new multiple-choice question answering dataset that requires different types of commonsense knowledge
to predict the correct answers. It contains 12,102 questions with one correct answer and four distractor answers.
The dataset is provided in two major training/validation/testing set splits: "Random split" which is the main evaluation
split, and "Question token split", see paper for details.
"""
_URL = "https://s3.amazonaws.com/commensenseqa"
_TRAINING_FILE = "train_rand_split.jsonl"
_DEV_FILE = "dev_rand_split.jsonl"
_TEST_FILE = "test_rand_split_no_answers.jsonl"
class CommonsenseQa(nlp.GeneratorBasedBuilder):
"""TODO(commonsense_qa): Short description of my dataset."""
# TODO(commonsense_qa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# These are the features of your dataset like images, labels ...
features = nlp.Features(
{
"answerKey": nlp.Value("string"),
"question": nlp.Value("string"),
"choices": nlp.features.Sequence({"label": nlp.Value("string"), "text": nlp.Value("string"),}),
}
)
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=features,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://www.tau-nlp.org/commonsenseqa",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
download_urls = {
"train": os.path.join(_URL, _TRAINING_FILE),
"test": os.path.join(_URL, _TEST_FILE),
"dev": os.path.join(_URL, _DEV_FILE),
}
downloaded_files = dl_manager.download_and_extract(download_urls)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "split": "train"}
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "split": "dev",}
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"], "split": "test",}
),
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
# TODO(commonsense_qa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
question = data["question"]
choices = question["choices"]
labels = [label["label"] for label in choices]
texts = [text["text"] for text in choices]
stem = question["stem"]
if split == "test":
answerkey = ""
else:
answerkey = data["answerKey"]
yield id_, {
"answerKey": answerkey,
"question": stem,
"choices": {"label": labels, "text": texts},
}
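if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original script.
    # "choices" is a Sequence feature, so labels and texts come back as
    # parallel lists.
    import nlp
    csqa = nlp.load_dataset("commonsense_qa", split="validation")
    q = csqa[0]
    print(q["question"])
    for label, text in zip(q["choices"]["label"], q["choices"]["text"]):
        print(label, text)
    print("gold:", q["answerKey"])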
|
nlp-master
|
datasets/commonsense_qa/commonsense_qa.py
|
import os
import tempfile
from unittest import TestCase
import numpy as np
import pyarrow as pa
from nlp.arrow_reader import BaseReader
from nlp.info import DatasetInfo
from nlp.splits import SplitDict, SplitInfo
class ReaderTester(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This reader is made for testing. It mocks file reads.
"""
def _get_dataset_from_filename(self, filename_skip_take):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
pa_table = pa.Table.from_pydict({"filename": [filename + "_" + str(x) for x in np.arange(100).tolist()]})
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
class BaseDatasetTest(TestCase):
def _create_dummy_dataset(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=30)
test_info = SplitInfo(name="test", num_examples=30)
split_infos = [train_info, test_info]
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
reader = ReaderTester("", info)
dset = reader.read(name, "train", split_infos)
return dset
def test_map(self):
dset = self._create_dummy_dataset()
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_test = dset.map(
lambda x: {"name": x["filename"][:-2], "id": int(x["filename"][-1])}, cache_file_name=tmp_file
)
self.assertEqual(len(dset_test), 30)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_test = dset.map(
lambda x: {"name": x["filename"][:-2], "id": int(x["filename"][-1])}, cache_file_name=tmp_file
)
dset_test_with_indices = dset.map(
lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True, cache_file_name=tmp_file
)
self.assertEqual(len(dset_test_with_indices), 30)
def test_map_batched(self):
dset = self._create_dummy_dataset()
def map_batched(example):
return {"filename_new": [x + "_extension" for x in example["filename"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_test_batched = dset.map(map_batched, batched=True, cache_file_name=tmp_file)
self.assertEqual(len(dset_test_batched), 30)
def map_batched_with_indices(example, idx):
return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_test_with_indices_batched = dset.map(
map_batched_with_indices, batched=True, with_indices=True, cache_file_name=tmp_file
)
self.assertEqual(len(dset_test_with_indices_batched), 30)
    def test_remove_columns(self):
dset = self._create_dummy_dataset()
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset = dset.map(
lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True, cache_file_name=tmp_file
)
self.assertTrue("id" in dset[0])
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset = dset.map(lambda x: x, remove_columns=["id"], cache_file_name=tmp_file)
self.assertTrue("id" not in dset[0])
def test_filter(self):
dset = self._create_dummy_dataset()
# keep only first five examples
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_filter_first_five = dset.filter(lambda x, i: i < 5, with_indices=True, cache_file_name=tmp_file)
self.assertEqual(len(dset_filter_first_five), 5)
# filter filenames with even id at the end
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_filter_even_num = dset.filter(lambda x: (int(x["filename"][-1]) % 2 == 0), cache_file_name=tmp_file)
self.assertEqual(len(dset_filter_even_num), 15)
def test_select(self):
dset = self._create_dummy_dataset()
# select every two example
indices = list(range(0, len(dset), 2))
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_select_even = dset.select(indices, cache_file_name=tmp_file)
self.assertEqual(len(dset_select_even), 15)
for row in dset_select_even:
self.assertEqual(int(row["filename"][-1]) % 2, 0)
def test_shuffle(self):
dset = self._create_dummy_dataset()
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset_shuffled = dset.shuffle(seed=1234, cache_file_name=tmp_file)
self.assertEqual(len(dset_shuffled), 30)
self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28")
self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10")
# Reproducibility
tmp_file = os.path.join(tmp_dir, "test_2.arrow")
dset_shuffled_2 = dset.shuffle(seed=1234, cache_file_name=tmp_file)
self.assertListEqual(dset_shuffled["filename"], dset_shuffled_2["filename"])
def test_sort(self):
dset = self._create_dummy_dataset()
with tempfile.TemporaryDirectory() as tmp_dir:
# Keep only 10 examples
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset = dset.select(range(10), cache_file_name=tmp_file)
tmp_file = os.path.join(tmp_dir, "test_2.arrow")
dset = dset.shuffle(seed=1234, cache_file_name=tmp_file)
self.assertEqual(len(dset), 10)
self.assertEqual(dset[0]["filename"], "my_name-train_8")
self.assertEqual(dset[1]["filename"], "my_name-train_9")
# Sort
tmp_file = os.path.join(tmp_dir, "test_3.arrow")
dset_sorted = dset.sort("filename", cache_file_name=tmp_file)
for i, row in enumerate(dset_sorted):
self.assertEqual(int(row["filename"][-1]), i)
# Sort reversed
tmp_file = os.path.join(tmp_dir, "test_4.arrow")
dset_sorted = dset.sort("filename", cache_file_name=tmp_file, reverse=True)
for i, row in enumerate(dset_sorted):
self.assertEqual(int(row["filename"][-1]), len(dset_sorted) - 1 - i)
def test_train_test_split(self):
dset = self._create_dummy_dataset()
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "test.arrow")
tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
dset_dict = dset.train_test_split(
test_size=10, shuffle=False, train_cache_file_name=tmp_file, test_cache_file_name=tmp_file_2
)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 20)
self.assertEqual(len(dset_test), 10)
self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertEqual(dset_train[-1]["filename"], "my_name-train_19")
self.assertEqual(dset_test[0]["filename"], "my_name-train_20")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
tmp_file = os.path.join(tmp_dir, "test_3.arrow")
tmp_file_2 = os.path.join(tmp_dir, "test_4.arrow")
dset_dict = dset.train_test_split(
test_size=0.5, shuffle=False, train_cache_file_name=tmp_file, test_cache_file_name=tmp_file_2
)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 15)
self.assertEqual(len(dset_test), 15)
self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertEqual(dset_train[-1]["filename"], "my_name-train_14")
self.assertEqual(dset_test[0]["filename"], "my_name-train_15")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
tmp_file = os.path.join(tmp_dir, "test_5.arrow")
tmp_file_2 = os.path.join(tmp_dir, "test_6.arrow")
dset_dict = dset.train_test_split(
train_size=10, shuffle=False, train_cache_file_name=tmp_file, test_cache_file_name=tmp_file_2
)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 10)
self.assertEqual(len(dset_test), 20)
self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertEqual(dset_train[-1]["filename"], "my_name-train_9")
self.assertEqual(dset_test[0]["filename"], "my_name-train_10")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
tmp_file = os.path.join(tmp_dir, "test_7.arrow")
tmp_file_2 = os.path.join(tmp_dir, "test_8.arrow")
dset_dict = dset.train_test_split(
train_size=10, train_cache_file_name=tmp_file, test_cache_file_name=tmp_file_2
)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 10)
self.assertEqual(len(dset_test), 20)
self.assertNotEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertNotEqual(dset_train[-1]["filename"], "my_name-train_9")
self.assertNotEqual(dset_test[0]["filename"], "my_name-train_10")
self.assertNotEqual(dset_test[-1]["filename"], "my_name-train_29")
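# --- Illustrative sketch (not part of the original test file) ---
# The mock reader above produces rows named "my_name-train_<i>", and the tests
# recover a single-digit id from the trailing character ("filename"[-1]).
# This standalone helper only restates that convention for clarity.
def _last_digit_id(filename: str) -> int:
    """Trailing character of a mock row name, e.g. 'my_name-train_28' -> 8."""
    return int(filename[-1])
if __name__ == "__main__":
    assert _last_digit_id("my_name-train_28") == 8
    assert _last_digit_id("my_name-train_3") == 3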
|
nlp-master
|
tests/test_arrow_dataset.py
|
nlp-master
|
tests/__init__.py
|
|
# coding=utf-8
# Copyright 2020 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import os
import tempfile
import requests
from absl.testing import parameterized
from nlp import (
BuilderConfig,
DatasetBuilder,
DownloadConfig,
GenerateMode,
MockDownloadManager,
hf_api,
hf_bucket_url,
import_main_class,
load_dataset,
prepare_module,
)
from .utils import aws, local, slow
logging.basicConfig(level=logging.INFO)
class DatasetTester(object):
def __init__(self, parent):
self.parent = parent
def load_builder_class(self, dataset_name, is_local=False):
# Download/copy dataset script
if is_local is True:
module_path = prepare_module("./datasets/" + dataset_name)
else:
module_path = prepare_module(dataset_name, download_config=DownloadConfig(force_download=True))
# Get dataset builder class
builder_cls = import_main_class(module_path)
# Instantiate dataset builder
return builder_cls
def load_all_configs(self, dataset_name, is_local=False):
# get builder class
builder_cls = self.load_builder_class(dataset_name, is_local=is_local)
builder = builder_cls
if len(builder.BUILDER_CONFIGS) == 0:
return [None]
return builder.BUILDER_CONFIGS
def check_load_dataset(self, dataset_name, configs, is_local=False):
# test only first config to speed up testing
for config in configs:
with tempfile.TemporaryDirectory() as processed_temp_dir, tempfile.TemporaryDirectory() as raw_temp_dir:
# create config and dataset
dataset_builder_cls = self.load_builder_class(dataset_name, is_local=is_local)
name = config.name if config is not None else None
dataset_builder = dataset_builder_cls(name=name, cache_dir=processed_temp_dir)
# TODO: skip Beam datasets and datasets that lack dummy data for now
if not dataset_builder.test_dummy_data:
logging.info("Skip tests for this dataset for now")
return
if config is not None:
version = config.version
else:
version = dataset_builder.VERSION
# create mock data loader manager that has a special download_and_extract() method to download dummy data instead of real data
mock_dl_manager = MockDownloadManager(
dataset_name=dataset_name,
config=config,
version=version,
cache_dir=raw_temp_dir,
is_local=is_local,
)
if dataset_builder.__class__.__name__ == "Csv":
# need slight adaptation for the csv dataset
mock_dl_manager.download_dummy_data()
path_to_dummy_data = mock_dl_manager.dummy_file
dataset_builder.config.data_files = {
"train": os.path.join(path_to_dummy_data, "train.csv"),
"test": os.path.join(path_to_dummy_data, "test.csv"),
"dev": os.path.join(path_to_dummy_data, "dev.csv"),
}
elif dataset_builder.__class__.__name__ == "Json":
# need slight adaptation for the json dataset
mock_dl_manager.download_dummy_data()
path_to_dummy_data = mock_dl_manager.dummy_file
dataset_builder.config.data_files = {
"train": os.path.join(path_to_dummy_data, "train.json"),
"test": os.path.join(path_to_dummy_data, "test.json"),
"dev": os.path.join(path_to_dummy_data, "dev.json"),
}
# mock size needed for dummy data instead of actual dataset
if dataset_builder.info is not None:
# approximate upper bound of order of magnitude of dummy data files
one_mega_byte = 2 << 19
dataset_builder.info.size_in_bytes = 2 * one_mega_byte
dataset_builder.info.download_size = one_mega_byte
dataset_builder.info.dataset_size = one_mega_byte
# generate examples from dummy data
dataset_builder.download_and_prepare(
dl_manager=mock_dl_manager, download_mode=GenerateMode.FORCE_REDOWNLOAD, ignore_verifications=True
)
# get dataset
dataset = dataset_builder.as_dataset()
# check that dataset is not empty
for split in dataset_builder.info.splits.keys():
# check that the loaded dataset is not empty
self.parent.assertTrue(len(dataset[split]) > 0)
def get_local_dataset_names():
datasets = [dataset_dir.split("/")[-2] for dataset_dir in glob.glob("./datasets/*/")]
return [{"testcase_name": x, "dataset_name": x} for x in datasets]
@parameterized.named_parameters(get_local_dataset_names())
@local
class LocalDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_load_dataset(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs, is_local=True)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name, is_local=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
self.assertTrue(all(isinstance(config, BuilderConfig) for config in builder_configs))
@slow
def test_load_dataset_all_configs(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)
self.dataset_tester.check_load_dataset(dataset_name, configs, is_local=True)
@slow
def test_load_real_dataset(self, dataset_name):
with tempfile.TemporaryDirectory() as temp_data_dir:
download_config = DownloadConfig()
download_config.download_mode = GenerateMode.FORCE_REDOWNLOAD
dataset = load_dataset(
"./datasets/" + dataset_name, data_dir=temp_data_dir, download_config=download_config
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
def get_aws_dataset_names():
api = hf_api.HfApi()
# fetch all dataset names
datasets = [x.id for x in api.dataset_list(with_community_datasets=False)]
return [{"testcase_name": x, "dataset_name": x} for x in datasets]
@parameterized.named_parameters(get_aws_dataset_names())
@aws
class AWSDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_dataset_has_valid_etag(self, dataset_name):
py_script_path = list(filter(lambda x: x, dataset_name.split("/")))[-1] + ".py"
dataset_url = hf_bucket_url(dataset_name, filename=py_script_path, dataset=True)
etag = None
try:
response = requests.head(dataset_url, allow_redirects=True, proxies=None, timeout=10)
if response.status_code == 200:
etag = response.headers.get("Etag")
except (EnvironmentError, requests.exceptions.Timeout):
pass
self.assertIsNotNone(etag)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
self.assertTrue(all(isinstance(config, BuilderConfig) for config in builder_configs))
def test_load_dataset(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs)
@slow
def test_load_dataset_all_configs(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name)
self.dataset_tester.check_load_dataset(dataset_name, configs)
@slow
def test_load_real_dataset(self, dataset_name):
with tempfile.TemporaryDirectory() as temp_data_dir:
download_config = DownloadConfig()
download_config.download_mode = GenerateMode.FORCE_REDOWNLOAD
dataset = load_dataset(dataset_name, data_dir=temp_data_dir, download_config=download_config)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
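# --- Illustrative sketch (not part of the original test file) ---
# How the parameterized test names above are built: every "./datasets/<name>/"
# directory becomes one {"testcase_name": ..., "dataset_name": ...} entry, and
# parameterized.named_parameters expands each entry into one test method.
# The directory names below are used only for illustration.
if __name__ == "__main__":
    dataset_dirs = ["./datasets/commonsense_qa/", "./datasets/eraser_multi_rc/"]
    cases = [{"testcase_name": d.split("/")[-2], "dataset_name": d.split("/")[-2]} for d in dataset_dirs]
    print(cases)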
|
nlp-master
|
tests/test_dataset_common.py
|
import os
import unittest
from distutils.util import strtobool
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_aws_tests = parse_flag_from_env("RUN_AWS", default=True)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests or _run_slow_tests == 0:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def local(test_case):
"""
Decorator marking a test as local
Local tests are run by default. Set the RUN_LOCAL environment variable
to a falsy value to not run them.
"""
if not _run_local_tests or _run_local_tests == 0:
test_case = unittest.skip("test is local")(test_case)
return test_case
def aws(test_case):
"""
Decorator marking a test as one that relies on AWS.
AWS tests are run by default. Set the RUN_AWS environment variable
to a falsy value to not run them.
"""
if not _run_aws_tests or _run_aws_tests == 0:
test_case = unittest.skip("test requires aws")(test_case)
return test_case
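# --- Illustrative sketch (not part of the original module) ---
# Typical usage of the decorators above: with RUN_SLOW=1 the first test runs,
# with RUN_AWS=0 the second one is skipped. The test class is hypothetical.
class _ExampleUsage(unittest.TestCase):
    @slow
    def test_expensive_path(self):
        self.assertTrue(True)

    @aws
    def test_needs_network(self):
        self.assertTrue(True)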
|
nlp-master
|
tests/utils.py
|
from unittest import TestCase
import pyarrow as pa
from nlp.arrow_reader import BaseReader
from nlp.info import DatasetInfo
from nlp.splits import SplitDict, SplitInfo
class ReaderTest(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This reader is made for testing. It mocks file reads.
"""
def _get_dataset_from_filename(self, filename_skip_take):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
pa_table = pa.Table.from_pydict({"filename": [filename] * 100})
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
class BaseReaderTest(TestCase):
def test_read(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_infos = [train_info, test_info]
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
reader = ReaderTest("", info)
instructions = "test[:33%]"
dset = reader.read(name, instructions, split_infos)
self.assertEqual(dset["filename"][0], f"{name}-test")
self.assertEqual(dset.num_rows, 33)
self.assertEqual(dset.num_columns, 1)
instructions = ["train", "test[:33%]"]
train_dset, test_dset = reader.read(name, instructions, split_infos)
self.assertEqual(train_dset["filename"][0], f"{name}-train")
self.assertEqual(train_dset.num_rows, 100)
self.assertEqual(train_dset.num_columns, 1)
self.assertEqual(test_dset["filename"][0], f"{name}-test")
self.assertEqual(test_dset.num_rows, 33)
self.assertEqual(test_dset.num_columns, 1)
def test_read_files(self):
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
reader = ReaderTest("", info)
files = [{"filename": "train"}, {"filename": "test", "skip": 10, "take": 10}]
dset = reader.read_files(files, original_instructions="")
self.assertEqual(dset.num_rows, 110)
self.assertEqual(dset.num_columns, 1)
self.assertEqual(dset._data_files, files)
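# --- Illustrative sketch (not part of the original test file) ---
# The split instruction "test[:33%]" used above is resolved against the 100
# examples declared for the test split; the assertions expect 33 rows, i.e. the
# percentage is turned into an absolute row count (rounding behaviour assumed here).
if __name__ == "__main__":
    num_examples = 100
    take = int(num_examples * 33 / 100)
    print(take)  # 33, matching test_read above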
|
nlp-master
|
tests/test_arrow_reader.py
|
import os
import tempfile
from unittest import TestCase
import apache_beam as beam
import nlp
class DummyBeamDataset(nlp.BeamBasedBuilder):
"""Dummy beam dataset."""
def _info(self):
return nlp.DatasetInfo(
features=nlp.Features({"content": nlp.Value("string")}),
# No default supervised_keys.
supervised_keys=None,
)
def _split_generators(self, dl_manager, pipeline):
return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"examples": get_test_examples()})]
def _build_pcollection(self, pipeline, examples):
return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_examples():
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
def test_download_and_prepare(self):
expected_num_examples = len(get_test_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
builder.download_and_prepare()
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows, expected_num_examples)
self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
self.assertTrue(
os.path.exists(
os.path.join(tmp_cache_dir, "dummy_beam_dataset", "default", "0.0.0", "dataset_info.json")
)
)
def test_no_beam_options(self):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
self.assertRaises(nlp.builder.MissingBeamOptions, builder.download_and_prepare)
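# --- Illustrative sketch (not part of the original test file) ---
# A minimal standalone DirectRunner pipeline built from the same beam.Create
# entry point that DummyBeamDataset._build_pcollection uses above; it simply
# materializes and prints the three dummy examples.
if __name__ == "__main__":
    with beam.Pipeline(runner="DirectRunner") as pipeline:
        _ = (
            pipeline
            | "Load Examples" >> beam.Create(get_test_examples())
            | "Print" >> beam.Map(print)
        )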
|
nlp-master
|
tests/test_beam.py
|
from unittest import TestCase
from nlp.utils.py_utils import (
flatten_nest_dict,
flatten_nested,
map_nested,
temporary_assignment,
zip_dict,
zip_nested,
)
class PyUtilsTest(TestCase):
def test_flatten_nest_dict(self):
d1 = {}
d2 = {"a": 1, "b": 2}
d3 = {"a": {"1": 1, "2": 2}, "b": 3}
expected_flatten_d1 = {}
expected_flatten_d2 = {"a": 1, "b": 2}
expected_flatten_d3 = {"a/1": 1, "a/2": 2, "b": 3}
self.assertDictEqual(flatten_nest_dict(d1), expected_flatten_d1)
self.assertDictEqual(flatten_nest_dict(d2), expected_flatten_d2)
self.assertDictEqual(flatten_nest_dict(d3), expected_flatten_d3)
def test_flatten_nested(self):
s1 = {}
s2 = []
s3 = "foo"
s4 = ["foo", "bar"]
s5 = {"a": 1, "b": 2}
s6 = {"a": [1, 2], "b": [3, 4]}
s7 = {"a": {"1": 1}, "b": 2}
expected_flatten_nested_s1 = []
expected_flatten_nested_s2 = []
expected_flatten_nested_s3 = ["foo"]
expected_flatten_nested_s4 = ["foo", "bar"]
expected_flatten_nested_s5 = [1, 2]
expected_flatten_nested_s6 = [1, 2, 3, 4]
expected_flatten_nested_s7 = [1, 2]
self.assertEqual(flatten_nested(s1), expected_flatten_nested_s1)
self.assertEqual(flatten_nested(s2), expected_flatten_nested_s2)
self.assertEqual(flatten_nested(s3), expected_flatten_nested_s3)
self.assertEqual(flatten_nested(s4), expected_flatten_nested_s4)
self.assertEqual(flatten_nested(s5), expected_flatten_nested_s5)
self.assertEqual(flatten_nested(s6), expected_flatten_nested_s6)
self.assertEqual(flatten_nested(s7), expected_flatten_nested_s7)
def test_map_nested(self):
def add_one(i):
return i + 1
s1 = {}
s2 = []
s3 = 1
s4 = [1, 2]
s5 = {"a": 1, "b": 2}
s6 = {"a": [1, 2], "b": [3, 4]}
s7 = {"a": {"1": 1}, "b": 2}
expected_map_nested_s1 = {}
expected_map_nested_s2 = []
expected_map_nested_s3 = 2
expected_map_nested_s4 = [2, 3]
expected_map_nested_s5 = {"a": 2, "b": 3}
expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
def test_zip_dict(self):
d1 = {"a": 1, "b": 2}
d2 = {"a": 3, "b": 4}
d3 = {"a": 5, "b": 6}
expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
self.assertEqual(sorted(list(zip_dict(d1, d2, d3))), expected_zip_dict_result)
def test_zip_nested(self):
d1 = {"a": {"1": 1}, "b": 2}
d2 = {"a": {"1": 3}, "b": 4}
expected_zip_nested_result = {"a": {"1": (1, 3)}, "b": (2, 4)}
self.assertDictEqual(zip_nested(d1, d2), expected_zip_nested_result)
def test_temporary_assignment(self):
class Foo:
my_attr = "bar"
foo = Foo()
self.assertEqual(foo.my_attr, "bar")
with temporary_assignment(foo, "my_attr", "BAR"):
self.assertEqual(foo.my_attr, "BAR")
self.assertEqual(foo.my_attr, "bar")
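# --- Illustrative sketch (not part of the original test file) ---
# The behaviour asserted in test_zip_dict above can be reproduced with plain
# Python: values sharing a key across several dicts are grouped into tuples.
# This reference helper only mirrors the expected output; it is not the library code.
def _zip_dict_reference(*dicts):
    for key in dicts[0]:
        yield key, tuple(d[key] for d in dicts)
if __name__ == "__main__":
    d1, d2, d3 = {"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}
    print(sorted(_zip_dict_reference(d1, d2, d3)))  # [('a', (1, 3, 5)), ('b', (2, 4, 6))]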
|
nlp-master
|
tests/utils/test_py_utils.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" Simple Dataset wrapping an Arrow Table."""
import hashlib
import logging
import os
from collections import defaultdict
from collections.abc import Mapping
from math import ceil, floor
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from tqdm import tqdm
from nlp.utils.py_utils import dumps
from .arrow_writer import ArrowWriter
from .utils import convert_tuples_in_lists, map_nested
logger = logging.getLogger(__name__)
class DatasetInfoMixin(object):
""" This base class exposes some attributes of DatasetInfo
at the base level of the Dataset for easy access.
"""
def __init__(self, info, split):
self._info = info
self._split = split
@property
def info(self):
return self._info
@property
def split(self):
return self._split
@property
def builder_name(self) -> str:
return self._info.builder_name
@property
def citation(self) -> str:
return self._info.citation
@property
def config_name(self) -> str:
return self._info.config_name
@property
def dataset_size(self) -> Optional[int]:
return self._info.dataset_size
@property
def description(self) -> str:
return self._info.description
@property
def download_checksums(self) -> Optional[dict]:
return self._info.download_checksums
@property
def download_size(self) -> Optional[int]:
return self._info.download_size
@property
def features(self):
return self._info.features
@property
def homepage(self) -> Optional[str]:
return self._info.homepage
@property
def license(self) -> Optional[str]:
return self._info.license
@property
def size_in_bytes(self) -> Optional[int]:
return self._info.size_in_bytes
@property
def supervised_keys(self):
return self._info.supervised_keys
@property
def version(self):
return self._info.version
class Dataset(DatasetInfoMixin):
""" A Dataset backed by an Arrow table or Record Batch.
"""
def __init__(
self,
arrow_table: Union[pa.Table, pa.RecordBatch],
data_files: Optional[List[dict]] = None,
info: Optional[Any] = None,
split: Optional[Any] = None,
):
super().__init__(info=info, split=split)
self._data: pa.Table = arrow_table
self._data_files: List[dict] = data_files if data_files is not None else []
self._format_type = None
self._format_columns = None
self._output_all_columns = False
@classmethod
def from_file(cls, filename: str, info: Optional[Any] = None, split: Optional[Any] = None):
""" Instantiate a Dataset backed by an Arrow table at filename """
mmap = pa.memory_map(filename)
f = pa.ipc.open_stream(mmap)
pa_table = f.read_all()
return cls(arrow_table=pa_table, data_files=[{"filename": filename}], info=info, split=split)
@classmethod
def from_buffer(cls, buffer: pa.Buffer, info: Optional[Any] = None, split: Optional[Any] = None):
""" Instantiate a Dataset backed by an Arrow buffer """
mmap = pa.BufferReader(buffer)
f = pa.ipc.open_stream(mmap)
pa_table = f.read_all()
return cls(pa_table, info=info, split=split)
@property
def data(self):
return self._data
@property
def cache_files(self):
return self._data_files
@property
def columns(self):
return self._data.columns
@property
def nbytes(self):
return self._data.nbytes
@property
def num_columns(self):
return self._data.num_columns
@property
def num_rows(self):
return self._data.num_rows
@property
def column_names(self):
return self._data.column_names
@property
def schema(self) -> pa.Schema:
return self._data.schema
@property
def shape(self):
return self._data.shape
def drop(self, columns: Union[str, List[str]]):
""" Drop one or more columns.
Args:
columns: list of str
"""
if isinstance(columns, str):
columns = [columns]
if any(col not in self._data.column_names for col in columns):
raise ValueError(
"Columns {} not in the dataset. Current columns in the dataset: {}".format(
list(filter(lambda col: col not in self._data.column_names, columns)), self._data.column_names
)
)
self._data = self._data.drop(columns)
def unique(self, column: str):
""" Return a list of the unque elements in a column.
Args:
columns: str
"""
if column not in self._data.column_names:
raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
return self._data.column(column).unique().to_pylist()
def dictionary_encode_column(self, column: str):
""" Dictionary encode a column.
Dictionary encoding can reduce the size of a column with many repetitions (e.g. string label columns)
by storing a dictionary of the strings. This only affects the internal storage.
Args:
column: str
"""
if column not in self._data.column_names:
raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
casted_schema: pa.Schema = self._data.schema
field_index = casted_schema.get_field_index(column)
field: pa.Field = casted_schema.field(field_index)
casted_field = pa.field(field.name, pa.dictionary(pa.int32(), field.type), nullable=False)
casted_schema.set(field_index, casted_field)
self._data = self._data.cast(casted_schema)
def flatten(self):
""" Flatten the Table.
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
"""
self._data = self._data.flatten()
def __len__(self):
return self._data.num_rows
def __iter__(self):
format_type = self._format_type
format_columns = self._format_columns
output_all_columns = self._output_all_columns
for index in range(self._data.num_rows):
yield self._getitem(
index, format_type=format_type, format_columns=format_columns, output_all_columns=output_all_columns,
)
def __repr__(self):
schema_str = dict((a, str(b)) for a, b in zip(self._data.schema.names, self._data.schema.types))
return f"Dataset(schema: {schema_str}, num_rows: {self.num_rows})"
@property
def format(self):
return {
"type": "python" if self._format_type is None else self._format_type,
"columns": self.column_names if self._format_columns is None else self._format_columns,
"output_all_columns": self._output_all_columns,
}
def set_format(self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False):
""" Set __getitem__ return format (type and columns)
Args:
type (Optional ``str``): output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas']
None means __getitem__ returns python objects (default)
columns (Optional ``List[str]``): columns to format in the output
None means __getitem__ returns all columns (default)
output_all_columns (``bool``, defaults to False): keep un-formatted columns as well in the output (as python objects)
"""
# Check return type
if type == "torch":
try:
import torch # noqa: F401
except ImportError:
logger.error("PyTorch needs to be installed to be able to return PyTorch tensors.")
elif type == "tensorflow":
try:
import tensorflow # noqa: F401
except ImportError:
logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
else:
assert (
type is None or type == "numpy" or type == "pandas"
), "Return type should be None or selected in ['numpy', 'torch', 'tensorflow', 'pandas']."
# Check filter column
if isinstance(columns, str):
columns = [columns]
if columns is not None and any(col not in self._data.column_names for col in columns):
raise ValueError(
"Columns {} not in the dataset. Current columns in the dataset: {}".format(
list(filter(lambda col: col not in self._data.column_names, columns)), self._data.column_names
)
)
self._format_type = type
self._format_columns = columns
self._output_all_columns = output_all_columns
logger.info(
"Set __getitem__(key) output type to %s for %s columns "
" (when key is int or slice) and %s output other (un-formated) columns.",
"python objects" if type is None else type,
"no" if columns is None else str(columns),
"do" if output_all_columns else "don't",
)
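# Illustrative usage (a sketch, not part of this module; column names are hypothetical):
#   dset.set_format(type="numpy", columns=["label"])  # __getitem__ now returns numpy arrays for "label"
#   dset.reset_format()                               # back to plain python objects and all columns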
def reset_format(self):
""" Reset __getitem__ return format to python objects and all columns.
Same as ``self.set_format()``
"""
self.set_format()
def _convert_outputs(self, outputs, format_type=None, format_columns=None, output_all_columns=False):
if format_type is None:
if output_all_columns:
return outputs
if isinstance(outputs, dict) and format_columns is not None:
return {k: v for k, v in outputs.items() if k in format_columns}
return outputs
if format_type == "numpy":
import numpy
command = numpy.array
elif format_type == "torch":
import torch
command = torch.tensor
elif format_type == "tensorflow":
import tensorflow
command = tensorflow.ragged.constant
else:
def identity(x):
return x
command = identity
if isinstance(outputs, (list, tuple)):
return command(outputs)
else:
output_dict = {}
for k, v in outputs.items():
if format_columns is not None and k not in format_columns and not output_all_columns:
continue
if format_columns is None or k in format_columns:
v = command(v)
output_dict[k] = v
return output_dict
@staticmethod
def _unnest(py_dict):
return dict((key, array[0]) for key, array in py_dict.items())
@staticmethod
def _nest(py_dict):
return dict((key, [elem]) for key, elem in py_dict.items())
def _getitem(
self, key: Union[int, slice, str], format_type=None, format_columns=None, output_all_columns=False
) -> Union[Dict, List]:
""" Can be used to index columns (by string names) or rows (by integer index or slices)
"""
if isinstance(key, int):
if key < 0:
key = self._data.num_rows + key
if key >= self._data.num_rows:
raise IndexError(f"Index ({key}) outside of table length ({self._data.num_rows}).")
if format_type is not None and format_type == "pandas":
outputs = self._data.slice(key, 1).to_pandas()
else:
outputs = self._unnest(self._data.slice(key, 1).to_pydict())
elif isinstance(key, slice):
key_indices = key.indices(self._data.num_rows)
if key_indices[2] != 1 or key_indices[1] < key_indices[0]:
raise ValueError("Slicing can only take contiguous and ordered slices.")
if format_type is not None and format_type == "pandas":
outputs = self._data.slice(key_indices[0], key_indices[1] - key_indices[0]).to_pandas()
else:
outputs = self._data.slice(key_indices[0], key_indices[1] - key_indices[0]).to_pydict()
elif isinstance(key, str):
if key not in self._data.column_names:
raise ValueError(f"Column ({key}) not in table columns ({self._data.column_names}).")
if format_type is not None:
if format_columns is None or key in format_columns:
if format_type == "pandas":
outputs = self._data[key].to_pandas(zero_copy_only=False)
elif format_type == "numpy":
outputs = np.concatenate(
[arr.to_numpy(zero_copy_only=False) for arr in self._data[key].chunks]
)
else:
outputs = self._convert_outputs(self._data[key].to_pylist(), format_type=format_type)
else:
outputs = self._data[key].to_pylist()
else:
outputs = self._data[key].to_pylist()
else:
raise ValueError("Can only get row(s) (int or slice) or columns (string).")
if (
(format_type is not None or format_columns is not None)
and not isinstance(key, str)
and format_type != "pandas"
):
outputs = self._convert_outputs(
outputs, format_type=format_type, format_columns=format_columns, output_all_columns=output_all_columns
)
return outputs
def __getitem__(self, key: Union[int, slice, str]) -> Union[Dict, List]:
""" Can be used to index columns (by string names) or rows (by integer index)
"""
return self._getitem(
key,
format_type=self._format_type,
format_columns=self._format_columns,
output_all_columns=self._output_all_columns,
)
def cleanup_cache_files(self):
""" Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
Be carefull when running this command that no other process is currently using other cache files.
Return:
Number of removed files
"""
if not self._data_files or "filename" not in self._data_files[0]:
return None
current_cache_file = os.path.abspath(self._data_files[0]["filename"])
cache_directory = os.path.dirname(current_cache_file)
logger.info(f"Listing files in {cache_directory}")
files: List[str] = os.listdir(cache_directory)
files_to_remove = []
for f_name in files:
full_name = os.path.abspath(os.path.join(cache_directory, f_name))
if f_name.startswith("cache-") and f_name.endswith(".arrow"):
if full_name == current_cache_file:
logger.info(f"Keeping current cache file at {full_name}")
continue
files_to_remove.append(full_name)
for file_path in files_to_remove:
logger.info(f"Removing {file_path}")
os.remove(file_path)
return len(files_to_remove)
def _get_cache_file_path(self, function, cache_kwargs):
""" Find a unique name from the filenames, kwargs and the function """
if not self._data_files or "filename" not in self._data_files[0]:
return None
previous_files_string = "-".join(
"-".join(str(k) + "-" + str(v) for k, v in f.items()) for f in self._data_files
)
cache_kwargs_string = "-".join(str(k) + "-" + str(v) for k, v in cache_kwargs.items())
function_bytes = dumps(function)
output_hash = hashlib.md5(
previous_files_string.encode("utf-8") + cache_kwargs_string.encode("utf-8") + function_bytes
).hexdigest()
cache_file_name = "cache-" + output_hash + ".arrow"
cache_directory = os.path.dirname(self._data_files[0]["filename"])
cache_file_path = os.path.join(cache_directory, cache_file_name)
return cache_file_path
def map(
self,
function,
with_indices: bool = False,
batched: bool = False,
batch_size: Optional[int] = 1000,
remove_columns: Optional[List[str]] = None,
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
arrow_schema: Optional[pa.Schema] = None,
disable_nullable: bool = True,
):
""" Apply a function to all the elements in the table (individually or in batches)
and update the table (if the function returns updated examples).
Args:
`function` (`callable`): with one of the following signature:
- `function(example: Dict) -> Union[Dict, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict, indices: int) -> Union[Dict, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[List]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[List], indices: List[int]) -> Union[Dict, Any]` if `batched=True` and `with_indices=True`
`with_indices` (`bool`, default: `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
`batched` (`bool`, default: `False`): Provide batch of examples to `function`
`batch_size` (`Optional[int]`, default: `1000`): Number of examples per batch provided to `function` if `batched=True`
`batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`
`remove_columns` (`Optional[List[str]]`, default: `None`): Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
`keep_in_memory` (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.
`load_from_cache_file` (`bool`, default: `True`): If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
`cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
results of the computation instead of the automatically generated cache file name.
`writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files; a lower value consumes less temporary memory while running `.map()`.
`arrow_schema` (`Optional[pa.Schema]`, default: `None`): Use a specific Apache Arrow Schema to store the cache file
instead of the automatically generated one.
`disable_nullable` (`bool`, default: `True`): Disallow null values in the table.
"""
# If the array is empty we do nothing
if len(self) == 0:
return self
# Select the columns (arrow columns) to process
if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns):
raise ValueError(
"Column to remove {} not in the dataset. Current columns in the dataset: {}".format(
list(filter(lambda col: col not in self._data.column_names, remove_columns)),
self._data.column_names,
)
)
# If we do batch computation but no batch size is provided, default to the full dataset
if batched and (batch_size is None or batch_size <= 0):
batch_size = self._data.num_rows
# Check if the function returns updated examples
def does_function_return_dict(inputs, indices):
""" Does the function returns a dict. """
processed_inputs = function(inputs, indices) if with_indices else function(inputs)
does_return_dict = isinstance(processed_inputs, Mapping)
if does_return_dict is False and processed_inputs is not None:
raise TypeError(
"Provided `function` which is applied to all elements of table returns a variable of type {}. Make sure provided `function` returns a variable of type `dict` to update the dataset or `None` if you are only interested in side effects.".format(
type(processed_inputs)
)
)
elif isinstance(test_indices, list) and does_return_dict is True:
all_dict_values_are_lists = all(isinstance(value, list) for value in processed_inputs.values())
if all_dict_values_are_lists is False:
raise TypeError(
"Provided `function` which is applied to all elements of table returns a `dict` of types {}. When using `batched=True`, make sure provided `function` returns a `dict` of types `list`.".format(
[type(x) for x in processed_inputs.values()]
)
)
return does_return_dict
# We only update the data table (and use the cache) if the function returns a dict.
# Test it on the first element or a small batch (0, 1) for batched inputs
test_inputs = self[:2] if batched else self[0]
test_indices = [0, 1] if batched else 0
update_data = does_function_return_dict(test_inputs, test_indices)
def apply_function_on_filtered_inputs(inputs, indices):
""" Utility to apply the function on a selection of columns. """
processed_inputs = function(inputs, indices) if with_indices else function(inputs)
if not update_data:
return None # Nothing to update, let's move on
if remove_columns is not None:
for column in remove_columns:
inputs.pop(column)
if self._format_type is not None:
inputs = self._getitem(
key=(indices if isinstance(indices, int) else slice(indices[0], indices[-1])),
format_type=None,
format_columns=None,
)
inputs.update(processed_inputs)
return inputs
# Find the output schema if none is given
test_inputs = self[:2] if batched else self[0]
test_indices = [0, 1] if batched else 0
test_output = apply_function_on_filtered_inputs(test_inputs, test_indices)
if arrow_schema is None and update_data:
if not batched:
test_output = self._nest(test_output)
test_output = convert_tuples_in_lists(test_output)
arrow_schema = pa.Table.from_pydict(test_output).schema
if disable_nullable:
arrow_schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in arrow_schema)
# Check if we've already cached this computation (indexed by a hash)
if self._data_files and update_data:
if cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
cache_kwargs = {
"with_indices": with_indices,
"batched": batched,
"batch_size": batch_size,
"remove_columns": remove_columns,
"keep_in_memory": keep_in_memory,
"load_from_cache_file": load_from_cache_file,
"cache_file_name": cache_file_name,
"writer_batch_size": writer_batch_size,
"arrow_schema": arrow_schema,
"disable_nullable": disable_nullable,
}
cache_file_name = self._get_cache_file_path(function, cache_kwargs)
if os.path.exists(cache_file_name) and load_from_cache_file:
logger.info("Loading cached processed dataset at %s", cache_file_name)
return Dataset.from_file(cache_file_name, info=self.info, split=self.split)
# Prepare output buffer and batched writer in memory or on file if we update the table
if update_data:
if keep_in_memory or not self._data_files:
buf_writer = pa.BufferOutputStream()
writer = ArrowWriter(schema=arrow_schema, stream=buf_writer, writer_batch_size=writer_batch_size)
else:
buf_writer = None
logger.info("Caching processed dataset at %s", cache_file_name)
writer = ArrowWriter(schema=arrow_schema, path=cache_file_name, writer_batch_size=writer_batch_size)
# Loop over single examples or batches and write to buffer/file if examples are to be updated
if not batched:
for i, example in tqdm(enumerate(self)):
example = apply_function_on_filtered_inputs(example, i)
if update_data:
writer.write(example)
else:
for i in tqdm(range(0, len(self), batch_size)):
batch = self[i : i + batch_size]
indices = list(range(*(slice(i, i + batch_size).indices(self._data.num_rows)))) # Something simpler?
batch = apply_function_on_filtered_inputs(batch, indices)
if update_data:
writer.write_batch(batch)
if update_data:
writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
# Create new Dataset from buffer or file
if buf_writer is None:
return Dataset.from_file(cache_file_name, info=self.info, split=self.split)
else:
return Dataset.from_buffer(buf_writer.getvalue(), info=self.info, split=self.split)
else:
return self
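# Illustrative usage (a sketch mirroring tests/test_arrow_dataset.py above, not part of this module):
#   dset = dset.map(lambda example: {"length": len(example["filename"])})
#   dset = dset.map(lambda example, idx: {"id": idx}, with_indices=True)
#   dset = dset.map(lambda batch: {"upper": [x.upper() for x in batch["filename"]]}, batched=True)
# Each call writes a new cache file (or an in-memory buffer if `keep_in_memory=True`)
# and returns a new Dataset; the original table is left untouched.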
def filter(self, function, with_indices=False, **kwargs):
""" Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
Args:
`function` (`callable`): with one of the following signature:
- `function(example: Dict) -> bool` if `with_indices=False`
- `function(example: Dict, indices: int) -> bool` if `with_indices=True`
`with_indices` (`bool`, default: `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
`batch_size` (`Optional[int]`, default: `1000`): Number of examples per batch provided to `function` if `batched=True`
`batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`
`remove_columns` (`Optional[List[str]]`, default: `None`): Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
`keep_in_memory` (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.
`load_from_cache_file` (`bool`, default: `True`): If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
`cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
results of the computation instead of the automatically generated cache file name.
`writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files; a lower value consumes less temporary memory while running `.map()`.
`disable_nullable` (`bool`, default: `True`): Disallow null values in the table.
"""
# transform the filter function into a map function
def map_function(batch, *args):
result = defaultdict(list)
num_examples = len(batch[next(iter(batch.keys()))])
# create single examples
for i in range(num_examples):
example = map_nested(lambda x: x[i], batch, dict_only=True)
# check if the example should be filtered out or not
if with_indices:
keep_example = function(example, args[0][i])
else:
keep_example = function(example)
assert isinstance(
keep_example, bool
), f"The filter function returns a variable of type {type(keep_example)}, but should return a variable of type `bool`."
# if the example should be kept, add it to the result
if keep_example:
for key in batch.keys():
result[key].append(example[key])
# if no examples are kept, initialize the result with empty lists
if bool(result) is False:
for key in batch.keys():
result[key] = []
return result
# to avoid errors with the arrow_schema we define it here
test_inputs = self[:2]
if "remove_columns" in kwargs:
test_inputs = {key: test_inputs[key] for key in (test_inputs.keys() - kwargs["remove_columns"])}
arrow_schema = pa.Table.from_pydict(test_inputs).schema
# return map function
return self.map(map_function, batched=True, with_indices=with_indices, arrow_schema=arrow_schema, **kwargs)
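# Illustrative usage (a sketch mirroring tests/test_arrow_dataset.py above, not part of this module):
#   first_five = dset.filter(lambda example, idx: idx < 5, with_indices=True)
#   short_rows = dset.filter(lambda example: len(example["filename"]) < 20)
# Internally the predicate is wrapped into a batched map function (see map_function above),
# so the keyword arguments accepted by `.map()` can be forwarded here as well.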
def select(
self,
indices: Union[List[int], np.ndarray],
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
):
""" Create a new dataset with rows selected following the list/array of indices.
Args:
`indices` (`Union[List[int], np.ndarray]`): List or 1D-NumPy array of integer indices for indexing.
`keep_in_memory` (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.
`load_from_cache_file` (`bool`, default: `True`): If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
`cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
results of the computation instead of the automatically generated cache file name.
`writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files; a lower value consumes less temporary memory while running `.map()`.
"""
# If the array is empty we do nothing
if len(self) == 0:
return self
# Check if we've already cached this computation (indexed by a hash)
if self._data_files:
if cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
cache_kwargs = {
"indices": indices,
"keep_in_memory": keep_in_memory,
"load_from_cache_file": load_from_cache_file,
"cache_file_name": cache_file_name,
"writer_batch_size": writer_batch_size,
}
cache_file_name = self._get_cache_file_path(self.select, cache_kwargs)
if os.path.exists(cache_file_name) and load_from_cache_file:
logger.info("Loading cached selected dataset at %s", cache_file_name)
return Dataset.from_file(cache_file_name, info=self.info, split=self.split)
# Prepare output buffer and batched writer in memory or on file if we update the table
if keep_in_memory or not self._data_files:
buf_writer = pa.BufferOutputStream()
writer = ArrowWriter(schema=self.schema, stream=buf_writer, writer_batch_size=writer_batch_size)
else:
buf_writer = None
logger.info("Caching processed dataset at %s", cache_file_name)
writer = ArrowWriter(schema=self.schema, path=cache_file_name, writer_batch_size=writer_batch_size)
# Loop over single examples or batches and write to buffer/file if examples are to be updated
for i in tqdm(indices):
example = self._getitem(key=int(i), format_type=None, format_columns=None,)
writer.write(example)
writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
# Create new Dataset from buffer or file
if buf_writer is None:
return Dataset.from_file(cache_file_name, info=self.info, split=self.split)
else:
return Dataset.from_buffer(buf_writer.getvalue(), info=self.info, split=self.split)
def sort(
self,
column: str,
reverse: bool = False,
kind: str = None,
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
):
""" Create a new dataset sorted according to a column.
Currently, sorting according to a column name uses the numpy sorting algorithm under the hood.
The column should thus be a numpy compatible type (in particular not a nested type).
This also means that the column used for sorting is fully loaded in memory (which should be fine in most cases).
Args:
`column` (`str`): column name to sort by.
`reverse` (`bool`, default: `False`): If True, sort in descending order rather than ascending.
`kind` (Optional `str`): Numpy algorithm for sorting selected in {‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’},
The default is ‘quicksort’. Note that both ‘stable’ and ‘mergesort’ use timsort under the covers and, in general,
the actual implementation will vary with data type. The ‘mergesort’ option is retained for backwards compatibility.
`keep_in_memory` (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.
`load_from_cache_file` (`bool`, default: `True`): If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
`cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
results of the computation instead of the automatically generated cache file name.
`writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files; a lower value consumes less temporary memory while running `.map()`.
"""
# If the array is empty we do nothing
if len(self) == 0:
return self
# Check the column name
if not isinstance(column, str) or column not in self._data.column_names:
raise ValueError(
"Column '{}' not found in the dataset. Please provide a column selected in: {}".format(
column, self._data.column_names,
)
)
# Check if we've already cached this computation (indexed by a hash)
if self._data_files:
if cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
cache_kwargs = {
"column": column,
"reverse": reverse,
"kind": kind,
"keep_in_memory": keep_in_memory,
"load_from_cache_file": load_from_cache_file,
"cache_file_name": cache_file_name,
"writer_batch_size": writer_batch_size,
}
cache_file_name = self._get_cache_file_path(self.sort, cache_kwargs)
if os.path.exists(cache_file_name) and load_from_cache_file:
logger.info("Loading cached sorted dataset at %s", cache_file_name)
return Dataset.from_file(cache_file_name, info=self.info, split=self.split)
indices = self._getitem(column, format_type="numpy", format_columns=None, output_all_columns=False,)
indices = np.argsort(indices, kind=kind)
if reverse:
indices = indices[::-1]
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
)
def shuffle(
self,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
):
""" Create a new Dataset where rows the rows are shuffled.
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
Args:
`seed` (Optional `int`): A seed to initialize the default BitGenerator if ``generator=None``.
If None, then fresh, unpredictable entropy will be pulled from the OS.
If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
`generator` (Optional `np.random.Generator`): Numpy random Generator to use to compute the permutation of the dataset rows.
If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).
`keep_in_memory` (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.
`load_from_cache_file` (`bool`, default: `True`): If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
`cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
results of the computation instead of the automatically generated cache file name.
`writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files; a lower value consumes less temporary memory while running `.map()`.
"""
# If the array is empty we do nothing
if len(self) == 0:
return self
if seed is not None and generator is not None:
raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
assert generator is None or isinstance(
generator, np.random.Generator
), "The provided generator must be an instance of numpy.random.Generator"
# Check if we've already cached this computation (indexed by a hash)
if self._data_files:
if cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
cache_kwargs = {
"generator": generator,
"seed": seed,
"keep_in_memory": keep_in_memory,
"load_from_cache_file": load_from_cache_file,
"cache_file_name": cache_file_name,
"writer_batch_size": writer_batch_size,
}
cache_file_name = self._get_cache_file_path(self.shuffle, cache_kwargs)
if os.path.exists(cache_file_name) and load_from_cache_file:
logger.info("Loading cached shuffled dataset at %s", cache_file_name)
return Dataset.from_file(cache_file_name, info=self.info, split=self.split)
if generator is None:
generator = np.random.default_rng(seed)
permutation = generator.permutation(len(self))
return self.select(
indices=permutation,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
)
def train_test_split(
self,
test_size: Union[float, int, None] = None,
train_size: Union[float, int, None] = None,
shuffle: bool = True,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
train_cache_file_name: Optional[str] = None,
test_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
):
""" Return a dictionary with two random train and test subsets (`train` and `test` ``Dataset`` splits).
Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
This method is similar to scikit-learn `train_test_split` with the omission of the stratified options.
Args:
`test_size` (Optional `int` or `float`): Size of the test split
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
`train_size` (Optional `int` or `float`): Size of the train split
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split.
If int, represents the absolute number of train samples.
If None, the value is automatically set to the complement of the test size.
`shuffle` (Optional `bool`, default: `True`): Whether or not to shuffle the data before splitting.
`seed` (Optional `int`): A seed to initialize the default BitGenerator if ``generator=None``.
If None, then fresh, unpredictable entropy will be pulled from the OS.
If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
`generator` (Optional `np.random.Generator`): Numpy random Generator to use to compute the permutation of the dataset rows.
If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).
`keep_in_memory` (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.
`load_from_cache_file` (`bool`, default: `True`): If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
`train_cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
train split instead of the automatically generated cache file name.
`test_cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the
test split instead of the automatically generated cache file name.
`writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files, a lower value consumes less temporary memory while the splits are written.
"""
# If the array is empty we do nothing
if len(self) == 0:
return self
if test_size is None and train_size is None:
test_size = 0.25
# Safety checks similar to scikit-learn's ones.
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
n_samples = len(self)
if (
isinstance(test_size, int)
and (test_size >= n_samples or test_size <= 0)
or isinstance(test_size, float)
and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
f"test_size={test_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if (
isinstance(train_size, int)
and (train_size >= n_samples or train_size <= 0)
or isinstance(train_size, float)
and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
f"train_size={train_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if train_size is not None and not isinstance(train_size, (int, float)):
raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
if test_size is not None and not isinstance(test_size, (int, float)):
raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
raise ValueError(
f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size."
)
if isinstance(test_size, float):
n_test = ceil(test_size * n_samples)
elif isinstance(test_size, int):
n_test = float(test_size)
if isinstance(train_size, float):
n_train = floor(train_size * n_samples)
elif isinstance(train_size, int):
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
f"The sum of train_size and test_size = {n_train + n_test}, "
"should be smaller than the number of "
f"samples {n_samples}. Reduce test_size and/or "
"train_size."
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters."
)
# Check if we've already cached this computation (indexed by a hash)
if self._data_files:
if train_cache_file_name is None or test_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
cache_kwargs = {
"test_size": test_size,
"train_size": train_size,
"shuffle": shuffle,
"generator": generator,
"seed": seed,
"keep_in_memory": keep_in_memory,
"load_from_cache_file": load_from_cache_file,
"train_cache_file_name": train_cache_file_name,
"test_cache_file_name": test_cache_file_name,
"writer_batch_size": writer_batch_size,
}
train_kwargs = cache_kwargs.copy()
train_kwargs["split"] = "train"
test_kwargs = cache_kwargs.copy()
test_kwargs["split"] = "test"
if train_cache_file_name is None:
train_cache_file_name = self._get_cache_file_path(self.train_test_split, train_kwargs)
if test_cache_file_name is None:
test_cache_file_name = self._get_cache_file_path(self.train_test_split, test_kwargs)
if os.path.exists(train_cache_file_name) and os.path.exists(test_cache_file_name) and load_from_cache_file:
logger.info("Loading cached split dataset at %s and %s", train_cache_file_name, test_cache_file_name)
return {
"train": Dataset.from_file(train_cache_file_name, info=self.info, split=self.split),
"test": Dataset.from_file(test_cache_file_name, info=self.info, split=self.split),
}
if not shuffle:
train_indices = np.arange(n_train)
test_indices = np.arange(n_train, n_train + n_test)
else:
if generator is None:
generator = np.random.default_rng(seed)
# random partition
permutation = generator.permutation(len(self))
test_indices = permutation[:n_test]
train_indices = permutation[n_test : (n_test + n_train)]
train_split = self.select(
indices=train_indices,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=train_cache_file_name,
writer_batch_size=writer_batch_size,
)
test_split = self.select(
indices=test_indices,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=test_cache_file_name,
writer_batch_size=writer_batch_size,
)
return {"train": train_split, "test": test_split}
|
nlp-master
|
src/nlp/arrow_dataset.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import os
import re
_first_cap_re = re.compile("(.)([A-Z][a-z0-9]+)")
_all_cap_re = re.compile("([a-z0-9])([A-Z])")
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
s1 = _first_cap_re.sub(r"\1_\2", name)
return _all_cap_re.sub(r"\1_\2", s1).lower()
def snake_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
return "".join(n.capitalize() for n in name.split("_"))
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError("Should be a dataset name, not a path: %s" % name)
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError("Should be a dataset name, not a path: %s" % name)
return "%s-%s" % (filename_prefix_for_name(name), split)
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += ".%s" % filetype_suffix
filepath = os.path.join(data_dir, prefix)
return "%s*" % filepath
def filename_for_dataset_split(dataset_name, split, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += ".%s" % filetype_suffix
return prefix
def filepath_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
filename = filename_for_dataset_split(dataset_name=dataset_name, split=split, filetype_suffix=filetype_suffix,)
filepath = os.path.join(data_dir, filename)
return filepath
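# Example (illustrative sketch of the naming round-trip assumed by the cache layout):
#
# camelcase_to_snakecase("EraserMultiRc")  # -> "eraser_multi_rc"
# snake_to_camelcase("eraser_multi_rc")    # -> "EraserMultiRc"
# filename_for_dataset_split("squad", "train", filetype_suffix="arrow")  # -> "squad-train.arrow"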
|
nlp-master
|
src/nlp/naming.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""To write records into Parquet files."""
import errno
import logging
import os
import socket
from typing import Any, Dict, List, Optional
import pyarrow as pa
from .utils.file_utils import HF_DATASETS_CACHE, hash_url_to_filename
logger = logging.getLogger(__name__)
# Batch size constants. For more info, see:
# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations)
DEFAULT_MAX_BATCH_SIZE = 10_000 # hopefully it doesn't write too much at once (max is 2GB)
class ArrowWriter(object):
"""Shuffles and writes Examples to Arrow files.
"""
def __init__(
self,
data_type: Optional[pa.DataType] = None,
schema: Optional[pa.Schema] = None,
path: Optional[str] = None,
stream: Optional[pa.NativeFile] = None,
writer_batch_size: Optional[int] = None,
disable_nullable: bool = True,
):
if path is None and stream is None:
raise ValueError("At least one of path and stream must be provided.")
if data_type is not None:
self._type: pa.DataType = data_type
self._schema: pa.Schema = pa.schema(field for field in self._type)
elif schema is not None:
self._schema: pa.Schema = schema
self._type: pa.DataType = pa.struct(field for field in self._schema)
else:
self._schema = None
self._type = None
if disable_nullable and self._schema is not None:
self._schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in self._type)
self._type = pa.struct(pa.field(field.name, field.type, nullable=False) for field in self._type)
self._path = path
if stream is None:
self.stream = pa.OSFile(self._path, "wb")
else:
self.stream = stream
self.writer_batch_size = writer_batch_size or DEFAULT_MAX_BATCH_SIZE
self._num_examples = 0
self._num_bytes = 0
self.current_rows = []
self._build_writer(schema=self._schema)
def _build_writer(self, pa_table=None, schema=None):
if schema is not None:
self._schema: pa.Schema = schema
self._type: pa.DataType = pa.struct(field for field in self._schema)
self.pa_writer = pa.RecordBatchStreamWriter(self.stream, schema)
elif pa_table is not None:
self._schema: pa.Schema = pa_table.schema
self._type: pa.DataType = pa.struct(field for field in self._schema)
self.pa_writer = pa.RecordBatchStreamWriter(self.stream, self._schema)
else:
self.pa_writer = None
@property
def schema(self):
return self._schema if self._schema is not None else []
def _write_array_on_file(self, pa_array):
"""Write a PyArrow Array"""
pa_batch = pa.RecordBatch.from_struct_array(pa_array)
self._num_bytes += pa_array.nbytes
self.pa_writer.write_batch(pa_batch)
def write_on_file(self):
""" Write stored examples
"""
if self.current_rows:
pa_array = pa.array(self.current_rows, type=self._type)
first_example = pa.array(self.current_rows[0:1], type=self._type)[0]
# Sanity check
if pa_array[0] != first_example:
# There was an Overflow in StructArray. Let's reduce the batch_size
while pa_array[0] != first_example:
new_batch_size = self.writer_batch_size // 2
pa_array = pa.array(self.current_rows[:new_batch_size], type=self._type)
logger.warning(
"Batch size is too big (>2GB). Reducing it from {} to {}".format(
self.writer_batch_size, new_batch_size
)
)
self.writer_batch_size = new_batch_size
n_batches = len(self.current_rows) // new_batch_size
n_batches += int(len(self.current_rows) % new_batch_size != 0)
for i in range(n_batches):
pa_array = pa.array(
self.current_rows[i * new_batch_size : (i + 1) * new_batch_size], type=self._type,
)
self._write_array_on_file(pa_array)
else:
# All good
self._write_array_on_file(pa_array)
self.current_rows = []
def write(self, example: Dict[str, Any], writer_batch_size: Optional[int] = None):
""" Add a given Example to the write-pool which is written to file.
Args:
example: the Example to add.
"""
self.current_rows.append(example)
self._num_examples += 1
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if self.pa_writer is None:
self._build_writer(pa_table=pa.Table.from_pydict(example))
if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
self.write_on_file()
def write_batch(
self, batch_examples: Dict[str, List[Any]], writer_batch_size: Optional[int] = None,
):
""" Write a batch of Example to file.
Args:
example: the Example to add.
"""
if self.pa_writer is None:
self._build_writer(pa_table=pa.Table.from_pydict(batch_examples))
pa_table: pa.Table = pa.Table.from_pydict(batch_examples, schema=self._schema)
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
batches: List[pa.RecordBatch] = pa_table.to_batches(max_chunksize=writer_batch_size)
self._num_bytes += sum(batch.nbytes for batch in batches)
self._num_examples += pa_table.num_rows
for batch in batches:
self.pa_writer.write_batch(batch)
def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
""" Write a batch of Example to file.
Args:
example: the Example to add.
"""
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if self.pa_writer is None:
self._build_writer(pa_table=pa_table)
batches: List[pa.RecordBatch] = pa_table.to_batches(max_chunksize=writer_batch_size)
self._num_bytes += sum(batch.nbytes for batch in batches)
self._num_examples += pa_table.num_rows
for batch in batches:
self.pa_writer.write_batch(batch)
def finalize(self, close_stream=True):
if self.pa_writer is not None:
self.write_on_file()
self.pa_writer.close()
if close_stream:
self.stream.close()
logger.info(
"Done writing %s examples in %s bytes %s.",
self._num_examples,
self._num_bytes,
self._path if self._path else "",
)
return self._num_examples, self._num_bytes
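# Example (hypothetical usage sketch): writing two examples to a local Arrow file and closing it.
# The schema and path are illustrative.
#
# import pyarrow as pa
# schema = pa.schema([pa.field("text", pa.string()), pa.field("label", pa.int64())])
# writer = ArrowWriter(schema=schema, path="/tmp/demo.arrow")
# writer.write({"text": "hello", "label": 0})
# writer.write({"text": "world", "label": 1})
# num_examples, num_bytes = writer.finalize()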
class BeamWriter(object):
"""
Writes Examples to Arrow files.
The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines.
"""
def __init__(
self,
data_type: Optional[pa.DataType] = None,
schema: Optional[pa.Schema] = None,
path: Optional[str] = None,
namespace: Optional[str] = None,
cache_dir: Optional[str] = None,
):
if data_type is None and schema is None:
raise ValueError("At least one of data_type and schema must be provided.")
if path is None:
raise ValueError("Path must be provided.")
if data_type is not None:
self._type: pa.DataType = data_type
self._schema: pa.Schema = pa.schema(field for field in self._type)
else:
self._schema: pa.Schema = schema
self._type: pa.DataType = pa.struct(field for field in self._schema)
self._path = path
self._parquet_path = os.path.splitext(path)[0] + ".parquet"
self._namespace = namespace or "default"
self._num_examples = None
self._cache_dir = cache_dir or HF_DATASETS_CACHE
def write_from_pcollection(self, pcoll_examples):
"""Add the final steps of the beam pipeline: write to parquet files."""
import apache_beam as beam
from .utils.beam_utils import WriteToParquet
def inc_num_examples(example):
beam.metrics.Metrics.counter(self._namespace, "num_examples").inc()
# count examples
_ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples)
# save dataset
return (
pcoll_examples
| "Get values" >> beam.Values()
| "Save to parquet"
>> WriteToParquet(self._parquet_path, self._schema, num_shards=1, shard_name_template="")
)
def finalize(self, metrics_query_result: dict):
"""
Run after the pipeline has finished.
It converts the resulting parquet files to arrow and it completes the info from the pipeline metrics.
Args:
metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
"""
import apache_beam as beam
from .utils import beam_utils
# Convert to arrow
logger.info("Converting parquet file {} to arrow {}".format(self._parquet_path, self._path))
try: # stream conversion
with beam.io.filesystems.FileSystems.open(self._parquet_path) as src:
with beam.io.filesystems.FileSystems.create(self._path) as dest:
parquet_to_arrow(src, dest)
except socket.error as e: # broken pipe can happen if the connection is unstable, do local conversion instead
if e.errno != errno.EPIPE: # not a broken pipe
raise e
logger.warning("Broken Pipe during stream conversion from parquet to arrow. Using local convert instead")
local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
os.makedirs(local_convert_dir, exist_ok=True)
local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(self._parquet_path) + ".parquet")
local_arrow_path = os.path.splitext(local_parquet_path)[0] + ".arrow"
beam_utils.download_remote_to_local(self._parquet_path, local_parquet_path)
parquet_to_arrow(local_parquet_path, local_arrow_path)
beam_utils.upload_local_to_remote(local_arrow_path, self._path)
# Save metrics
counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
self._num_examples = counters_dict["num_examples"]
output_file_metadata = beam.io.filesystems.FileSystems.match([self._path], limits=[1])[0].metadata_list[0]
self._num_bytes = output_file_metadata.size_in_bytes
return self._num_examples, self._num_bytes
def parquet_to_arrow(source, destination):
"""Convert parquet file to arrow file. Inputs can be str paths or file-like objects"""
pf = pa.parquet.ParquetFile(source)
stream = None if isinstance(destination, str) else destination
writer = ArrowWriter(path=destination, stream=stream)
for i in range(pf.num_row_groups):
row_group_table = pf.read_row_group(i)
writer.write_table(row_group_table)
return destination
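# Example (hypothetical usage sketch): converting a local parquet shard to an arrow file.
# Paths are illustrative; a str destination is written via a new OSFile, while a file-like
# destination is written to directly as a stream.
#
# parquet_to_arrow("/tmp/shard.parquet", "/tmp/shard.arrow")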
|
nlp-master
|
src/nlp/arrow_writer.py
|
# flake8: noqa
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "0.3.0"
import pyarrow
from pyarrow import total_allocated_bytes
from . import datasets
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .features import ClassLabel, Features, Sequence, Tensor, Translation, TranslationVariableLanguages, Value
from .info import DatasetInfo, MetricInfo
from .inspect import inspect_dataset, inspect_metric, list_datasets, list_metrics
from .load import import_main_class, load_dataset, load_metric, prepare_module
from .metric import Metric
from .splits import NamedSplit, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent
from .utils import *
from .utils.tqdm_utils import disable_progress_bar
if int(pyarrow.__version__.split(".")[1]) < 16 or int(pyarrow.__version__.split(".")[0]) > 0:
raise ImportWarning(
"To use `nlp`, the module `pyarrow>=0.16.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
|
nlp-master
|
src/nlp/__init__.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" Arrow ArrowReader."""
import copy
import logging
import math
import os
import re
import shutil
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional
import pyarrow as pa
import pyarrow.parquet
from .arrow_dataset import Dataset
from .naming import filename_for_dataset_split
from .utils import cached_path, py_utils
if TYPE_CHECKING:
from .info import DatasetInfo # noqa: F401
logger = logging.getLogger(__name__)
_BUFFER_SIZE = 8 << 20 # 8 MiB per file.
HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"
_SUB_SPEC_RE = re.compile(
r"""
^
(?P<split>\w+)
(\[
((?P<from>-?\d+)
(?P<from_pct>%)?)?
:
((?P<to>-?\d+)
(?P<to_pct>%)?)?
\])?
$
""",
re.X,
)
_ADDITION_SEP_RE = re.compile(r"\s*\+\s*")
class DatasetNotOnHfGcs(ConnectionError):
"""When you can't get the dataset from the Hf google cloud storage"""
pass
@dataclass(frozen=True)
class FileInstructions:
"""The file instructions associated with a split ReadInstruction.
Attributes:
num_examples: `int`, The total number of examples
file_instructions: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not the absolute one.
skip/take indicates which examples to read from the file: `ds.slice(skip, take)`
"""
num_examples: int
file_instructions: List[dict]
def make_file_instructions(name, split_infos, instruction, filetype_suffix=None):
"""Returns instructions of the split dict.
Args:
name: Name of the dataset.
split_infos: `List[SplitInfo]`, Dataset splits information
instruction: `ReadInstruction` or `str`
filetype_suffix: `Optional[str]` suffix of dataset files, e.g. 'arrow' or 'parquet'
Returns:
file_instructions: FileInstructions instance
"""
name2len = {info.name: info.num_examples for info in split_infos}
if not isinstance(instruction, ReadInstruction):
instruction = ReadInstruction.from_spec(instruction)
# Create the absolute instruction (per split)
absolute_instructions = instruction.to_absolute(name2len)
return _make_file_instructions_from_absolutes(
name=name, name2len=name2len, absolute_instructions=absolute_instructions, filetype_suffix=filetype_suffix
)
def _make_file_instructions_from_absolutes(name, name2len, absolute_instructions, filetype_suffix=None):
"""Returns the files instructions from the absolute instructions list."""
# For each split, return the files instruction (skip/take)
file_instructions = []
num_examples = 0
for abs_instr in absolute_instructions:
length = name2len[abs_instr.splitname]
filename = filename_for_dataset_split(
dataset_name=name, split=abs_instr.splitname, filetype_suffix=filetype_suffix
)
from_ = 0 if abs_instr.from_ is None else abs_instr.from_
to = length if abs_instr.to is None else abs_instr.to
num_examples += to - from_
single_file_instructions = [{"filename": filename, "skip": from_, "take": to - from_}]
file_instructions.extend(single_file_instructions)
return FileInstructions(num_examples=num_examples, file_instructions=file_instructions,)
class BaseReader:
"""
Build a Dataset object out of Instruction instance(s).
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where the dataset files are stored.
info (DatasetInfo): info about the dataset.
"""
self._path: str = path
self._info: Optional["DatasetInfo"] = info
self._filetype_suffix: Optional[str] = None
def _get_dataset_from_filename(self, filename_skip_take):
"""Returns a Dataset instance from given (filename, skip, take)."""
raise NotImplementedError
def _read_files(self, files, info, original_instructions) -> Dataset:
"""Returns Dataset for given file instructions.
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the absolute path, not relative.
skip/take indicates which examples to read from the file: `ds.slice(skip, take)`
original_instructions: store the original instructions used to build the dataset split in the dataset
"""
pa_batches = []
for f_dict in files:
pa_table: pa.Table = self._get_dataset_from_filename(f_dict)
pa_batches.extend(pa_table.to_batches())
if pa_batches:
pa_table = pa.Table.from_batches(pa_batches)
ds = Dataset(arrow_table=pa_table, data_files=files, info=info, split=original_instructions)
return ds
def get_file_instructions(self, name, instruction, split_infos):
"""Return list of dict {'filename': str, 'skip': int, 'take': int}"""
file_instructions = make_file_instructions(
name, split_infos, instruction, filetype_suffix=self._filetype_suffix
)
files = file_instructions.file_instructions
return files
def read(
self, name, instructions, split_infos,
):
"""Returns Dataset instance(s).
Args:
name (str): name of the dataset.
instructions (ReadInstruction, List[], Dict[]): instruction(s) to read.
Instructions can be strings, in which case they are passed to the ReadInstruction
constructor as-is.
split_infos (list of SplitInfo proto): the available splits for dataset.
Returns:
a single Dataset instance if instruction is a single
ReadInstruction instance. Otherwise a dict/list of Dataset
corresponding to given instructions param shape.
"""
def _read_instruction_to_ds(instruction):
files = self.get_file_instructions(name, instruction, split_infos)
if not files:
msg = 'Instruction "%s" corresponds to no data!' % instruction
raise AssertionError(msg)
return self.read_files(files=tuple(files), original_instructions=instruction)
return py_utils.map_nested(_read_instruction_to_ds, instructions)
def read_files(
self, files, original_instructions=None,
):
"""Returns single Dataset instance for the set of file instructions.
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not the absolute one.
skip/take indicates which examples to read from the file: `ds.skip().take()`
original_instructions: store the original instructions used to build the dataset split in the dataset
Returns:
a Dataset instance.
"""
# Prepend path to filename
files = copy.deepcopy(files)
for f in files:
f.update(filename=os.path.join(self._path, f["filename"]))
dataset = self._read_files(files=files, info=self._info, original_instructions=original_instructions)
return dataset
def download_from_hf_gcs(self, cache_dir, relative_data_dir):
"""
Download the dataset files from the Hf GCS
Args:
cache_dir: `str`, the local cache directory where to save the dataset
relative_data_dir: `str`, the relative directory of the remote files from
the `datasets` directory on GCS.
"""
remote_cache_dir = os.path.join(HF_GCP_BASE_URL, relative_data_dir)
try:
remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
downloaded_dataset_info = cached_path(remote_dataset_info)
shutil.move(downloaded_dataset_info, os.path.join(cache_dir, "dataset_info.json"))
if self._info is not None:
self._info.update(self._info.from_directory(cache_dir))
except ConnectionError:
raise DatasetNotOnHfGcs()
for split in self._info.splits:
file_instructions = self.get_file_instructions(
name=self._info.builder_name, instruction=split, split_infos=self._info.splits.values(),
)
for file_instruction in file_instructions:
remote_prepared_filename = os.path.join(remote_cache_dir, file_instruction["filename"])
downloaded_prepared_filename = cached_path(remote_prepared_filename)
shutil.move(downloaded_prepared_filename, os.path.join(cache_dir, file_instruction["filename"]))
class ArrowReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses memory mapping on arrow files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where the Arrow files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "arrow"
def _get_dataset_from_filename(self, filename_skip_take):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
mmap = pa.memory_map(filename)
f = pa.ipc.open_stream(mmap)
pa_table = f.read_all()
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
class ParquetReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses memory mapping on parquet files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ParquetReader.
Args:
path (str): path where the Parquet files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "parquet"
def _get_dataset_from_filename(self, filename_skip_take):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
pa_table = pa.parquet.read_table(filename, memory_map=True)
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
@dataclass(frozen=True)
class _AbsoluteInstruction:
"""A machine friendly slice: defined absolute positive boundaries."""
splitname: str
from_: int # uint (starting index).
to: int # uint (ending index).
@dataclass(frozen=True)
class _RelativeInstruction:
"""Represents a single parsed slicing instruction, can use % and negatives."""
splitname: str
from_: Optional[int] = None # int (starting index) or None if no lower boundary.
to: Optional[int] = None # int (ending index) or None if no upper boundary.
unit: Optional[str] = None
rounding: Optional[str] = None
def __post_init__(self):
assert self.unit is None or self.unit in ["%", "abs"]
assert self.rounding is None or self.rounding in ["closest", "pct1_dropremainder"]
if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
raise AssertionError("Percent slice boundaries must be > -100 and < 100.")
if self.unit == "%" and self.to is not None and abs(self.to) > 100:
raise AssertionError("Percent slice boundaries must be > -100 and < 100.")
def _str_to_relative_instruction(spec):
"""Returns ReadInstruction for given string."""
res = _SUB_SPEC_RE.match(spec)
if not res:
raise AssertionError("Unrecognized instruction format: %s" % spec)
unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs"
return ReadInstruction(
split_name=res.group("split"),
rounding="closest",
from_=int(res.group("from")) if res.group("from") else None,
to=int(res.group("to")) if res.group("to") else None,
unit=unit,
)
def _pct_to_abs_pct1(boundary, num_examples):
# Using math.trunc here, since -99.5% should give -99%, not -100%.
if num_examples < 100:
msg = (
'Using "pct1_dropremainder" rounding on a split with less than 100 '
"elements is forbidden: it always results in an empty dataset."
)
raise AssertionError(msg)
return boundary * math.trunc(num_examples / 100.0)
def _pct_to_abs_closest(boundary, num_examples):
return int(round(boundary * num_examples / 100.0))
def _rel_to_abs_instr(rel_instr, name2len):
"""Returns _AbsoluteInstruction instance for given RelativeInstruction.
Args:
rel_instr: RelativeInstruction instance.
name2len: dict {split_name: num_examples}.
"""
pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
split = rel_instr.splitname
if split not in name2len:
raise ValueError('Unknown split "{}". Should be one of {}.'.format(split, list(name2len)))
num_examples = name2len[split]
from_ = rel_instr.from_
to = rel_instr.to
if rel_instr.unit == "%":
from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
to = num_examples if to is None else pct_to_abs(to, num_examples)
else:
from_ = 0 if from_ is None else from_
to = num_examples if to is None else to
if abs(from_) > num_examples or abs(to) > num_examples:
msg = "Requested slice [%s:%s] incompatible with %s examples." % (from_ or "", to or "", num_examples)
raise AssertionError(msg)
if from_ < 0:
from_ = num_examples + from_
elif from_ == 0:
from_ = None
if to < 0:
to = num_examples + to
elif to == num_examples:
to = None
return _AbsoluteInstruction(split, from_, to)
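# Example (illustrative): how the two rounding modes turn a percent boundary into an absolute
# index for a split of 999 examples.
#
# _pct_to_abs_closest(33, 999)  # -> int(round(33 * 999 / 100)) = 330
# _pct_to_abs_pct1(33, 999)     # -> 33 * trunc(999 / 100) = 33 * 9 = 297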
class ReadInstruction(object):
"""Reading instruction for a dataset.
Examples of usage:
```
# The following lines are equivalent:
ds = nlp.load_dataset('mnist', split='test[:33%]')
ds = nlp.load_dataset('mnist', split=ReadInstruction.from_spec('test[:33%]'))
ds = nlp.load_dataset('mnist', split=ReadInstruction('test', to=33, unit='%'))
ds = nlp.load_dataset('mnist', split=ReadInstruction(
'test', from_=0, to=33, unit='%'))
# The following lines are equivalent:
ds = nlp.load_dataset('mnist', split='test[:33%]+train[1:-1]')
ds = nlp.load_dataset('mnist', split=ReadInstruction.from_spec(
'test[:33%]+train[1:-1]'))
ds = nlp.load_dataset('mnist', split=(
ReadInstruction('test', to=33, unit='%') +
ReadInstruction('train', from_=1, to=-1, unit='abs')))
# 10-fold validation:
tests = nlp.load_dataset(
'mnist',
[ReadInstruction('train', from_=k, to=k+10, unit='%')
for k in range(0, 100, 10)])
trains = nlp.load_dataset(
'mnist',
[RI('train', to=k, unit='%') + RI('train', from_=k+10, unit='%')
for k in range(0, 100, 10)])
```
"""
def _init(self, relative_instructions):
# Private initializer.
self._relative_instructions = relative_instructions
@classmethod
def _read_instruction_from_relative_instructions(cls, relative_instructions):
"""Returns ReadInstruction obj initialized with relative_instructions."""
# Use __new__ to bypass __init__, which is used by the public API and is not convenient here.
result = cls.__new__(cls)
result._init(relative_instructions) # pylint: disable=protected-access
return result
def __init__(self, split_name, rounding="closest", from_=None, to=None, unit=None):
"""Initialize ReadInstruction.
Args:
split_name (str): name of the split to read. Eg: 'train'.
rounding (str): The rounding behaviour to use when percent slicing is
used. Ignored when slicing with absolute indices.
Possible values:
- 'closest' (default): The specified percentages are rounded to the
closest value. Use this if you want the specified percents to be as
exact as possible.
- 'pct1_dropremainder': the specified percentages are treated as
multiple of 1%. Use this option if you want consistency. Eg:
len(5%) == 5 * len(1%).
Using this option, one might not be able to use the full set of
examples, if the number of those is not a multiple of 100.
from_ (int):
to (int): alternative way of specifying slicing boundaries. If any of
{from_, to, unit} argument is used, slicing cannot be specified as
string.
unit (str): optional, one of:
'%': to set the slicing unit as percents of the split size.
'abs': to set the slicing unit as absolute numbers.
"""
# This constructor is not always called. See factory method
# `_read_instruction_from_relative_instructions`. Common init instructions
# MUST be placed in the _init method.
self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
@classmethod
def from_spec(cls, spec):
"""Creates a ReadInstruction instance out of a string spec.
Args:
spec (str): split(s) + optional slice(s) to read. A slice can be
specified, using absolute numbers (int) or percentages (int). E.g.
`test`: test split.
`test + validation`: test split + validation split.
`test[10:]`: test split, minus its first 10 records.
`test[:10%]`: first 10% records of test split.
`test[:-5%]+train[40%:60%]`: first 95% of test + middle 20% of
train.
Returns:
ReadInstruction instance.
"""
spec = str(spec) # Need to convert to str in case of NamedSplit instance.
subs = _ADDITION_SEP_RE.split(spec)
if not subs:
raise AssertionError("No instructions could be built out of %s" % spec)
instruction = _str_to_relative_instruction(subs[0])
return sum([_str_to_relative_instruction(sub) for sub in subs[1:]], instruction)
def __add__(self, other):
"""Returns a new ReadInstruction obj, result of appending other to self."""
if not isinstance(other, ReadInstruction):
msg = "ReadInstruction can only be added to another ReadInstruction obj."
raise AssertionError(msg)
other_ris = other._relative_instructions # pylint: disable=protected-access
if self._relative_instructions[0].rounding != other_ris[0].rounding:
raise AssertionError("It is forbidden to sum ReadInstruction instances " "with different rounding values.")
return self._read_instruction_from_relative_instructions(self._relative_instructions + other_ris)
def __str__(self):
return "ReadInstruction(%s)" % self._relative_instructions
def to_absolute(self, name2len):
"""Translate instruction into a list of absolute instructions.
Those absolute instructions are then to be added together.
Args:
name2len: dict associating split names to number of examples.
Returns:
list of _AbsoluteInstruction instances (corresponds to the + in spec).
"""
return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
|
nlp-master
|
src/nlp/arrow_reader.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" This class handle features definition in datasets and some utilities to display table type."""
import logging
from dataclasses import dataclass
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
import pyarrow as pa
from . import utils
logger = logging.getLogger(__name__)
def string_to_arrow(type_str: str):
if type_str not in pa.__dict__:
if str(type_str + "_") not in pa.__dict__:
raise ValueError(
f"Neither {type_str} nor {type_str + '_'} seems to be a pyarrow data type. "
f"Please make sure to use a correct data type, see: "
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
)
arrow_data_type_str = str(type_str + "_")
else:
arrow_data_type_str = type_str
return pa.__dict__[arrow_data_type_str]()
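# Example (illustrative): dtype strings are resolved against pyarrow factory functions, adding
# a trailing underscore when needed.
#
# string_to_arrow("int32")  # -> pa.int32()
# string_to_arrow("bool")   # -> pa.bool_()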
@dataclass
class Value:
""" Encapsulate an Arrow datatype for easy serialization.
"""
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = "Value"
def __post_init__(self):
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
def encode_example(self, value):
if pa.types.is_boolean(self.pa_type):
return bool(value)
elif pa.types.is_integer(self.pa_type):
return int(value)
elif pa.types.is_floating(self.pa_type):
return float(value)
else:
return value
@dataclass
class Tensor:
""" Construct a 0D or 1D Tensor feature.
If 0D, the Tensor is a dtype element; if 1D, it is a fixed-length list of dtype elements.
Mostly here for compatibility with tfds.
"""
shape: Union[Tuple[int], List[int]]
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = "Tensor"
def __post_init__(self):
assert len(self.shape) < 2, "Tensor can only take 0 or 1 dimensional shapes."
if len(self.shape) == 1:
self.pa_type = pa.list_(string_to_arrow(self.dtype), self.shape[0])
else:
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
@dataclass
class ClassLabel:
""" Handle integer class labels. Here for compatiblity with tfds.
There are 3 ways to define a ClassLabel, which correspond to the 3
arguments:
* `num_classes`: create 0 to (num_classes-1) labels
* `names`: a list of label strings
* `names_file`: a file containing the list of labels.
Note: On python2, the strings are encoded as utf-8.
Args:
num_classes: `int`, number of classes. All labels must be < num_classes.
names: `list<str>`, string names for the integer classes. The
order in which the names are provided is kept.
names_file: `str`, path to a file with names for the integer
classes, one per line.
"""
num_classes: int = None
names: List[str] = None
names_file: str = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "int64"
pa_type: ClassVar[Any] = pa.int64()
_str2int: ClassVar[Dict[str, int]] = None
_int2str: ClassVar[List[str]] = None
_type: str = "ClassLabel"
def __post_init__(self):
# The label is explicitly set as undefined (no label defined)
if not sum(bool(a) for a in (self.num_classes, self.names, self.names_file)):
return
# if sum(bool(a) for a in (self.num_classes, self.names, self.names_file)) != 1:
# raise ValueError("Only a single argument of ClassLabel() should be provided.")
if self.num_classes is None:
if self.names is None:
self.names = self._load_names_from_file(self.names_file)
else:
if self.names is None:
self.names = [str(i) for i in range(self.num_classes)]
elif len(self.names) != self.num_classes:
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
"Got {} names VS {} num_classes".format(len(self.names), self.num_classes)
)
# Prepare mappings
self._int2str = [str(name) for name in self.names]
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError("Some label names are duplicated. Each label name should be unique.")
# If num_classes has been defined, ensure that num_classes and names match
num_classes = len(self._str2int)
if self.num_classes is None:
self.num_classes = num_classes
elif self.num_classes != num_classes:
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
"Got {} names VS {} num_classes".format(num_classes, self.num_classes)
)
def __call__(self):
return self.pa_type
def str2int(self, str_value):
"""Conversion class name string => integer."""
str_value = str(str_value)
if self._str2int:
# strip key if not in dict
if str_value not in self._str2int:
str_value = str_value.strip()
return self._str2int[str_value]
# No names provided, try to integerize
failed_parse = False
try:
int_value = int(str_value)
except ValueError:
failed_parse = True
if failed_parse or not 0 <= int_value < self.num_classes:
raise ValueError("Invalid string class label %s" % str_value)
return int_value
def int2str(self, int_value):
"""Conversion integer => class name string."""
if self._int2str:
# Maybe should support batched np array/eager tensors, to allow things
# like
# out_ids = model(inputs)
# labels = cifar10.info.features['label'].int2str(out_ids)
return self._int2str[int_value]
# No names provided, return str(int)
if not 0 <= int_value < self.num_classes:
raise ValueError("Invalid integer class label %d" % int_value)
return str(int_value)
def encode_example(self, example_data):
if self.num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, str):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self.num_classes:
raise ValueError(
"Class label %d greater than configured num_classes %d" % (example_data, self.num_classes)
)
return example_data
@staticmethod
def _load_names_from_file(names_filepath):
with open(names_filepath, "r") as f:
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
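# Example (hypothetical usage sketch): converting between label names and integer ids.
#
# label = ClassLabel(names=["negative", "positive"])
# label.str2int("positive")         # -> 1
# label.int2str(0)                  # -> "negative"
# label.encode_example("negative")  # -> 0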
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
Here for compatibility with tfds.
Input: The Translate feature accepts a dictionary for each example mapping
string language codes to string translations.
Output: A dictionary mapping string language codes to translations as `Text`
features.
Example:
At construction time:
```
nlp.features.Translation(languages=['en', 'fr', 'de'])
```
During data generation:
```
yield {
'en': 'the cat',
'fr': 'le chat',
'de': 'die katze'
}
```
Tensor returned by `.as_dataset()`:
```
{
'en': 'the cat',
'fr': 'le chat',
'de': 'die katze',
}
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = "Translation"
def __call__(self):
return pa.struct({lang: pa.string() for lang in self.languages})
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
Here for compatibility with tfds.
Input: The TranslationVariableLanguages feature accepts a dictionary for each
example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Output:
language: variable-length 1D tf.Tensor of tf.string language codes, sorted
in ascending order.
translation: variable-length 1D tf.Tensor of tf.string plain text
translations, sorted to align with language codes.
Example (fixed language list):
At construction time:
```
nlp.features.Translation(languages=['en', 'fr', 'de'])
```
During data generation:
```
yield {
'en': 'the cat',
'fr': ['le chat', 'la chatte'],
'de': 'die katze'
}
```
Tensor returned :
```
{
'language': ['en', 'de', 'fr', 'fr'],
'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
}
```
"""
languages: List = None
num_languages: int = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = "TranslationVariableLanguages"
def __post_init__(self):
self.languages = list(sorted(list(set(self.languages)))) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
"Some languages in example ({0}) are not in valid set ({1}).".format(
", ".join(sorted(set(translation_dict) - lang_set)), ", ".join(lang_set)
)
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
@dataclass
class Sequence:
""" Construct a list of feature from a single type or a dict of types.
Mostly here for compatibility with tfds.
"""
feature: Any
length: int = -1
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = "Sequence"
FeatureType = Union[dict, list, tuple, Value, Tensor, ClassLabel, Translation, TranslationVariableLanguages, Sequence]
def get_nested_type(schema: FeatureType) -> pa.DataType:
""" Convert our Feature nested object in an Apache Arrow type """
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return pa.struct({key: get_nested_type(value) for key, value in schema.items()})
elif isinstance(schema, (list, tuple)):
assert len(schema) == 1, "When defining a list feature, you should just provide one example of the inner type"
inner_type = get_nested_type(schema[0])
return pa.list_(inner_type)
elif isinstance(schema, Sequence):
inner_type = get_nested_type(schema.feature)
# We allow reversing a list of dicts => dict of lists for compatibility with tfds
if isinstance(inner_type, pa.StructType):
return pa.struct(dict((f.name, pa.list_(f.type, schema.length)) for f in inner_type))
return pa.list_(inner_type, schema.length)
# Other objects are callable which returns their data type (ClassLabel, Tensor, Translation, Arrow datatype creation methods)
return schema()
def encode_nested_example(schema, obj):
""" Encode a nested example.
This is used since some features (in particular ClassLabel) have some logic during encoding.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return dict(
(k, encode_nested_example(sub_schema, sub_obj)) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
return [encode_nested_example(sub_schema, o) for o in obj]
elif isinstance(schema, Sequence):
# We allow reversing a list of dicts => dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
# dict of list to fill
list_dict = {}
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k, dict_tuples in utils.zip_dict(schema.feature, *obj):
list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]]
return list_dict
else:
# obj is a single dict
for k, (sub_schema, sub_objs) in utils.zip_dict(schema.feature, obj):
list_dict[k] = [encode_nested_example(sub_schema, o) for o in sub_objs]
return list_dict
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError("Got a string but expected a list instead: '{}'".format(obj))
return [encode_nested_example(schema.feature, o) for o in obj]
# Object with special encoding:
# ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
elif isinstance(schema, (ClassLabel, TranslationVariableLanguages, Value)):
return schema.encode_example(obj)
# Other objects should be directly convertible to a native Arrow type (e.g. Translation)
return obj
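# Example (illustrative): a Sequence over a dict of features is stored as a dict of lists,
# so a list of dicts is transposed during encoding.
#
# schema = {"answers": Sequence(feature={"text": Value("string")})}
# obj = {"answers": [{"text": "Paris"}, {"text": "paris"}]}
# encode_nested_example(schema, obj)  # -> {"answers": {"text": ["Paris", "paris"]}}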
def generate_from_dict(obj: Any):
""" Regenerate the nested feature object from a serialized dict.
We use the '_type' fields to get the dataclass name to load.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(obj, list):
return [generate_from_dict(value) for value in obj]
# Otherwise we have a dict or a dataclass
if "_type" not in obj:
return {key: generate_from_dict(value) for key, value in obj.items()}
class_type = globals()[obj.pop("_type")]
if class_type == Sequence:
return Sequence(feature=generate_from_dict(obj["feature"]), length=obj["length"])
return class_type(**obj)
def generate_from_arrow(pa_type: pa.DataType):
if isinstance(pa_type, pa.StructType):
return {field.name: generate_from_arrow(field.type) for field in pa_type}
elif isinstance(pa_type, pa.FixedSizeListType):
return Sequence(feature=generate_from_arrow(pa_type.value_type), length=pa_type.list_size)
elif isinstance(pa_type, pa.ListType):
return [generate_from_arrow(pa_type.value_type)]
elif isinstance(pa_type, pa.DictionaryType):
raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
elif isinstance(pa_type, pa.DataType):
return Value(dtype=str(pa_type))
else:
return ValueError(f"Cannot convert {pa_type} to a Feature type.")
class Features(dict):
@property
def type(self):
return get_nested_type(self)
@classmethod
def from_pyarrow_type(cls, pa_type: pa.DataType):
obj = generate_from_arrow(pa_type)
return cls(**obj)
@classmethod
def from_dict(cls, dic):
obj = generate_from_dict(dic)
return cls(**obj)
def encode_example(self, example):
return encode_nested_example(self, example)
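# Example (hypothetical usage sketch): building a Features object and inspecting its Arrow type.
#
# features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
# features.type  # -> struct<text: string, label: int64> (roughly)
# features.encode_example({"text": "great movie", "label": "pos"})  # -> {"text": "great movie", "label": 1}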
|
nlp-master
|
src/nlp/features.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""DatasetBuilder base class."""
import abc
import contextlib
import inspect
import logging
import os
import shutil
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import pyarrow as pa
from . import utils
from .arrow_reader import ArrowReader, DatasetNotOnHfGcs
from .arrow_writer import ArrowWriter, BeamWriter
from .features import Features, Value
from .info import DATASET_INFO_FILENAME, DATASET_INFOS_DICT_FILE_NAME, LICENSE_FILENAME, DatasetInfo, DatasetInfosDict
from .naming import camelcase_to_snakecase, filename_prefix_for_split
from .splits import Split, SplitDict
from .utils.download_manager import DownloadManager, GenerateMode
from .utils.file_utils import HF_DATASETS_CACHE, DownloadConfig, is_remote_url
from .utils.info_utils import verify_checksums, verify_splits
logger = logging.getLogger(__name__)
FORCE_REDOWNLOAD = GenerateMode.FORCE_REDOWNLOAD
REUSE_CACHE_IF_EXISTS = GenerateMode.REUSE_CACHE_IF_EXISTS
REUSE_DATASET_IF_EXISTS = GenerateMode.REUSE_DATASET_IF_EXISTS
class InvalidConfigName(ValueError):
pass
@dataclass
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
DatasetBuilder subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
"""
name: str = "default"
version: Optional[Union[str, utils.Version]] = "0.0.0"
data_dir: str = None
data_files: Union[Dict, List] = None
description: str = None
def __post_init__(self):
# The config name is used to name the cache directory.
invalid_windows_characters = r"<>:/\|?*"
for invalid_char in invalid_windows_characters:
if invalid_char in self.name:
raise InvalidConfigName(
(
"Bad characters from black list '{}' found in '{}'. "
"They could create issues when creating a directory "
"for this config on Windows filesystem."
).format(invalid_windows_characters, self.name)
)
class DatasetBuilder:
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
* `nlp.DatasetBuilder.info`: documents the dataset, including feature
names, types, and shapes, version, splits, citation, etc.
* `nlp.DatasetBuilder.download_and_prepare`: downloads the source data
and writes it to disk.
* `nlp.DatasetBuilder.as_dataset`: generate an `Dataset`.
**Configuration**: Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a `nlp.BuilderConfig` subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in `nlp.DatasetBuilder.builder_configs`.
"""
# Default version.
VERSION = utils.Version("0.0.0")
# Class for the builder config.
BUILDER_CONFIG_CLASS = BuilderConfig
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
    # The `manual_download_instructions` property below must be overridden for
    # datasets that use the 'manual_dir' functionality - the ones that require
    # users to do additional steps to download the data
    # (this is usually due to some external regulations / rules).
    # It should return a string with user instructions, including
    # the list of files that should be present. It will be
    # displayed in the dataset documentation.
def __init__(
self, cache_dir=None, name=None, **config_kwargs,
):
"""Constructs a DatasetBuilder.
Callers must pass arguments as keyword arguments.
Args:
cache_dir: `str`, directory to read/write data. Defaults to "~/nlp".
name: `str` name, optional configuration for the dataset that affects the data generated on disk. Different
`builder_config`s will have their own subdirectories and versions.
If not provided, uses the first configuration in self.BUILDER_CONFIGS
            config_kwargs: will override the default kwargs of the config
"""
# DatasetBuilder name
self.name = camelcase_to_snakecase(self.__class__.__name__)
# Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
config_kwargs = dict((key, value) for key, value in config_kwargs.items() if value is not None)
self.config = self._create_builder_config(name, **config_kwargs,)
# prepare info: DatasetInfo are a standardized dataclass across all datasets
# Prefill datasetinfo
info = self.get_exported_dataset_info()
info.update(self._info())
info.builder_name = self.name
info.config_name = self.config.name
info.version = self.config.version
self.info = info
# prepare data dirs
self._cache_dir_root = os.path.expanduser(cache_dir or HF_DATASETS_CACHE)
self._cache_dir = self._build_cache_dir()
if os.path.exists(self._cache_dir): # check if data exist
if len(os.listdir(self._cache_dir)) > 0:
logger.info("Overwrite dataset info from restored data version.")
self.info = DatasetInfo.from_directory(self._cache_dir)
else: # dir exists but no data, remove the empty dir as data aren't available anymore
logger.warning(
"Old caching folder {} for dataset {} exists but not data were found. Removing it. ".format(
self._cache_dir, self.name
)
)
os.rmdir(self._cache_dir)
@property
def manual_download_instructions(self) -> Optional[str]:
return None
@classmethod
def get_all_exported_dataset_infos(cls) -> dict:
"""Empty dict if doesn't exist"""
dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), DATASET_INFOS_DICT_FILE_NAME)
if os.path.exists(dset_infos_file_path):
return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
return {}
def get_exported_dataset_info(self) -> DatasetInfo:
"""Empty DatasetInfo if doesn't exist"""
return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
def _create_builder_config(self, name=None, **config_kwargs):
""" Create and validate BuilderConfig object.
Uses the first configuration in self.BUILDER_CONFIGS if name is None
config_kwargs override the defaults kwargs in config
"""
builder_config = None
if name is None and self.BUILDER_CONFIGS and not config_kwargs:
if len(self.BUILDER_CONFIGS) > 1:
example_of_usage = "load_dataset('{}', '{}')".format(self.name, self.BUILDER_CONFIGS[0].name)
raise ValueError(
"Config name is missing."
"\nPlease pick one among the available configs: %s" % list(self.builder_configs.keys())
+ "\nExample of usage:\n\t`{}`".format(example_of_usage)
)
builder_config = self.BUILDER_CONFIGS[0]
logger.info("No config specified, defaulting to first: %s/%s", self.name, builder_config.name)
if isinstance(name, str):
builder_config = self.builder_configs.get(name)
if builder_config is None and self.BUILDER_CONFIGS:
raise ValueError(
"BuilderConfig %s not found. Available: %s" % (name, list(self.builder_configs.keys()))
)
if not builder_config:
if name is not None:
config_kwargs["name"] = name
if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
config_kwargs["version"] = self.VERSION
builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
for key, value in config_kwargs.items():
if value is not None:
setattr(builder_config, key, value)
name = builder_config.name
if not name:
raise ValueError("BuilderConfig must have a name, got %s" % name)
is_custom = name not in self.builder_configs
if is_custom:
logger.warning("Using custom data configuration %s", name)
else:
if builder_config is not self.builder_configs[name]:
raise ValueError(
"Cannot name a custom BuilderConfig the same as an available "
"BuilderConfig. Change the name. Available BuilderConfigs: %s"
% (list(self.builder_configs.keys()))
)
if not builder_config.version:
raise ValueError("BuilderConfig %s must have a version" % name)
# if not builder_config.description:
# raise ValueError("BuilderConfig %s must have a description" % name)
return builder_config
@utils.classproperty
@classmethod
@utils.memoize()
def builder_configs(cls):
"""Pre-defined list of configurations for this builder class."""
config_dict = {config.name: config for config in cls.BUILDER_CONFIGS}
if len(config_dict) != len(cls.BUILDER_CONFIGS):
names = [config.name for config in cls.BUILDER_CONFIGS]
raise ValueError("Names in BUILDER_CONFIGS must not be duplicated. Got %s" % names)
return config_dict
@property
def cache_dir(self):
return self._cache_dir
def _relative_data_dir(self, with_version=True):
"""Relative path of this dataset in cache_dir."""
builder_data_dir = self.name
builder_config = self.config
if builder_config:
builder_data_dir = os.path.join(builder_data_dir, builder_config.name)
if not with_version:
return builder_data_dir
version = self.config.version
version_data_dir = os.path.join(builder_data_dir, str(version))
return version_data_dir
def _build_cache_dir(self):
"""Return the data directory for the current version."""
builder_data_dir = os.path.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
version_data_dir = os.path.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
def _other_versions_on_disk():
"""Returns previous versions on disk."""
if not os.path.exists(builder_data_dir):
return []
version_dirnames = []
for dir_name in os.listdir(builder_data_dir):
try:
version_dirnames.append((utils.Version(dir_name), dir_name))
except ValueError: # Invalid version (ex: incomplete data dir)
pass
version_dirnames.sort(reverse=True)
return version_dirnames
# Check and warn if other versions exist on disk
version_dirs = _other_versions_on_disk()
if version_dirs:
other_version = version_dirs[0][0]
if other_version != self.config.version:
warn_msg = (
"Found a different version {other_version} of dataset {name} in "
"cache_dir {cache_dir}. Using currently defined version "
"{cur_version}.".format(
other_version=str(other_version),
name=self.name,
cache_dir=self._cache_dir_root,
cur_version=str(self.config.version),
)
)
logger.warning(warn_msg)
return version_data_dir
@abc.abstractmethod
def _info(self) -> DatasetInfo:
"""Construct the DatasetInfo object. See `DatasetInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (DatasetInfo) The dataset information
"""
raise NotImplementedError
@classmethod
def get_imported_module_dir(cls):
"""Return the path of the module of this class or subclass."""
return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
def download_and_prepare(
self,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[GenerateMode] = None,
ignore_verifications: bool = False,
save_infos: bool = False,
try_from_hf_gcs: bool = True,
dl_manager: Optional[DownloadManager] = None,
**download_and_prepare_kwargs,
):
"""Downloads and prepares dataset for reading.
Args:
            download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
            download_mode (Optional `nlp.GenerateMode`): select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS
            ignore_verifications (bool): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...)
            save_infos (bool): Save the dataset information (checksums/size/splits/...)
            try_from_hf_gcs (bool): If True, it will try to download the already prepared dataset from the HF Google Cloud Storage
            dl_manager (Optional ``nlp.DownloadManager``): specific DownloadManager to use
"""
download_mode = GenerateMode(download_mode or GenerateMode.REUSE_DATASET_IF_EXISTS)
data_exists = os.path.exists(self._cache_dir)
if data_exists and download_mode == REUSE_DATASET_IF_EXISTS:
logger.info("Reusing dataset %s (%s)", self.name, self._cache_dir)
return
# Currently it's not possible to overwrite the data because it would
# conflict with versioning: If the last version has already been generated,
# it will always be reloaded and cache_dir will be set at construction.
if data_exists and download_mode != REUSE_CACHE_IF_EXISTS:
raise ValueError(
"Trying to overwrite an existing dataset {} at {}. A dataset with "
"the same version {} already exists. If the dataset has changed, "
"please update the version number.".format(self.name, self._cache_dir, self.config.version)
)
logger.info("Generating dataset %s (%s)", self.name, self._cache_dir)
if not is_remote_url(self._cache_dir): # if cache dir is local, check for available space
os.makedirs(self._cache_dir_root, exist_ok=True)
if not utils.has_sufficient_disk_space(self.info.size_in_bytes or 0, directory=self._cache_dir_root):
raise IOError(
"Not enough disk space. Needed: {} (download: {}, generated: {})".format(
utils.size_str(self.info.size_in_bytes or 0),
utils.size_str(self.info.download_size or 0),
utils.size_str(self.info.dataset_size or 0),
)
)
@contextlib.contextmanager
def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
if is_remote_url(dirname):
yield dirname
else:
tmp_dir = dirname + ".incomplete"
os.makedirs(tmp_dir)
try:
yield tmp_dir
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.rename(tmp_dir, dirname)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
# Try to download the already prepared dataset files
if try_from_hf_gcs:
try:
# Create a tmp dir and rename to self._cache_dir on successful exit.
with incomplete_dir(self._cache_dir) as tmp_data_dir:
# Temporarily assign _cache_dir to tmp_data_dir to avoid having to forward
# it to every sub function.
with utils.temporary_assignment(self, "_cache_dir", tmp_data_dir):
reader = ArrowReader(self._cache_dir, self.info)
reader.download_from_hf_gcs(self._cache_dir, self._relative_data_dir(with_version=True))
downloaded_info = DatasetInfo.from_directory(self._cache_dir)
self.info.update(downloaded_info)
logger.info("Dataset downloaded from Hf google storage.")
print(
f"Dataset {self.name} downloaded and prepared to {self._cache_dir}. "
f"Subsequent calls will reuse this data."
)
return
except DatasetNotOnHfGcs:
logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
# Print is intentional: we want this to always go to stdout so user has
# information needed to cancel download/preparation if needed.
# This comes right before the progress bar.
print(
f"Downloading and preparing dataset {self.info.builder_name}/{self.info.config_name} "
f"(download: {utils.size_str(self.info.download_size)}, generated: {utils.size_str(self.info.dataset_size)}, "
f"total: {utils.size_str(self.info.size_in_bytes)}) to {self._cache_dir}..."
)
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig()
download_config.cache_dir = os.path.join(self._cache_dir_root, "downloads")
download_config.force_download = download_mode == FORCE_REDOWNLOAD
dl_manager = DownloadManager(
dataset_name=self.name, download_config=download_config, data_dir=self.config.data_dir
)
if self.manual_download_instructions is not None:
assert (
dl_manager.manual_dir is not None
), "The dataset {} with config {} requires manual data. \n Please follow the manual download instructions: {}. \n Manual data can be loaded with `nlp.load_dataset({}, data_dir='<path/to/manual/data>')".format(
self.name, self.config.name, self.manual_download_instructions, self.name
)
# Create a tmp dir and rename to self._cache_dir on successful exit.
with incomplete_dir(self._cache_dir) as tmp_data_dir:
# Temporarily assign _cache_dir to tmp_data_dir to avoid having to forward
# it to every sub function.
with utils.temporary_assignment(self, "_cache_dir", tmp_data_dir):
verify_infos = not save_infos and not ignore_verifications
self._download_and_prepare(
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
)
# Sync info
self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
# Save info
self._save_info()
# Save to datasetinfos
if save_infos:
DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
print(
f"Dataset {self.name} downloaded and prepared to {self._cache_dir}. "
f"Subsequent calls will reuse this data."
)
def _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs):
"""Downloads and prepares dataset for reading.
        This is the internal implementation to override; it is called when the user
        calls `download_and_prepare`. It should download all required data and generate
        the pre-processed dataset files.
Args:
dl_manager: (DownloadManager) `DownloadManager` used to download and cache
data.
            verify_infos: bool, if True, perform the checksum and size verifications.
prepare_split_kwargs: Additional options.
"""
# Generating data for all splits
split_dict = SplitDict(dataset_name=self.name)
split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
# Checksums verification
if verify_infos:
verify_checksums(self.info.download_checksums, dl_manager.get_recorded_sizes_checksums())
for split_generator in split_generators:
if str(split_generator.split_info.name).lower() == "all":
raise ValueError(
"`all` is a special split keyword corresponding to the "
"union of all splits, so cannot be used as key in "
"._split_generator()."
)
logger.info("Generating split %s", split_generator.split_info.name)
split_dict.add(split_generator.split_info)
try:
# Prepare split will record examples associated to the split
self._prepare_split(split_generator, **prepare_split_kwargs)
except OSError:
raise OSError("Cannot find data file. " + (self.manual_download_instructions or ""))
if verify_infos:
verify_splits(self.info.splits, split_dict)
# Update the info object with the splits.
self.info.splits = split_dict
self.info.download_size = dl_manager.downloaded_size
def _save_info(self):
self.info.write_to_directory(self._cache_dir)
def _make_split_generators_kwargs(self, prepare_split_kwargs):
"""Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
del prepare_split_kwargs
return {}
def as_dataset(self, split: Optional[Split] = None):
""" Return a Dataset for the specified split.
"""
logger.info("Constructing Dataset for split %s, from %s", split, self._cache_dir)
if not os.path.exists(self._cache_dir):
raise AssertionError(
(
"Dataset %s: could not find data in %s. Please make sure to call "
"builder.download_and_prepare(), or pass download=True to "
"nlp.load_dataset() before trying to access the Dataset object."
)
% (self.name, self._cache_dir_root)
)
# By default, return all splits
if split is None:
split = {s: s for s in self.info.splits}
# Create a dataset for each of the given splits
datasets = utils.map_nested(self._build_single_dataset, split, map_tuple=True)
return datasets
def _build_single_dataset(self, split):
"""as_dataset for a single split."""
if isinstance(split, str):
split = Split(split)
# Build base dataset
ds = self._as_dataset(split=split,)
return ds
def _as_dataset(self, split: Split = Split.TRAIN):
"""Constructs a `Dataset`.
        This is the internal implementation to override; it is called when the user
        calls `as_dataset`. It should read the pre-processed dataset files and generate
        the `Dataset` object.
Args:
split: `nlp.Split` which subset of the data to read.
Returns:
`Dataset`
"""
ds = ArrowReader(self._cache_dir, self.info).read(
name=self.name, instructions=split, split_infos=self.info.splits.values(),
)
return ds
@abc.abstractmethod
def _split_generators(self, dl_manager):
"""Specify feature dictionary generators and dataset splits.
This function returns a list of `SplitGenerator`s defining how to generate
data and what splits to use.
Example:
            return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={'file': 'train_data.zip'},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={'file': 'test_data.zip'},
),
]
The above code will first call `_generate_examples(file='train_data.zip')`
to write the train data, then `_generate_examples(file='test_data.zip')` to
write the test data.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
Note that for datasets without a `VALIDATION` split, you can use a
fraction of the `TRAIN` data for evaluation as you iterate on your model
so as not to overfit to the `TEST` data.
        For downloads and extractions, use the given `dl_manager`.
        Note that the `DownloadManager` caches downloads, so it is fine to have each
        generator attempt to download the source data.
        A good practice is to download all data in this function, and then
        distribute the relevant parts to each split with the `gen_kwargs` argument.
Args:
dl_manager: (DownloadManager) Download manager to download the data
Returns:
`list<SplitGenerator>`.
"""
raise NotImplementedError()
@abc.abstractmethod
def _prepare_split(self, split_generator, **kwargs):
"""Generate the examples and record them on disk.
Args:
split_generator: `SplitGenerator`, Split generator to process
**kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
beam pipeline)
"""
raise NotImplementedError()
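# --- Illustrative sketch (not part of the original module) -------------------
# A rough outline of the calling sequence described in the DatasetBuilder
# docstring: instantiate a concrete builder, materialize it on disk, then load
# it. The builder class is passed in as a parameter because no concrete
# subclass exists in this module; nothing here is executed at import time.
def _example_builder_usage(builder_cls, config_name=None, **config_kwargs):
    builder = builder_cls(name=config_name, **config_kwargs)
    builder.download_and_prepare()  # download the data and write the Arrow files
    return builder.as_dataset(split=Split.TRAIN)  # read back the prepared train split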
class GeneratorBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on dict generators.
`GeneratorBasedBuilder` is a convenience class that abstracts away much
    of the data writing and reading of `DatasetBuilder`. It expects subclasses to
    define the dataset splits (`_split_generators`) and to implement generators of
    feature dictionaries for each split (`_generate_examples`). See the method
    docstrings for details.
"""
    # GeneratorBasedBuilder should have dummy data for tests by default
test_dummy_data = True
    def __init__(self, *args, **kwargs):
        # Pop `writer_batch_size` so it is not forwarded to the BuilderConfig kwargs.
        self._writer_batch_size = kwargs.pop("writer_batch_size", None)
        super(GeneratorBasedBuilder, self).__init__(*args, **kwargs)
@abc.abstractmethod
def _generate_examples(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
        This function preprocesses the examples from the raw data into the preprocessed
        dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs: `dict`, Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
            * Unique: An error will be raised if two examples are yielded with the
                same key.
            * Deterministic: When generating the dataset twice, the same example
                should have the same key.
            Good keys can be the image id, or line number if examples are extracted
            from a text file.
            The key will be hashed and sorted to shuffle examples deterministically,
            so that generating the dataset multiple times keeps the examples in the
            same order.
example: `dict<str feature_name, feature_value>`, a feature dictionary
ready to be encoded and written to disk. The example will be
encoded with `self.info.features.encode_example({...})`.
"""
raise NotImplementedError()
def _prepare_split(self, split_generator):
split_info = split_generator.split_info
fname = "{}-{}.arrow".format(self.name, split_generator.name)
fpath = os.path.join(self._cache_dir, fname)
examples_type = self.info.features.type
writer = ArrowWriter(data_type=examples_type, path=fpath, writer_batch_size=self._writer_batch_size)
generator = self._generate_examples(**split_generator.gen_kwargs)
for key, record in utils.tqdm(generator, unit=" examples", total=split_info.num_examples, leave=False):
example = self.info.features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
        assert (
            split_info.num_examples == 0 or num_examples == split_info.num_examples
        ), f"Expected to write {split_info.num_examples} examples but wrote {num_examples}"
split_generator.split_info.num_examples = num_examples
split_generator.split_info.num_bytes = num_bytes
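# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of the (key, example) contract that `GeneratorBasedBuilder`
# subclasses implement in `_generate_examples` and that `_prepare_split` above
# consumes. The plain-text file format and the "text" feature name are
# hypothetical; real datasets yield whatever features they declare in `_info`.
def _example_generate_examples(filepath):
    with open(filepath, encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            # The line number is a good key: unique and deterministic across runs.
            yield line_id, {"text": line.rstrip("\n")}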
class ArrowBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet).
"""
# ArrowBasedBuilder should have dummy data for tests by default
test_dummy_data = True
@abc.abstractmethod
    def _generate_tables(self, **kwargs):
        """Default function generating Arrow tables for each `SplitGenerator`.
        This function preprocesses the raw data into the Arrow tables that are
        written to the preprocessed dataset files.
        This function is called once for each `SplitGenerator` defined in
        `_split_generators`. The tables yielded here will be written on
        disk.
        Args:
            **kwargs: `dict`, Arguments forwarded from the SplitGenerator.gen_kwargs
        Yields:
            key: `str` or `int`, a unique deterministic table identification key.
            table: `pyarrow.Table`, a table holding the examples of (part of) the
                split, ready to be written to disk.
        """
        raise NotImplementedError()
def _prepare_split(self, split_generator):
fname = "{}-{}.arrow".format(self.name, split_generator.name)
fpath = os.path.join(self._cache_dir, fname)
writer = ArrowWriter(path=fpath)
generator = self._generate_tables(**split_generator.gen_kwargs)
for key, table in utils.tqdm(generator, unit=" tables", leave=False):
writer.write_table(table)
num_examples, num_bytes = writer.finalize()
split_generator.split_info.num_examples = num_examples
split_generator.split_info.num_bytes = num_bytes
features = {}
def parse_schema(schema, schema_dict):
for field in schema:
if pa.types.is_struct(field.type):
schema_dict[field.name] = {}
parse_schema(field.type, schema_dict[field.name])
elif pa.types.is_list(field.type) and pa.types.is_struct(field.type.value_type):
schema_dict[field.name] = {}
parse_schema(field.type.value_type, schema_dict[field.name])
else:
schema_dict[field.name] = Value(str(field.type))
parse_schema(writer.schema, features)
self.info.features = Features(features)
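# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of the table-yielding contract consumed by
# `ArrowBasedBuilder._prepare_split` above: each yielded value is a
# (key, pyarrow.Table) pair. The CSV layout and the single "text" column are
# hypothetical; real loaders build tables matching their declared features.
def _example_generate_tables(filepath):
    import csv
    with open(filepath, encoding="utf-8", newline="") as f:
        rows = [row[0] for row in csv.reader(f) if row]
    yield filepath, pa.Table.from_pydict({"text": rows})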
class MissingBeamOptions(ValueError):
pass
class BeamBasedBuilder(DatasetBuilder):
"""Beam based Builder."""
# BeamBasedBuilder does not have dummy data for tests yet
test_dummy_data = False
def __init__(self, *args, **kwargs):
self._beam_runner = kwargs.pop("beam_runner", None)
self._beam_options = kwargs.pop("beam_options", None)
super(BeamBasedBuilder, self).__init__(*args, **kwargs)
self._beam_writers = {} # {split: beam_writer} mapping.
def _make_split_generators_kwargs(self, prepare_split_kwargs):
# Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
# it's in the call signature of `_split_generators()`.
# This allows for global preprocessing in beam.
split_generators_kwargs = {}
split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
if "pipeline" in split_generators_arg_names:
split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
return split_generators_kwargs
@abc.abstractmethod
def _build_pcollection(self, pipeline, **kwargs):
"""Build the beam pipeline examples for each `SplitGenerator`.
This function extracts examples from the raw data with parallel transforms
in a Beam pipeline. It is called once for each `SplitGenerator` defined in
`_split_generators`. The examples from the PCollection will be
encoded and written to disk.
Warning: When running in a distributed setup, make sure that the data
which will be read (download_dir, manual_dir,...) and written (cache_dir)
        can be accessed by the worker jobs. The data should be located in a
shared filesystem, like GCS.
Example:
```
def _build_pcollection(pipeline, extracted_dir):
return (
pipeline
| beam.Create(gfile.io.listdir(extracted_dir))
| beam.Map(_process_file)
)
```
Args:
pipeline: `beam.Pipeline`, root Beam pipeline
**kwargs: Arguments forwarded from the SplitGenerator.gen_kwargs
Returns:
pcollection: `PCollection`, an Apache Beam PCollection containing the
example to send to `self.info.features.encode_example(...)`.
"""
raise NotImplementedError()
def _download_and_prepare(self, dl_manager, verify_infos):
# Create the Beam pipeline and forward it to _prepare_split
import apache_beam as beam
import nlp.utils.beam_utils as beam_utils
beam_runner = self._beam_runner
beam_options = self._beam_options
if not beam_runner and not beam_options:
usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
raise MissingBeamOptions(
"Trying to generate a dataset using Apache Beam, yet no Beam Runner "
"or PipelineOptions() has been provided in `load_dataset` or in the "
"builder arguments. For big datasets it has to run on large-scale data "
"processing tools like Dataflow, Spark, etc. More information about "
"Apache Beam runners at "
"https://beam.apache.org/documentation/runners/capability-matrix/"
"\nIf you really want to run it locally because you feel like the "
"Dataset is small enough, you can use the local beam runner called "
"`DirectRunner` (you may run out of memory). \nExample of usage: "
"\n\t`{}`".format(usage_example)
)
beam_options = beam_options or beam.options.pipeline_options.PipelineOptions()
        # Beam type checking assumes that a transform's multiple outputs are of the
        # same type, which is not our case. Plus it doesn't handle all types
        # correctly, so we are better off without it.
beam_options.view_as(beam.options.pipeline_options.TypeOptions).pipeline_type_check = False
# Use a single pipeline for all splits
pipeline = beam_utils.BeamPipeline(runner=beam_runner, options=beam_options,)
super(BeamBasedBuilder, self)._download_and_prepare(
dl_manager, verify_infos=False, pipeline=pipeline,
) # TODO handle verify_infos in beam datasets
# Run pipeline
pipeline_results = pipeline.run()
pipeline_results.wait_until_finish()
metrics = pipeline_results.metrics()
# Update `info.splits`.
split_dict = self.info.splits
for split_name, beam_writer in self._beam_writers.items():
m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
split_info = split_dict[split_name]
split_info.num_examples = num_examples
split_info.num_bytes = num_bytes
def _save_info(self):
import apache_beam as beam
fs = beam.io.filesystems.FileSystems
with fs.create(os.path.join(self._cache_dir, DATASET_INFO_FILENAME)) as f:
self.info._dump_info(f)
with fs.create(os.path.join(self._cache_dir, LICENSE_FILENAME)) as f:
self.info._dump_license(f)
def _prepare_split(self, split_generator, pipeline):
import apache_beam as beam
split_name = split_generator.split_info.name
output_prefix = filename_prefix_for_split(self.name, split_name)
output_prefix = os.path.join(self._cache_dir, output_prefix)
# To write examples to disk:
fname = "{}-{}.arrow".format(self.name, split_name)
fpath = os.path.join(self._cache_dir, fname)
examples_type = self.info.features.type
beam_writer = BeamWriter(examples_type, path=fpath, namespace=split_name, cache_dir=self._cache_dir)
self._beam_writers[split_name] = beam_writer
encode_example = self.info.features.encode_example
# Note: We need to wrap the pipeline in a PTransform to avoid re-using the
# same label names for each split
@beam.ptransform_fn
def _build_pcollection(pipeline):
"""PTransformation which build a single split."""
# Encode the PCollection
pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
return beam_writer.write_from_pcollection(pcoll_examples)
# Add the PCollection to the pipeline
_ = pipeline | split_name >> _build_pcollection() # pylint: disable=no-value-for-parameter
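# --- Illustrative sketch (not part of the original module) -------------------
# A self-contained example of the `_build_pcollection` contract documented on
# `BeamBasedBuilder`: list files from an extracted directory and map each one
# to a (key, example) pair. The "text" feature and the one-example-per-file
# layout are hypothetical; Apache Beam is imported lazily as elsewhere in this
# module.
def _example_build_pcollection(pipeline, extracted_dir):
    import apache_beam as beam

    def _parse_file(filename):
        with open(os.path.join(extracted_dir, filename), encoding="utf-8") as f:
            return filename, {"text": f.read()}
    return (
        pipeline
        | "ListFiles" >> beam.Create(sorted(os.listdir(extracted_dir)))
        | "ParseFiles" >> beam.Map(_parse_file)
    )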
|
nlp-master
|
src/nlp/builder.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" Metrics base class."""
import logging
import os
from typing import Any, Dict, Optional
import pyarrow as pa
from filelock import FileLock, Timeout
from .arrow_reader import ArrowReader
from .arrow_writer import ArrowWriter
from .info import MetricInfo
from .naming import camelcase_to_snakecase
from .utils import HF_METRICS_CACHE, Version
logger = logging.getLogger(__file__)
class Metric(object):
def __init__(
self,
name: str = None,
experiment_id: Optional[str] = None,
process_id: int = 0,
num_process: int = 1,
data_dir: Optional[str] = None,
in_memory: bool = False,
**kwargs,
):
""" A Metrics is the base class and common API for all metrics.
Args:
process_id (int): specify the id of the node in a distributed settings between 0 and num_nodes-1
This can be used, to compute metrics on distributed setups
(in particular non-additive metrics like F1).
data_dir (str): path to a directory in which temporary data will be stored.
This should be a shared file-system for distributed setups.
experiment_id (str): Should be used if you perform several concurrent experiments using
the same caching directory (will be indicated in the raise error)
in_memory (bool): keep all predictions and references in memory. Not possible in distributed settings.
"""
# Safety checks
        assert isinstance(process_id, int) and process_id >= 0, "'process_id' should be a non-negative integer"
assert (
isinstance(num_process, int) and num_process > process_id
), "'num_process' should be a number greater than process_id"
assert (
process_id == 0 or not in_memory
), "Using 'in_memory' is not possible in distributed setting (process_id > 0)."
# Metric name
self.name = camelcase_to_snakecase(self.__class__.__name__)
# Configuration name
self.config_name = name
self.process_id = process_id
self.num_process = num_process
self.in_memory = in_memory
self.experiment_id = experiment_id if experiment_id is not None else "cache"
self._version = "1.0.0"
self._data_dir_root = os.path.expanduser(data_dir or HF_METRICS_CACHE)
self.data_dir = self._build_data_dir()
# prepare info
info = self._info()
info.metric_name = self.name
info.config_name = self.config_name
info.version = self._version
self.info = info
# Update 'compute' and 'add' docstring
self.compute.__func__.__doc__ += self.info.inputs_description
self.add_batch.__func__.__doc__ += self.info.inputs_description
self.add.__func__.__doc__ += self.info.inputs_description
self.arrow_schema = pa.schema(field for field in self.info.features.type)
self.buf_writer = None
self.writer = None
self.writer_batch_size = None
self.data = None
# Check we can write on the cache file without competitors
self.cache_file_name = self._get_cache_path(self.process_id)
self.filelock = FileLock(self.cache_file_name + ".lock")
try:
self.filelock.acquire(timeout=1)
except Timeout:
raise ValueError(
"Cannot acquire lock, caching file might be used by another process, "
"you should setup a unique 'experiment_id' for this run."
)
def _relative_data_dir(self, with_version=True):
"""Relative path of this dataset in data_dir."""
builder_data_dir = self.name
if not with_version:
return builder_data_dir
version = self._version
version_data_dir = os.path.join(builder_data_dir, str(version))
return version_data_dir
def _build_data_dir(self):
""" Return the directory for the current version.
"""
builder_data_dir = os.path.join(self._data_dir_root, self._relative_data_dir(with_version=False))
version_data_dir = os.path.join(self._data_dir_root, self._relative_data_dir(with_version=True))
def _other_versions_on_disk():
"""Returns previous versions on disk."""
if not os.path.exists(builder_data_dir):
return []
version_dirnames = []
for dir_name in os.listdir(builder_data_dir):
try:
version_dirnames.append((Version(dir_name), dir_name))
except ValueError: # Invalid version (ex: incomplete data dir)
pass
version_dirnames.sort(reverse=True)
return version_dirnames
# Check and warn if other versions exist on disk
version_dirs = _other_versions_on_disk()
if version_dirs:
other_version = version_dirs[0][0]
if other_version != self._version:
warn_msg = (
"Found a different version {other_version} of metric {name} in "
"data_dir {data_dir}. Using currently defined version "
"{cur_version}.".format(
other_version=str(other_version),
name=self.name,
data_dir=self._data_dir_root,
cur_version=str(self._version),
)
)
logger.warning(warn_msg)
os.makedirs(version_data_dir, exist_ok=True)
return version_data_dir
def _get_cache_path(self, node_id):
return os.path.join(self.data_dir, f"{self.experiment_id}-{self.name}-{node_id}.arrow")
def finalize(self, timeout=120):
""" Close all the writing process and load/gather the data
from all the nodes if main node or all_process is True.
"""
self.writer.finalize()
self.writer = None
self.buf_writer = None
self.filelock.release()
if self.process_id == 0:
# Let's acquire a lock on each node files to be sure they are finished writing
node_files = []
locks = []
for node_id in range(self.num_process):
node_file = self._get_cache_path(node_id)
filelock = FileLock(node_file + ".lock")
filelock.acquire(timeout=timeout)
node_files.append({"filename": node_file})
locks.append(filelock)
# Read the predictions and references
reader = ArrowReader(path=self.data_dir, info=None)
self.data = reader.read_files(node_files)
# Release all of our locks
for lock in locks:
lock.release()
def compute(self, predictions=None, references=None, timeout=120, **metrics_kwargs):
""" Compute the metrics.
"""
if predictions is not None:
self.add_batch(predictions=predictions, references=references)
self.finalize(timeout=timeout)
self.data.set_format(type=self.info.format)
predictions = self.data["predictions"]
references = self.data["references"]
output = self._compute(predictions=predictions, references=references, **metrics_kwargs)
return output
def add_batch(self, predictions=None, references=None, **kwargs):
""" Add a batch of predictions and references for the metric's stack.
"""
batch = {"predictions": predictions, "references": references}
if self.writer is None:
self._init_writer()
self.writer.write_batch(batch)
def add(self, prediction=None, reference=None, **kwargs):
""" Add one prediction and reference for the metric's stack.
"""
example = {"predictions": prediction, "references": reference}
example = self.info.features.encode_example(example)
if self.writer is None:
self._init_writer()
self.writer.write(example)
def _init_writer(self):
if self.in_memory:
self.buf_writer = pa.BufferOutputStream()
self.writer = ArrowWriter(
schema=self.arrow_schema, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
)
else:
self.buf_writer = None
self.writer = ArrowWriter(
schema=self.arrow_schema, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
)
def _info(self) -> MetricInfo:
"""Construct the MetricInfo object. See `MetricInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (MetricInfo) The metrics information
"""
raise NotImplementedError
def _compute(self, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
""" This method defines the common API for all the metrics in the library """
raise NotImplementedError
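# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of the `_compute` contract implemented by concrete
# metrics: take aligned predictions and references and return a dict of
# scores. The "accuracy" output key is hypothetical; each metric defines its
# own output fields in its `MetricInfo`.
def _example_compute(predictions=None, references=None):
    predictions = list(predictions or [])
    references = list(references or [])
    matches = sum(p == r for p, r in zip(predictions, references))
    return {"accuracy": matches / len(references) if references else 0.0}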
|
nlp-master
|
src/nlp/metric.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" List and inspect datasets and metrics."""
import logging
from typing import Optional
from .hf_api import HfApi
from .load import prepare_module
from .utils import DownloadConfig
logger = logging.getLogger(__name__)
def list_datasets(with_community_datasets=True):
""" List all the datasets scripts available on HuggingFace AWS bucket """
api = HfApi()
return api.dataset_list(with_community_datasets=with_community_datasets)
def list_metrics(with_community_metrics=True):
""" List all the metrics script available on HuggingFace AWS bucket """
api = HfApi()
return api.metric_list(with_community_metrics=with_community_metrics)
def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
r"""
    Allow inspection/modification of a dataset script by copying it to the local drive at local_path.
    Args:
        path (``str``): path to the dataset processing script with the dataset builder. Can be either:
            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
                e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
            - a dataset identifier on the HuggingFace AWS bucket (list all available datasets and ids with ``nlp.list_datasets()``)
                e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
        local_path (``str``): path to the local folder to copy the dataset script to.
        download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
        **download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
"""
module_path = prepare_module(
path, download_config=download_config, dataset=True, force_local_path=local_path, **download_kwargs
)
print(
f"The processing script for dataset {path} can be inspected at {local_path}. "
f"The main class is in {module_path}. "
f"You can modify this processing script and use it with `nlp.load_dataset({local_path})`."
)
def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
r"""
    Allow inspection/modification of a metric script by copying it to the local drive at local_path.
    Args:
        path (``str``): path to the metric processing script with the metric class. Can be either:
            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
                e.g. ``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
            - a metric identifier on the HuggingFace AWS bucket (list all available metrics and ids with ``nlp.list_metrics()``)
                e.g. ``'rouge'`` or ``'bleu'``
        local_path (``str``): path to the local folder to copy the metric script to.
        download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
        **download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
"""
module_path = prepare_module(
path, download_config=download_config, dataset=False, force_local_path=local_path, **download_kwargs
)
print(
f"The processing scripts for metric {path} can be inspected at {local_path}. "
f"The main class is in {module_path}. "
f"You can modify this processing scripts and use it with `nlp.load_metric({local_path})`."
)
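# --- Illustrative sketch (not part of the original module) -------------------
# A rough usage example of the helpers above. The identifiers and local paths
# are placeholders and both inspect calls hit the network, so the function is
# only defined here, never run.
def _example_inspect_usage():
    public_datasets = [d.id for d in list_datasets(with_community_datasets=False)]
    print(public_datasets[:5])
    inspect_dataset("squad", local_path="./inspected_squad")
    inspect_metric("rouge", local_path="./inspected_rouge")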
|
nlp-master
|
src/nlp/inspect.py
|
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from os.path import expanduser
from typing import Dict, List, Optional, Tuple
import requests
from tqdm import tqdm
ENDPOINT = "https://huggingface.co"
class S3Obj:
"""
Data structure that represents a file belonging to the current user.
"""
def __init__(self, filename: str, LastModified: str, ETag: str, Size: int, **kwargs):
self.filename = filename
self.LastModified = LastModified
self.ETag = ETag
self.Size = Size
class PresignedUrl:
def __init__(self, write: str, access: str, type: str, **kwargs):
self.write = write
self.access = access
self.type = type # mime-type to send to S3.
class S3Object:
"""
Data structure that represents a public file accessible on our S3.
"""
def __init__(
self,
key: str, # S3 object key
etag: str,
lastModified: str,
size: int,
rfilename: str, # filename relative to config.json
**kwargs,
):
self.key = key
self.etag = etag
self.lastModified = lastModified
self.size = size
self.rfilename = rfilename
for k, v in kwargs.items():
setattr(self, k, v)
class ObjectInfo:
"""
Info about a public dataset or Metric accessible from our S3.
"""
def __init__(
self,
id: str,
key: str,
lastModified: Optional[str] = None,
description: Optional[str] = None,
citation: Optional[str] = None,
size: Optional[int] = None,
etag: Optional[str] = None,
siblings: List[Dict] = None,
author: str = None,
**kwargs,
):
self.id = id # id of dataset
self.key = key # S3 object key of config.json
self.lastModified = lastModified
self.description = description
self.citation = citation
self.size = size
self.etag = etag
self.siblings = siblings # list of files that constitute the dataset
self.author = author
self.siblings = [S3Object(**x) for x in self.siblings] if self.siblings else None
for k, v in kwargs.items():
setattr(self, k, v)
class HfApi:
ALLOWED_FILE_TYPES = ["datasets", "metrics"]
def __init__(self, endpoint=None):
"""Create Api using a specific endpoint and also the file types ('datasets' or 'metrics')"""
self.endpoint = endpoint if endpoint is not None else ENDPOINT
def login(self, username: str, password: str) -> str:
"""
Call HF API to sign in a user and get a token if credentials are valid.
Outputs:
token if credentials are valid
Throws:
requests.exceptions.HTTPError if credentials are invalid
"""
path = "{}/api/login".format(self.endpoint)
r = requests.post(path, json={"username": username, "password": password})
r.raise_for_status()
d = r.json()
return d["token"]
def whoami(self, token: str) -> Tuple[str, List[str]]:
"""
Call HF API to know "whoami"
"""
path = "{}/api/whoami".format(self.endpoint)
r = requests.get(path, headers={"authorization": "Bearer {}".format(token)})
r.raise_for_status()
d = r.json()
return d["user"], d["orgs"]
def logout(self, token: str) -> None:
"""
Call HF API to log out.
"""
path = "{}/api/logout".format(self.endpoint)
r = requests.post(path, headers={"authorization": "Bearer {}".format(token)})
r.raise_for_status()
def presign(
self, token: str, filename: str, organization: Optional[str] = None, file_types: Optional[str] = None
) -> PresignedUrl:
"""
Call HF API to get a presigned url to upload `filename` to S3.
"""
        assert file_types in self.ALLOWED_FILE_TYPES, "Please specify a file type from {}".format(
self.ALLOWED_FILE_TYPES
)
path = "{}/api/{}/presign".format(self.endpoint, file_types)
r = requests.post(
path,
headers={"authorization": "Bearer {}".format(token)},
json={"filename": filename, "organization": organization},
)
r.raise_for_status()
d = r.json()
return PresignedUrl(**d)
def presign_and_upload(
self,
token: str,
filename: str,
filepath: str,
organization: Optional[str] = None,
file_types: Optional[str] = None,
) -> str:
"""
Get a presigned url, then upload file to S3.
Outputs:
url: Read-only url for the stored file on S3.
"""
urls = self.presign(token, filename=filename, organization=organization, file_types=file_types)
# streaming upload:
# https://2.python-requests.org/en/master/user/advanced/#streaming-uploads
#
# Even though we presign with the correct content-type,
# the client still has to specify it when uploading the file.
with open(filepath, "rb") as f:
pf = TqdmProgressFileReader(f)
data = f if pf.total_size > 0 else ""
r = requests.put(urls.write, data=data, headers={"content-type": urls.type})
r.raise_for_status()
pf.close()
return urls.access
def list_objs(
self, token: str, organization: Optional[str] = None, file_types: Optional[str] = None
) -> List[S3Obj]:
"""
Call HF API to list all stored files for user (or one of their organizations).
"""
        assert file_types in self.ALLOWED_FILE_TYPES, "Please specify a file type from {}".format(
self.ALLOWED_FILE_TYPES
)
path = "{}/api/{}/listObjs".format(self.endpoint, file_types)
params = {"organization": organization} if organization is not None else None
r = requests.get(path, params=params, headers={"authorization": "Bearer {}".format(token)})
r.raise_for_status()
d = r.json()
return [S3Obj(**x) for x in d]
def delete_obj(
self, token: str, filename: str, organization: Optional[str] = None, file_types: Optional[str] = None
):
"""
        Call HF API to delete a file stored by the user.
"""
        assert file_types in self.ALLOWED_FILE_TYPES, "Please specify a file type from {}".format(
self.ALLOWED_FILE_TYPES
)
path = "{}/api/{}/deleteObj".format(self.endpoint, file_types)
r = requests.delete(
path,
headers={"authorization": "Bearer {}".format(token)},
json={"filename": filename, "organization": organization},
)
r.raise_for_status()
def dataset_list(self, with_community_datasets=True) -> List[ObjectInfo]:
"""
Get the public list of all the datasets on huggingface, including the community datasets
"""
path = "{}/api/datasets".format(self.endpoint)
r = requests.get(path)
r.raise_for_status()
d = r.json()
datasets = [ObjectInfo(**x) for x in d]
if not with_community_datasets:
datasets = [d for d in datasets if "/" not in d.id]
return datasets
def metric_list(self, with_community_metrics=True) -> List[ObjectInfo]:
"""
Get the public list of all the metrics on huggingface, including the community metrics
"""
path = "{}/api/metrics".format(self.endpoint)
r = requests.get(path)
r.raise_for_status()
d = r.json()
metrics = [ObjectInfo(**x) for x in d]
if not with_community_metrics:
metrics = [m for m in metrics if "/" not in m.id]
return metrics
class TqdmProgressFileReader:
"""
Wrap an io.BufferedReader `f` (such as the output of `open(…, "rb")`)
and override `f.read()` so as to display a tqdm progress bar.
see github.com/huggingface/transformers/pull/2078#discussion_r354739608
for implementation details.
"""
def __init__(self, f: io.BufferedReader):
self.f = f
self.total_size = os.fstat(f.fileno()).st_size
self.pbar = tqdm(total=self.total_size, leave=False)
self.read = f.read
f.read = self._read
def _read(self, n=-1):
self.pbar.update(n)
return self.read(n)
def close(self):
self.pbar.close()
class HfFolder:
path_token = expanduser("~/.huggingface/token")
@classmethod
def save_token(cls, token):
"""
Save token, creating folder as needed.
"""
os.makedirs(os.path.dirname(cls.path_token), exist_ok=True)
with open(cls.path_token, "w+") as f:
f.write(token)
@classmethod
def get_token(cls):
"""
Get token or None if not existent.
"""
try:
with open(cls.path_token, "r") as f:
return f.read()
except FileNotFoundError:
pass
@classmethod
def delete_token(cls):
"""
Delete token.
Do not fail if token does not exist.
"""
try:
os.remove(cls.path_token)
except FileNotFoundError:
pass
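# --- Illustrative sketch (not part of the original module) -------------------
# A short example of the read-only endpoints above: list the canonical (non
# community) datasets and metrics. Only defined, never called at import time,
# since it performs HTTP requests. The slice of five ids is arbitrary.
def _example_list_public_objects():
    api = HfApi()
    dataset_ids = [d.id for d in api.dataset_list(with_community_datasets=False)]
    metric_ids = [m.id for m in api.metric_list(with_community_metrics=False)]
    return dataset_ids[:5], metric_ids[:5]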
|
nlp-master
|
src/nlp/hf_api.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Splits related API."""
from __future__ import absolute_import, division, print_function
import abc
import collections
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from .arrow_reader import FileInstructions, make_file_instructions
from .utils.py_utils import NonMutableDict
@dataclass
class SplitInfo:
name: str = ""
num_bytes: int = 0
num_examples: int = 0
dataset_name: str = None
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
# `self.dataset_name` is assigned in `SplitDict.add()`.
instructions = make_file_instructions(name=self.dataset_name, split_infos=[self], instruction=str(self.name),)
return instructions.file_instructions
@dataclass
class SubSplitInfo:
"""Wrapper around a sub split info.
    This class exposes info on the subsplit:
```
ds, info = nlp.load_dataset(..., split='train[75%:]', with_info=True)
info.splits['train[75%:]'].num_examples
```
"""
instructions: FileInstructions
@property
def num_examples(self):
"""Returns the number of example in the subsplit."""
return self.instructions.num_examples
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
return self.instructions.file_instructions
class SplitBase(metaclass=abc.ABCMeta):
# pylint: disable=line-too-long
"""Abstract base class for Split compositionality.
See the
[guide on splits](https://github.com/huggingface/nlp/tree/master/docs/splits.md)
for more information.
There are three parts to the composition:
1) The splits are composed (defined, merged, split,...) together before
calling the `.as_dataset()` function. This is done with the `__add__`,
`__getitem__`, which return a tree of `SplitBase` (whose leaf
are the `NamedSplit` objects)
```
split = nlp.Split.TRAIN + nlp.Split.TEST.subsplit(nlp.percent[:50])
```
2) The `SplitBase` is forwarded to the `.as_dataset()` function
to be resolved into actual read instruction. This is done by the
`.get_read_instruction()` method which takes the real dataset splits
(name, number of shards,...) and parse the tree to return a
`SplitReadInstruction()` object
```
read_instruction = split.get_read_instruction(self.info.splits)
```
3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
to define which files to read and how to skip examples within file.
"""
# pylint: enable=line-too-long
@abc.abstractmethod
def get_read_instruction(self, split_dict):
"""Parse the descriptor tree and compile all read instructions together.
Args:
split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
Returns:
split_read_instruction: `SplitReadInstruction`
"""
raise NotImplementedError("Abstract method")
def __eq__(self, other):
"""Equality: nlp.Split.TRAIN == 'train'."""
if isinstance(other, (NamedSplit, str)):
return False
raise NotImplementedError("Equality is not implemented between merged/sub splits.")
def __ne__(self, other):
"""InEquality: nlp.Split.TRAIN != 'test'."""
return not self.__eq__(other)
def __add__(self, other):
"""Merging: nlp.Split.TRAIN + nlp.Split.TEST."""
return _SplitMerged(self, other)
def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
"""Divides this split into subsplits.
There are 3 ways to define subsplits, which correspond to the 3
arguments `k` (get `k` even subsplits), `percent` (get a slice of the
dataset with `nlp.percent`), and `weighted` (get subsplits with proportions
specified by `weighted`).
Examples:
```
# 50% train, 50% test
train, test = split.subsplit(k=2)
# 50% train, 25% test, 25% validation
train, test, validation = split.subsplit(weighted=[2, 1, 1])
# Extract last 20%
subsplit = split.subsplit(nlp.percent[-20:])
```
        Warning: k and weighted will be converted into percent which means that
values below the percent will be rounded up or down. The final split may be
bigger to deal with remainders. For instance:
```
train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
```
Args:
arg: If no kwargs are given, `arg` will be interpreted as one of
`k`, `percent`, or `weighted` depending on the type.
For example:
```
split.subsplit(10) # Equivalent to split.subsplit(k=10)
split.subsplit(nlp.percent[:-20]) # percent=nlp.percent[:-20]
split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
```
k: `int` If set, subdivide the split into `k` equal parts.
percent: `nlp.percent slice`, return a single subsplit corresponding to
a slice of the original split. For example:
`split.subsplit(nlp.percent[-20:]) # Last 20% of the dataset`.
weighted: `list[int]`, return a list of subsplits whose proportions match
the normalized sum of the list. For example:
`split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
Returns:
A subsplit or list of subsplits extracted from this split object.
"""
# Note that the percent kwargs redefine the outer name nlp.percent. This
# is done for consistency (.subsplit(percent=nlp.percent[:40]))
if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
raise ValueError("Only one argument of subsplit should be set.")
# Auto deduce k
if isinstance(arg, int):
k = arg
elif isinstance(arg, slice):
percent = arg
elif isinstance(arg, list):
weighted = arg
if not (k or percent or weighted):
raise ValueError(
"Invalid split argument {}. Only list, slice and int supported. "
"One of k, weighted or percent should be set to a non empty value.".format(arg)
)
def assert_slices_coverage(slices):
            # Ensure that the expanded slices cover all percents.
assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
if k:
if not 0 < k <= 100:
raise ValueError("Subsplit k should be between 0 and 100, got {}".format(k))
shift = 100 // k
slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
elif percent:
return _SubSplit(self, percent)
elif weighted:
# Normalize the weighted sum
total = sum(weighted)
weighted = [100 * x // total for x in weighted]
# Create the slice for each of the elements
start = 0
stop = 0
slices = []
for v in weighted:
stop += v
slices.append(slice(start, stop))
start = stop
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
else:
# Should not be possible
raise ValueError("Could not determine the split")
# 2 requirements:
# 1. nlp.percent be sliceable
# 2. nlp.percent be documented
#
# Instances are not documented, so we want nlp.percent to be a class, but to
# have it be sliceable, we need this metaclass.
class PercentSliceMeta(type):
def __getitem__(cls, slice_value):
if not isinstance(slice_value, slice):
raise ValueError("nlp.percent should only be called with slice, not {}".format(slice_value))
return slice_value
class PercentSlice(metaclass=PercentSliceMeta):
# pylint: disable=line-too-long
"""Syntactic sugar for defining slice subsplits: `nlp.percent[75:-5]`.
See the
[guide on splits](https://github.com/huggingface/nlp/tree/master/docs/splits.md)
for more information.
"""
# pylint: enable=line-too-long
pass
percent = PercentSlice # pylint: disable=invalid-name
class _SplitMerged(SplitBase):
"""Represent two split descriptors merged together."""
def __init__(self, split1, split2):
self._split1 = split1
self._split2 = split2
def get_read_instruction(self, split_dict):
read_instruction1 = self._split1.get_read_instruction(split_dict)
read_instruction2 = self._split2.get_read_instruction(split_dict)
return read_instruction1 + read_instruction2
def __repr__(self):
return "({!r} + {!r})".format(self._split1, self._split2)
class _SubSplit(SplitBase):
"""Represent a sub split of a split descriptor."""
def __init__(self, split, slice_value):
self._split = split
self._slice_value = slice_value
def get_read_instruction(self, split_dict):
return self._split.get_read_instruction(split_dict)[self._slice_value]
def __repr__(self):
slice_str = "{start}:{stop}"
if self._slice_value.step is not None:
slice_str += ":{step}"
slice_str = slice_str.format(
start="" if self._slice_value.start is None else self._slice_value.start,
stop="" if self._slice_value.stop is None else self._slice_value.stop,
step=self._slice_value.step,
)
return "{!r}(nlp.percent[{}])".format(self._split, slice_str)
class NamedSplit(SplitBase):
"""Descriptor corresponding to a named split (train, test, ...).
Each descriptor can be composed with others using addition or slicing. Ex:
```
split = nlp.Split.TRAIN.subsplit(nlp.percent[0:25]) + nlp.Split.TEST
```
The resulting split will correspond to 25% of the train split merged with
100% of the test split.
Warning:
A split cannot be added twice, so the following will fail:
```
split = (
nlp.Split.TRAIN.subsplit(nlp.percent[:25]) +
nlp.Split.TRAIN.subsplit(nlp.percent[75:])
) # Error
split = nlp.Split.TEST + nlp.Split.ALL # Error
```
Warning:
A slice can be applied only once, so the following are valid:
```
split = (
nlp.Split.TRAIN.subsplit(nlp.percent[:25]) +
nlp.Split.TEST.subsplit(nlp.percent[:50])
)
split = (nlp.Split.TRAIN + nlp.Split.TEST).subsplit(nlp.percent[:50])
```
But not:
```
train = nlp.Split.TRAIN
test = nlp.Split.TEST
split = train.subsplit(nlp.percent[:25]).subsplit(nlp.percent[:25])
split = (train.subsplit(nlp.percent[:25]) + test).subsplit(nlp.percent[:50])
```
"""
def __init__(self, name):
self._name = name
def __str__(self):
return self._name
def __repr__(self):
return "NamedSplit('{name}')".format(name=self._name)
def __eq__(self, other):
"""Equality: nlp.Split.TRAIN == 'train'."""
if isinstance(other, NamedSplit):
return self._name == other._name # pylint: disable=protected-access
elif isinstance(other, SplitBase):
return False
elif isinstance(other, str): # Other should be string
return self._name == other
else:
raise ValueError("Equality not supported between split {} and {}".format(self, other))
def __hash__(self):
return hash(self._name)
def get_read_instruction(self, split_dict):
return SplitReadInstruction(split_dict[self._name])
class NamedSplitAll(NamedSplit):
"""Split corresponding to the union of all defined dataset splits."""
def __init__(self):
super(NamedSplitAll, self).__init__("all")
def __repr__(self):
return f"NamedSplitAll({self._name})"
def get_read_instruction(self, split_dict):
# Merge all dataset split together
read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
return sum(read_instructions, SplitReadInstruction())
class Split(object):
# pylint: disable=line-too-long
"""`Enum` for dataset splits.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
* `TRAIN`: the training data.
* `VALIDATION`: the validation data. If present, this is typically used as
evaluation data while iterating on a model (e.g. changing hyperparameters,
model architecture, etc.).
* `TEST`: the testing data. This is the data to report metrics on. Typically
you do not want to use this during model iteration as you may overfit to it.
Note: All splits, including compositions, inherit from `nlp.SplitBase`.
See the
[guide on splits](https://github.com/huggingface/nlp/tree/master/docs/splits.md)
for more information.
"""
# pylint: enable=line-too-long
TRAIN = NamedSplit("train")
TEST = NamedSplit("test")
VALIDATION = NamedSplit("validation")
def __new__(cls, name):
"""Create a custom split with nlp.Split('custom_name')."""
return NamedSplit(name)
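# --- Usage sketch (editor's addition, not part of the original module) ---------
# `Split` attributes are `NamedSplit` instances; calling `Split(...)` creates a
# custom named split, and two splits can be merged with `+` (resolved through
# `_SplitMerged` above). The name "custom_name" is illustrative only.
def _example_split_usage():
    assert Split.TRAIN == "train"            # equality also works against plain strings
    custom = Split("custom_name")            # NamedSplit('custom_name')
    combined = Split.TRAIN + Split.TEST      # merged split descriptor
    return custom, combined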
# Similar to SplitInfo, but contain an additional slice info
SlicedSplitInfo = collections.namedtuple("SlicedSplitInfo", ["split_info", "slice_value",]) # noqa: E231
class SplitReadInstruction(object):
"""Object containing the reading instruction for the dataset.
Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
but the resolution happens instantaneously, instead of keeping track of the
tree, so that all instructions are compiled and flattened into a single
SplitReadInstruction object containing the list of files and slices to use.
Once resolved, the instructions can be accessed with:
```
read_instructions.get_list_sliced_split_info() # List of splits to use
```
"""
def __init__(self, split_info=None):
self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with " "itself.")
if split_info:
self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
def add(self, sliced_split):
"""Add a SlicedSplitInfo to the read instructions."""
# TODO(epot): Check that the number of examples per shard % 100 == 0
# Otherwise the slices value may be unbalanced and not exactly reflect the
# requested slice.
self._splits[sliced_split.split_info.name] = sliced_split
def __add__(self, other):
"""Merging split together."""
# Will raise an error if a split has already been added (NonMutableDict)
# TODO(epot): If a split is already added but there is no overlap between
# the slices, should merge the slices (ex: [:10] + [80:])
split_instruction = SplitReadInstruction()
split_instruction._splits.update(self._splits) # pylint: disable=protected-access
split_instruction._splits.update(other._splits) # pylint: disable=protected-access
return split_instruction
def __getitem__(self, slice_value):
"""Sub-splits."""
# Will raise an error if a split has already been sliced
split_instruction = SplitReadInstruction()
for v in self._splits.values():
if v.slice_value is not None:
raise ValueError("Trying to slice Split {} which has already been sliced".format(v.split_info.name))
v = v._asdict()
v["slice_value"] = slice_value
split_instruction.add(SlicedSplitInfo(**v))
return split_instruction
def get_list_sliced_split_info(self):
return list(sorted(self._splits.values(), key=lambda x: x.split_info.name))
class SplitDict(dict):
"""Split info object."""
def __init__(self, *args, dataset_name=None, **kwargs):
super(SplitDict, self).__init__(*args, **kwargs)
# super(SplitDict, self).__init__(error_msg="Split {key} already present", **kwargs)
self.dataset_name = dataset_name
def __getitem__(self, key: Union[SplitBase, str]):
# 1st case: The key exists: `info.splits['train']`
if str(key) in self:
return super(SplitDict, self).__getitem__(str(key))
# 2nd case: Uses instructions: `info.splits['train[50%]']`
else:
instructions = make_file_instructions(name=self.dataset_name, split_infos=self.values(), instruction=key,)
return SubSplitInfo(instructions)
def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
raise ValueError("Cannot add elem. Use .add() instead.")
def add(self, split_info: SplitInfo):
"""Add the split info."""
if split_info.name in self:
raise ValueError("Split {} already present".format(split_info.name))
# Forward the dataset name required to build file instructions:
# info.splits['train'].file_instructions
split_info.dataset_name = self.dataset_name
super(SplitDict, self).__setitem__(split_info.name, split_info)
@property
def total_num_examples(self):
"""Return the total number of examples."""
return sum(s.num_examples for s in self.values())
@classmethod
def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
"""Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
if isinstance(split_infos, dict):
split_infos = list(split_infos.values())
if dataset_name is None:
dataset_name = split_infos[0]["dataset_name"]
split_dict = cls(dataset_name=dataset_name)
for split_info in split_infos:
if isinstance(split_info, dict):
split_info = SplitInfo(**split_info)
split_dict.add(split_info)
return split_dict
def to_split_dict(self):
"""Returns a list of SplitInfo protos that we have."""
# Return the SplitInfo, sorted by name
return sorted([s for s in self.values()], key=lambda s: s.name)
def copy(self):
return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
@dataclass
class SplitGenerator:
"""Defines the split information for the generator.
This should be used as returned value of
`GeneratorBasedBuilder._split_generators`.
See `GeneratorBasedBuilder._split_generators` for more info and example
of usage.
Args:
name: `str`, name of the Split for which the generator will
create the examples.
gen_kwargs: `dict`, kwargs to forward to the _generate_examples() method
of the builder.
"""
name: str
gen_kwargs: Dict = field(default_factory=dict)
split_info: SplitInfo = field(init=False)
def __post_init__(self):
self.name = str(self.name)  # Make sure we convert NamedSplits to strings
self.split_info = SplitInfo(name=self.name)
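# --- Usage sketch (editor's addition, not part of the original module) ---------
# `SplitGenerator` converts a `NamedSplit` name to its string form and creates
# the matching `SplitInfo` in `__post_init__`; `gen_kwargs` is forwarded as-is
# to the builder's `_generate_examples`. The kwargs below are illustrative only.
def _example_split_generator():
    gen = SplitGenerator(name=Split.TRAIN, gen_kwargs={"shard": 0})
    assert gen.name == "train"               # NamedSplit converted to str
    assert gen.split_info.name == "train"    # SplitInfo created automatically
    return gen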
|
nlp-master
|
src/nlp/splits.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Access datasets."""
import importlib
import inspect
import json
import logging
import os
import re
import shutil
from hashlib import sha256
from pathlib import Path
from typing import Dict, List, Optional, Union
from urllib.parse import urlparse
from filelock import FileLock
from .arrow_dataset import Dataset
from .builder import DatasetBuilder
from .info import DATASET_INFOS_DICT_FILE_NAME
from .metric import Metric
from .splits import Split
from .utils.download_manager import GenerateMode
from .utils.file_utils import DownloadConfig, cached_path, hf_bucket_url
logger = logging.getLogger(__name__)
CURRENT_FILE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
DATASETS_PATH = os.path.join(CURRENT_FILE_DIRECTORY, "datasets")
DATASETS_MODULE = "nlp.datasets"
METRICS_PATH = os.path.join(CURRENT_FILE_DIRECTORY, "metrics")
METRICS_MODULE = "nlp.metrics"
def import_main_class(module_path, dataset=True):
""" Import a module at module_path and return its main class:
- a DatasetBuilder if dataset is True
- a Metric if dataset is False
"""
importlib.invalidate_caches()
module = importlib.import_module(module_path)
if dataset:
main_cls_type = DatasetBuilder
else:
main_cls_type = Metric
# Find the main class in our imported module
module_main_cls = None
for name, obj in module.__dict__.items():
if isinstance(obj, type) and issubclass(obj, main_cls_type):
if inspect.isabstract(obj):
continue
module_main_cls = obj
break
return module_main_cls
def files_to_hash(file_paths: List[str]):
"""
Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
"""
# List all python files in directories if directories are supplied as part of external imports
to_use_files = []
for file_path in file_paths:
if os.path.isdir(file_path):
to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
else:
to_use_files.append(file_path)
# Get the code from all these files
lines = []
for file_path in to_use_files:
with open(file_path, mode="r") as f:
lines.extend(f.readlines())
filtered_lines = []
for line in lines:
# remove line breaks, white space and comments (str.replace returns a new string, so reassign it)
line = line.replace("\n", "")
line = line.replace(" ", "")
line = line.replace("\t", "")
line = re.sub(r"#.*", "", line)
if line:
filtered_lines.append(line)
file_str = "\n".join(filtered_lines)
# Make a hash from all this code
file_bytes = file_str.encode("utf-8")
file_hash = sha256(file_bytes)
filename = file_hash.hexdigest()
return filename
def convert_github_url(url_path: str):
""" Convert a link to a file on a github repo into a link to the raw github object.
"""
parsed = urlparse(url_path)
sub_directory = None
if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
if "blob" in url_path:
assert url_path.endswith(
".py"
), f"External import from github at {url_path} should point to a file ending with '.py'"
url_path = url_path.replace("blob", "raw") # Point to the raw file
else:
# Parse github url to point to zip
github_path = parsed.path[1:]
repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
repo_owner, repo_name = repo_info.split("/")
url_path = "https://github.com/{}/{}/archive/{}.zip".format(repo_owner, repo_name, branch)
sub_directory = f"{repo_name}-{branch}"
return url_path, sub_directory
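# --- Usage sketch (editor's addition, not part of the original module) ---------
# The two branches of `convert_github_url`; the owner/repo names are placeholders.
def _example_convert_github_url():
    # A 'blob' link to a .py file is rewritten to the raw file URL.
    raw_url, subdir = convert_github_url("https://github.com/owner/repo/blob/master/helper.py")
    # raw_url == "https://github.com/owner/repo/raw/master/helper.py", subdir is None
    # A repository link is rewritten to a zip archive of the branch (master by default).
    zip_url, subdir = convert_github_url("https://github.com/owner/repo")
    # zip_url == "https://github.com/owner/repo/archive/master.zip", subdir == "repo-master"
    return raw_url, zip_url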
def get_imports(file_path: str):
r"""
Find whether we should import or clone additional files for a given processing script.
And list the imports.
We allow:
- library dependencies,
- local dependencies and
- external dependencies whose url is specified with a comment starting with '# From:' followed by the raw url to a file, an archive or a github repository.
external dependencies will be downloaded (and extracted if needed in the dataset folder).
We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
Note that only direct imports in the dataset processing script will be handled.
We don't recursively explore the additional imports to download further files.
```python
import tensorflow
import .c4_utils
import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
```
"""
lines = []
with open(file_path, mode="r") as f:
lines.extend(f.readlines())
logger.info("Checking %s for additional imports.", file_path)
imports = []
for line in lines:
match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
if match is None:
match = re.match(
r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
line,
flags=re.MULTILINE,
)
if match is None:
continue
if match.group(1):
# The import starts with a '.', we will download the relevant file
if any(imp[1] == match.group(2) for imp in imports):
# We already have this import
continue
if match.group(3):
# The import has a comment with 'From:', we'll retrieve it from the given url
url_path = match.group(3)
url_path, sub_directory = convert_github_url(url_path)
imports.append(("external", match.group(2), url_path, sub_directory))
elif match.group(2):
# The import should be at the same place as the file
imports.append(("internal", match.group(2), match.group(2), None))
else:
imports.append(("library", match.group(2), match.group(2), None))
return imports
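# --- Usage sketch (editor's addition, not part of the original module) ---------
# Writes a tiny fake processing script to a temporary file and lists its imports,
# illustrating the library / internal / external classification above. The module
# names and URL are placeholders, not real dependencies.
def _example_get_imports():
    import tempfile
    script = (
        "import json\n"
        "import .my_local_helper\n"
        "import .remote_helper  # From: https://raw.githubusercontent.com/owner/repo/master/remote_helper.py\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(script)
        path = f.name
    found = get_imports(path)
    os.remove(path)
    return found
    # Expected shape:
    # [('library', 'json', 'json', None),
    #  ('internal', 'my_local_helper', 'my_local_helper', None),
    #  ('external', 'remote_helper', '<raw url>', None)]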
def prepare_module(
path: str,
download_config: Optional[DownloadConfig] = None,
dataset: bool = True,
force_local_path: Optional[str] = None,
**download_kwargs,
) -> DatasetBuilder:
r"""
Download/extract/cache a dataset (if dataset==True) or a metric (if dataset==False)
Dataset and metric scripts are cached inside the library to allow easy import (avoiding ugly sys.path tweaks)
and usage with cloudpickle (among other things).
Args:
path (str): path to the dataset or metric script, can be either:
- a path to a local directory containing the dataset processing python script
- a URL to an S3 directory containing a dataset processing python script
download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
dataset (bool): True if the script to load is a dataset, False if the script is a metric.
force_local_path (Optional str): Optional path to a local path to download and prepare the script to.
Used to inspect or modify the script folder.
**download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
Return: ``str`` with
- the import path of the dataset/metric package if force_local_path is not set: e.g. 'nlp.datasets.squad'
- the local path to the dataset/metric file if force_local_path is set: e.g. '/User/huggingface/nlp/datasets/squad/squad.py'
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
download_config.extract_compressed_file = True
download_config.force_extract = True
module_type = "dataset" if dataset else "metric"
name = list(filter(lambda x: x, path.split("/")))[-1]
if not name.endswith(".py"):
name = name + ".py"
# Short name is name without the '.py' at the end (for the module)
short_name = name[:-3]
# We have three ways to find the processing file:
# - if os.path.join(path, name) is a file or a remote url
# - if path is a file or a remote url
# - otherwise we assume path/name is a path to our S3 bucket
combined_path = os.path.join(path, name)
if os.path.isfile(combined_path):
file_path = combined_path
elif os.path.isfile(path):
file_path = path
else:
file_path = hf_bucket_url(path, filename=name, dataset=dataset)
base_path = os.path.dirname(file_path) # remove the filename
dataset_infos = os.path.join(base_path, DATASET_INFOS_DICT_FILE_NAME)
# Load the module in two steps:
# 1. get the processing file on the local filesystem if it's not there (download to cache dir)
# 2. copy from the local file system inside the library to import it
local_path = cached_path(file_path, download_config=download_config)
# Download the dataset infos file if available
try:
local_dataset_infos_path = cached_path(dataset_infos, download_config=download_config,)
except (FileNotFoundError, ConnectionError):
local_dataset_infos_path = None
# Download external imports if needed
imports = get_imports(local_path)
local_imports = []
library_imports = []
for import_type, import_name, import_path, sub_directory in imports:
if import_type == "library":
library_imports.append(import_name) # Import from a library
continue
if import_name == short_name:
raise ValueError(
f"Error in {module_type} script at {file_path}, importing relative {import_name} module "
f"but {import_name} is the name of the {module_type} script. "
f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
f"comment pointing to the original relative import file path."
)
if import_type == "internal":
url_or_filename = base_path + "/" + import_path + ".py"
elif import_type == "external":
url_or_filename = import_path
else:
raise ValueError("Wrong import_type")
local_import_path = cached_path(url_or_filename, download_config=download_config,)
if sub_directory is not None:
local_import_path = os.path.join(local_import_path, sub_directory)
local_imports.append((import_name, local_import_path))
# Check library imports
needs_to_be_installed = []
for library_import in library_imports:
try:
lib = importlib.import_module(library_import) # noqa F841
except ImportError:
needs_to_be_installed.append(library_import)
if needs_to_be_installed:
raise ImportError(
f"To be able to use this {module_type}, you need to install the following dependencies {needs_to_be_installed} "
f"using 'pip install {' '.join(needs_to_be_installed)}' for instance."
)
# Define a directory with a unique name in our dataset or metric folder
# path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
# we use a hash to be able to have multiple versions of a dataset/metric processing file together
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
if force_local_path is None:
main_folder_path = os.path.join(DATASETS_PATH if dataset else METRICS_PATH, short_name)
hash_folder_path = os.path.join(main_folder_path, hash)
else:
main_folder_path = force_local_path
hash_folder_path = force_local_path
local_file_path = os.path.join(hash_folder_path, name)
dataset_infos_path = os.path.join(hash_folder_path, DATASET_INFOS_DICT_FILE_NAME)
# Prevent parallel disk operations
lock_path = local_path + ".lock"
with FileLock(lock_path):
# Create main dataset/metrics folder if needed
if not os.path.exists(main_folder_path):
logger.info(f"Creating main folder for {module_type} {file_path} at {main_folder_path}")
os.makedirs(main_folder_path, exist_ok=True)
else:
logger.info(f"Found main folder for {module_type} {file_path} at {main_folder_path}")
# add an __init__ file to the main dataset folder if needed
init_file_path = os.path.join(main_folder_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Create hash dataset folder if needed
if not os.path.exists(hash_folder_path):
logger.info(f"Creating specific version folder for {module_type} {file_path} at {hash_folder_path}")
os.makedirs(hash_folder_path)
else:
logger.info(f"Found specific version folder for {module_type} {file_path} at {hash_folder_path}")
# add an __init__ file to the hash dataset folder if needed
init_file_path = os.path.join(hash_folder_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Copy dataset.py file in hash folder if needed
if not os.path.exists(local_file_path):
logger.info("Copying script file from %s to %s", file_path, local_file_path)
shutil.copyfile(local_path, local_file_path)
else:
logger.info("Found script file from %s to %s", file_path, local_file_path)
# Copy dataset infos file if needed
if not os.path.exists(dataset_infos_path):
if local_dataset_infos_path is not None:
logger.info("Copying dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
else:
logger.info("Couldn't find dataset infos file at %s", dataset_infos)
else:
if local_dataset_infos_path is not None:
logger.info("Updating dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
else:
logger.info("Found dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
# Record metadata associating original dataset path with local unique folder
meta_path = local_file_path.split(".py")[0] + ".json"
if not os.path.exists(meta_path):
logger.info(f"Creating metadata file for {module_type} {file_path} at {meta_path}")
meta = {"original file path": file_path, "local file path": local_file_path}
# the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
else:
logger.info(f"Found metadata file for {module_type} {file_path} at {meta_path}")
# Copy all the additional imports
for import_name, import_path in local_imports:
if os.path.isfile(import_path):
full_path_local_import = os.path.join(hash_folder_path, import_name + ".py")
if not os.path.exists(full_path_local_import):
logger.info("Copying local import file from %s at %s", import_path, full_path_local_import)
shutil.copyfile(import_path, full_path_local_import)
else:
logger.info("Found local import file from %s at %s", import_path, full_path_local_import)
elif os.path.isdir(import_path):
full_path_local_import = os.path.join(hash_folder_path, import_name)
if not os.path.exists(full_path_local_import):
logger.info("Copying local import directory from %s at %s", import_path, full_path_local_import)
shutil.copytree(import_path, full_path_local_import)
else:
logger.info("Found local import directory from %s at %s", import_path, full_path_local_import)
else:
raise OSError(f"Error with local import at {import_path}")
if force_local_path is None:
module_path = ".".join([DATASETS_MODULE if dataset else METRICS_MODULE, short_name, hash, short_name])
else:
module_path = local_file_path
return module_path
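# --- Usage sketch (editor's addition, not part of the original module) ---------
# How `prepare_module` and `import_main_class` are typically chained; 'squad' is
# used as a representative dataset identifier and requires network access to the
# HuggingFace bucket on first use.
def _example_prepare_module():
    module_path = prepare_module("squad", dataset=True)     # e.g. 'nlp.datasets.squad.<hash>.squad'
    builder_cls = import_main_class(module_path, dataset=True)
    return builder_cls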
def load_metric(
path: str,
name: Optional[str] = None,
process_id: int = 0,
num_process: int = 1,
data_dir: Optional[str] = None,
experiment_id: Optional[str] = None,
in_memory: bool = False,
download_config: Optional[DownloadConfig] = None,
**metric_init_kwargs,
) -> Metric:
r"""
Load a `nlp.Metric`.
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on HuggingFace AWS bucket (list all available datasets and ids with ``nlp.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
name (Optional ``str``): defining the name of the dataset configuration
process_id (Optional ``int``): for distributed evaluation: id of the process
num_process (Optional ``int``): for distributed evaluation: total number of processes
data_dir (Optional str): path to store the temporary predictions and references (default to `~/.nlp/`)
experiment_id (Optional str): An optional unique id for the experiment.
in_memory (bool): Whether to store the temporary results in memory (default: False)
download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
Returns: `nlp.Metric`.
"""
module_path = prepare_module(path, download_config=download_config, dataset=False)
metric_cls = import_main_class(module_path, dataset=False)
metric = metric_cls(
name=name,
process_id=process_id,
num_process=num_process,
data_dir=data_dir,
experiment_id=experiment_id,
in_memory=in_memory,
**metric_init_kwargs,
)
return metric
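# --- Usage sketch (editor's addition, not part of the original module) ---------
# Single-process metric usage; 'glue' with configuration 'mrpc' is used as a
# representative metric, and the predictions/references are dummy values. This
# assumes the Metric API exposes `compute(predictions=..., references=...)`.
def _example_load_metric():
    metric = load_metric("glue", name="mrpc")
    score = metric.compute(predictions=[0, 1], references=[0, 1])
    return score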
def load_dataset(
path: str,
name: Optional[str] = None,
version: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Union[Dict, List] = None,
split: Optional[Union[str, Split]] = None,
cache_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[GenerateMode] = None,
ignore_verifications: bool = False,
save_infos: bool = False,
**config_kwargs,
) -> Union[Dict[Split, Dataset], Dataset]:
r"""Load a dataset
This method does the following under the hood:
1. Download and import in the library the dataset loading script from ``path`` if it's not already cached inside the library.
Processing scripts are small python scripts that define the citation, info and format of the dataset,
contain the URL to the original data files and the code to load examples from the original data files.
You can find some of the scripts here: https://github.com/huggingface/nlp/datasets
and easily upload yours to share them using the CLI ``nlp-cli``.
2. Run the dataset loading script which will:
* Download the dataset file from the original URL (see the script) if it's not already downloaded and cached.
* Process and cache the dataset in typed Arrow tables.
Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python standard types.
They can be directly accessed from disk, loaded in RAM or even streamed over the web.
3. Return a dataset built from the requested splits in ``split`` (default: all).
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on HuggingFace AWS bucket (list all available datasets and ids with ``nlp.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
name (Optional ``str``): defining the name of the dataset configuration
version (Optional ``str``): defining the version of the dataset configuration
data_files (Optional ``str``): defining the data_files of the dataset configuration
data_dir (Optional ``str``): defining the data_dir of the dataset configuration
split (`nlp.Split` or `str`): which split of the data to load.
If None, will return a `dict` with all splits (typically `nlp.Split.TRAIN` and `nlp.Split.TEST`).
If given, will return a single Dataset.
Splits can be combined and specified like in tensorflow-datasets.
cache_dir (Optional ``str``): directory to read/write data. Defaults to "~/nlp".
download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.
download_mode (Optional `nlp.GenerateMode`): select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS
ignore_verifications (bool): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...)
save_infos (bool): Save the dataset information (checksums/size/splits/...)
**config_kwargs (Optional ``dict``): keyword arguments to be passed to the ``nlp.BuilderConfig`` and used in the ``nlp.DatasetBuilder``.
Returns: ``nlp.Dataset`` or ``Dict[nlp.Split, nlp.Dataset]``
if `split` is not None: the dataset requested,
if `split` is None, a `dict<key: nlp.Split, value: nlp.Dataset>` with each split.
"""
# Download/copy dataset processing script
module_path = prepare_module(path, download_config=download_config, dataset=True)
# Get dataset builder class from the processing script
builder_cls = import_main_class(module_path, dataset=True)
# Instantiate the dataset builder
builder_instance = builder_cls(
cache_dir=cache_dir, name=name, version=version, data_dir=data_dir, data_files=data_files, **config_kwargs,
)
# Download and prepare data
builder_instance.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
save_infos=save_infos,
)
# Build dataset for splits
ds = builder_instance.as_dataset(split=split)
return ds
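# --- Usage sketch (editor's addition, not part of the original module) ---------
# Loading a single split versus the full split dict; 'squad' is a representative
# dataset identifier (see `nlp.list_datasets()`).
def _example_load_dataset():
    train_only = load_dataset("squad", split="train")    # -> nlp.Dataset
    all_splits = load_dataset("squad")                   # -> dict mapping splits to nlp.Dataset
    return train_only, all_splits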
|
nlp-master
|
src/nlp/load.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" DatasetInfo and MetricInfo record information we know about a dataset and a metric.
This includes things that we know about the dataset statically, i.e.:
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset as well:
- number of examples (in each split)
- etc.
"""
import json
import logging
import os
from dataclasses import asdict, dataclass, field
from typing import List, Optional, Union
from nlp.utils.version import Version
from .features import Features, Value
from .splits import SplitDict
logger = logging.getLogger(__name__)
# Name of the file to output the DatasetInfo object as JSON.
DATASET_INFO_FILENAME = "dataset_info.json"
DATASET_INFOS_DICT_FILE_NAME = "dataset_infos.json"
LICENSE_FILENAME = "LICENSE"
METRIC_INFO_FILENAME = "metric_info.json"
@dataclass
class SupervisedKeysData:
input: str = ""
output: str = ""
@dataclass
class DownloadChecksumsEntryData:
key: str = ""
value: str = ""
class MissingCachedSizesConfigError(Exception):
"""The expected cached sizes of the download file are missing."""
class NonMatchingCachedSizesError(Exception):
"""The prepared split doesn't have expected sizes."""
@dataclass
class DatasetInfo:
"""Information about a dataset.
`DatasetInfo` documents a dataset, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
# Set in the dataset scripts
description: str = field(default_factory=str)
citation: str = field(default_factory=str)
homepage: str = field(default_factory=str)
license: str = field(default_factory=str)
features: Features = None
supervised_keys: Optional[SupervisedKeysData] = None
# Set later by the builder
builder_name: Optional[str] = None
config_name: Optional[str] = None
version: Optional[Union[str, Version]] = None
# Set later by `download_and_prepare`
splits: Optional[dict] = None
download_checksums: Optional[dict] = None
download_size: Optional[int] = None
dataset_size: Optional[int] = None
size_in_bytes: Optional[int] = None
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
if self.version is not None and not isinstance(self.version, Version):
if isinstance(self.version, str):
self.version = Version(self.version)
else:
self.version = Version.from_dict(self.version)
if self.splits is not None and not isinstance(self.splits, SplitDict):
self.splits = SplitDict.from_split_dict(self.splits)
if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
if isinstance(self.supervised_keys, (tuple, list)):
self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
else:
self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
def _license_path(self, dataset_info_dir):
return os.path.join(dataset_info_dir, LICENSE_FILENAME)
def write_to_directory(self, dataset_info_dir):
""" Write `DatasetInfo` as JSON to `dataset_info_dir`.
Also save the license separately in LICENSE.
"""
with open(os.path.join(dataset_info_dir, DATASET_INFO_FILENAME), "wb") as f:
self._dump_info(f)
with open(os.path.join(dataset_info_dir, LICENSE_FILENAME), "wb") as f:
self._dump_license(f)
def _dump_info(self, file):
"""Dump info into `file` file-like object opened in bytes mode (to support remote files)"""
file.write(json.dumps(asdict(self)).encode("utf-8"))
def _dump_license(self, file):
"""Dump license into `file` file-like object opened in bytes mode (to support remote files)"""
file.write(self.license.encode("utf-8"))
@classmethod
def from_directory(cls, dataset_info_dir):
"""Create DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
logger.info("Loading Dataset info from %s", dataset_info_dir)
if not dataset_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
with open(os.path.join(dataset_info_dir, DATASET_INFO_FILENAME), "r") as f:
dataset_info_dict = json.load(f)
return cls(**dataset_info_dict)
def update(self, other_dataset_info, ignore_none=True):
self_dict = self.__dict__
self_dict.update(
**{k: v for k, v in other_dataset_info.__dict__.items() if (v is not None or not ignore_none)}
)
class DatasetInfosDict(dict):
def write_to_directory(self, dataset_infos_dir, overwrite=False):
total_dataset_infos = {}
dataset_infos_path = os.path.join(dataset_infos_dir, DATASET_INFOS_DICT_FILE_NAME)
if os.path.exists(dataset_infos_path) and not overwrite:
logger.info("Dataset Infos already exists in {}. Completing it with new infos.".format(dataset_infos_dir))
total_dataset_infos = self.from_directory(dataset_infos_dir)
else:
logger.info("Writing new Dataset Infos in {}".format(dataset_infos_dir))
total_dataset_infos.update(self)
with open(dataset_infos_path, "w") as f:
json.dump({config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()}, f)
@classmethod
def from_directory(cls, dataset_infos_dir):
logger.info("Loading Dataset Infos from {}".format(dataset_infos_dir))
with open(os.path.join(dataset_infos_dir, DATASET_INFOS_DICT_FILE_NAME), "r") as f:
dataset_info_dict = {
config_name: DatasetInfo(**dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
return cls(**dataset_info_dict)
@dataclass
class MetricInfo:
"""Information about a metric.
`MetricInfo` documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
# Set in the dataset scripts
description: str
citation: str
features: Features
inputs_description: str = field(default_factory=str)
homepage: str = field(default_factory=str)
license: str = field(default_factory=str)
codebase_urls: List[str] = field(default_factory=list)
reference_urls: List[str] = field(default_factory=list)
streamable: bool = False
format: Optional[str] = None
# Set later by the builder
metric_name: Optional[str] = None
config_name: Optional[str] = None
version: Optional[str] = None
def __post_init__(self):
assert "predictions" in self.features, "Need to have at least a 'predictions' field in 'features'."
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
raise ValueError(
f"When using 'numpy' format, all features should be a `nlp.Value` feature. "
f"Here {key} is an instance of {value.__class__.__name__}"
)
def write_to_directory(self, metric_info_dir):
""" Write `MetricInfo` as JSON to `metric_info_dir`.
Also save the license separately in LICENSE.
"""
with open(os.path.join(metric_info_dir, METRIC_INFO_FILENAME), "w") as f:
json.dump(asdict(self), f)
with open(os.path.join(metric_info_dir, LICENSE_FILENAME), "w") as f:
f.write(self.license)
@classmethod
def from_directory(cls, metric_info_dir):
"""Create MetricInfo from the JSON file in `metric_info_dir`.
Args:
metric_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
logger.info("Loading Metric info from %s", metric_info_dir)
if not metric_info_dir:
raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
with open(os.path.join(metric_info_dir, METRIC_INFO_FILENAME), "r") as f:
dataset_info_dict = json.load(f)
return cls(**dataset_info_dict)
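# --- Usage sketch (editor's addition, not part of the original module) ---------
# Round-trips a minimal DatasetInfo through a directory using the helpers above;
# the description and license strings are placeholders.
def _example_dataset_info_roundtrip():
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    info = DatasetInfo(description="toy dataset", license="MIT")
    info.write_to_directory(tmp_dir)                 # writes dataset_info.json and LICENSE
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert reloaded.description == "toy dataset"
    return reloaded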
|
nlp-master
|
src/nlp/info.py
|
nlp-master
|
src/nlp/metrics/__init__.py
|
|
nlp-master
|
src/nlp/datasets/__init__.py
|
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import re
from dataclasses import dataclass
_VERSION_TMPL = r"^(?P<major>{v})" r"\.(?P<minor>{v})" r"\.(?P<patch>{v})$"
_VERSION_WILDCARD_REG = re.compile(_VERSION_TMPL.format(v=r"\d+|\*"))
_VERSION_RESOLVED_REG = re.compile(_VERSION_TMPL.format(v=r"\d+"))
@dataclass()
class Version:
""" Dataset version MAJOR.MINOR.PATCH.
Args:
version_str: string. Eg: "1.2.3".
description: string, a description of what is new in this version.
nlp_version_to_prepare: string, defaults to None. If set, indicates that
current version cannot be used to `download_and_prepare` the
dataset, but that version {nlp_version_to_prepare} should be
used instead.
"""
version_str: str
description: str = None
nlp_version_to_prepare: str = None
major: str = None
minor: str = None
patch: str = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version(self.version_str)
def __repr__(self):
return "{}.{}.{}".format(*self.tuple)
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise AssertionError("{} (type {}) cannot be compared to version.".format(other, type(other)))
def __eq__(self, other):
other = self._validate_operand(other)
return self.tuple == other.tuple
def __ne__(self, other):
other = self._validate_operand(other)
return self.tuple != other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __le__(self, other):
other = self._validate_operand(other)
return self.tuple <= other.tuple
def __gt__(self, other):
other = self._validate_operand(other)
return self.tuple > other.tuple
def __ge__(self, other):
other = self._validate_operand(other)
return self.tuple >= other.tuple
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return major in [self.major, "*"] and minor in [self.minor, "*"] and patch in [self.patch, "*"]
@classmethod
def from_dict(cls, dic):
return cls(**dic)
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(v if v == "*" else int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
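# --- Usage sketch (editor's addition, not part of the original module) ---------
# Comparison and wildcard matching as implemented above; the version strings are
# arbitrary examples.
def _example_version_usage():
    v = Version("1.2.3")
    assert v == "1.2.3"            # strings are coerced to Version for comparison
    assert v > Version("1.0.0")
    assert v.match("1.2.*")        # wildcard match on the patch number
    assert not v.match("2.*.*")
    return v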
|
nlp-master
|
src/nlp/utils/version.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Mock download manager interface."""
import logging
import os
import urllib.parse
from .file_utils import cached_path, hf_bucket_url
logger = logging.getLogger(__name__)
class MockDownloadManager(object):
dummy_file_name = "dummy_data"
def __init__(self, dataset_name, config, version, cache_dir=None, is_local=False):
self.downloaded_size = 0
self.dataset_name = dataset_name
self.cache_dir = cache_dir
self.is_local = is_local
self.config = config
# TODO(PVP, QUENTIN) might need to make this more general
self.version_name = str(version.major) + "." + str(version.minor) + "." + str(version.patch)
# to be downloaded
self._dummy_file = None
self._bucket_url = None
@property
def dummy_file(self):
if self._dummy_file is None:
self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
def dummy_data_folder(self):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy", self.config.name, self.version_name)
# structure is dummy / version_name
return os.path.join("dummy", self.version_name)
@property
def dummy_zip_file(self):
return os.path.join(self.dummy_data_folder, "dummy_data.zip")
def download_dummy_data(self):
path_to_dummy_data_dir = (
self.local_path_to_dummy_data if self.is_local is True else self.aws_path_to_dummy_data
)
local_path = cached_path(
path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
)
return os.path.join(local_path, self.dummy_file_name)
@property
def local_path_to_dummy_data(self):
return os.path.join("datasets", self.dataset_name, self.dummy_zip_file)
@property
def aws_path_to_dummy_data(self):
if self._bucket_url is None:
self._bucket_url = hf_bucket_url(self.dataset_name, filename=self.dummy_zip_file)
return self._bucket_url
@property
def manual_dir(self):
# return the full path if it's a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.split("/")[:-1])
# this function has to be in the manager under this name so that testing works
def download_and_extract(self, data_url, *args):
if self.cache_dir is not None:
# dummy data is downloaded and tested
dummy_file = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
dummy_file = self.dummy_file_name
# special case when data_url is a dict
if isinstance(data_url, dict):
return self.create_dummy_data_dict(dummy_file, data_url)
return dummy_file
# this function has to be in the manager under this name so that testing works
def download(self, data_url, *args):
return self.download_and_extract(data_url)
# this function has to be in the manager under this name so that testing works
def download_custom(self, data_url, custom_download):
return self.download_and_extract(data_url)
# this function has to be in the manager under this name so that testing works
def extract(self, path):
return path
# this function has to be in the manager under this name so that testing works
def get_recorded_sizes_checksums(self):
return {}
def create_dummy_data_dict(self, path_to_dummy_data, data_url):
dummy_data_dict = {}
for key, abs_path in data_url.items():
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(abs_path, list):
value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(x.split("/")[-1])) for x in abs_path]
else:
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(abs_path.split("/")[-1]))
dummy_data_dict[key] = value
# make sure that values are unique
first_value = next(iter(dummy_data_dict.values()))
if isinstance(first_value, str) and len(set(dummy_data_dict.values())) < len(dummy_data_dict.values()):
# append key to value to make its name unique
dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
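# --- Usage sketch (editor's addition, not part of the original module) ---------
# Shows how a dict of real URLs is mapped to file names inside the dummy data
# folder; the dataset name and URLs are placeholders, and the relative import of
# Version is only needed for this sketch.
def _example_create_dummy_data_dict():
    from .version import Version
    dl_manager = MockDownloadManager("my_dataset", config=None, version=Version("1.0.0"))
    data_url = {
        "train": "https://example.com/data/train.json",
        "test": "https://example.com/data/test.json",
    }
    return dl_manager.create_dummy_data_dict("dummy_data", data_url)
    # -> {'train': 'dummy_data/train.json', 'test': 'dummy_data/test.json'} (separators are OS-dependent)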
|
nlp-master
|
src/nlp/utils/mock_download_manager.py
|
import logging
import os
import pyarrow as pa
import pyarrow.parquet as pq
from apache_beam.io import filebasedsink
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.iobase import Write
from apache_beam.pipeline import Pipeline
from apache_beam.transforms import PTransform
CHUNK_SIZE = 2 << 20 # 2mb
logger = logging.getLogger(__name__)
class BeamPipeline(Pipeline):
"""Wrapper over `apache_beam.pipeline.Pipeline` for convenience"""
def is_local(self):
runner = self._options.get_all_options().get("runner")
return runner in [None, "DirectRunner", "PortableRunner"]
def upload_local_to_remote(local_file_path, remote_file_path, force_upload=False):
"""Use the Beam Filesystems to upload to a remote directory on gcs/s3/hdfs..."""
fs = FileSystems
if fs.exists(remote_file_path):
if force_upload:
logger.info("Remote path already exists: {}. Overwriting it as force_upload=True.".format(remote_file_path))
else:
logger.info("Remote path already exists: {}. Skipping it as force_upload=False.".format(remote_file_path))
return
with fs.create(remote_file_path) as remote_file:
with open(local_file_path, "rb") as local_file:
chunk = local_file.read(CHUNK_SIZE)
while chunk:
remote_file.write(chunk)
chunk = local_file.read(CHUNK_SIZE)
def download_remote_to_local(remote_file_path, local_file_path, force_download=False):
"""Use the Beam Filesystems to download from a remote directory on gcs/s3/hdfs..."""
fs = FileSystems
if os.path.exists(local_file_path):
if force_download:
logger.info("Local path already exists: {}. Overwriting it as force_download=True.".format(local_file_path))
else:
logger.info("Local path already exists: {}. Skipping it as force_download=False.".format(local_file_path))
return
with fs.open(remote_file_path) as remote_file:
with open(local_file_path, "wb") as local_file:
chunk = remote_file.read(CHUNK_SIZE)
while chunk:
local_file.write(chunk)
chunk = remote_file.read(CHUNK_SIZE)
class WriteToParquet(PTransform):
"""
From `apache_beam.io.parquetio.WriteToParquet`, but with a fix for the jira issue `BEAM-10022`.
Only the method `_flush_buffer` is different from the original implementation.
A ``PTransform`` for writing parquet files.
This ``PTransform`` is currently experimental. No backward-compatibility
guarantees.
"""
def __init__(
self,
file_path_prefix,
schema,
row_group_buffer_size=64 * 1024 * 1024,
record_batch_size=1000,
codec="none",
use_deprecated_int96_timestamps=False,
file_name_suffix="",
num_shards=0,
shard_name_template=None,
mime_type="application/x-parquet",
):
"""Initialize a WriteToParquet transform.
(from apache_beam.io.parquetio, only ._flush_buffer() is different)
Writes parquet files from a :class:`~apache_beam.pvalue.PCollection` of
records. Each record is a dictionary with keys of a string type that
represent column names. Schema must be specified like the example below.
.. testsetup::
from tempfile import NamedTemporaryFile
import glob
import os
import pyarrow
filename = NamedTemporaryFile(delete=False).name
.. testcode::
with beam.Pipeline() as p:
records = p | 'Read' >> beam.Create(
[{'name': 'foo', 'age': 10}, {'name': 'bar', 'age': 20}]
)
_ = records | 'Write' >> beam.io.WriteToParquet(filename,
pyarrow.schema(
[('name', pyarrow.binary()), ('age', pyarrow.int64())]
)
)
.. testcleanup::
for output in glob.glob('{}*'.format(filename)):
os.remove(output)
For more information on supported types and schema, please see the pyarrow
document.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
schema: The schema to use, as type of ``pyarrow.Schema``.
row_group_buffer_size: The byte size of the row group buffer. Note that
this size is for uncompressed data in memory and is normally much
bigger than the actual row group size written to a file.
record_batch_size: The number of records in each record batch. Record
batch is a basic unit used for storing data in the row group buffer.
A higher record batch size implies lower granularity of the row group buffer
size. To configure a row group size based on the number of records,
set ``row_group_buffer_size`` to 1 and use ``record_batch_size`` to
adjust the value.
codec: The codec to use for block-level compression. Any string supported
by the pyarrow specification is accepted.
use_deprecated_int96_timestamps: Write nanosecond resolution timestamps to
INT96 Parquet format. Defaults to False.
file_name_suffix: Suffix for the files written.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
mime_type: The MIME type to use for the produced files, if the filesystem
supports specifying MIME types.
Returns:
A WriteToParquet transform usable for writing.
"""
super(WriteToParquet, self).__init__()
self._sink = _create_parquet_sink(
file_path_prefix,
schema,
codec,
row_group_buffer_size,
record_batch_size,
use_deprecated_int96_timestamps,
file_name_suffix,
num_shards,
shard_name_template,
mime_type,
)
def expand(self, pcoll):
return pcoll | Write(self._sink)
def display_data(self):
return {"sink_dd": self._sink}
def _create_parquet_sink(
file_path_prefix,
schema,
codec,
row_group_buffer_size,
record_batch_size,
use_deprecated_int96_timestamps,
file_name_suffix,
num_shards,
shard_name_template,
mime_type,
):
return _ParquetSink(
file_path_prefix,
schema,
codec,
row_group_buffer_size,
record_batch_size,
use_deprecated_int96_timestamps,
file_name_suffix,
num_shards,
shard_name_template,
mime_type,
)
class _ParquetSink(filebasedsink.FileBasedSink):
"""A sink for parquet files."""
def __init__(
self,
file_path_prefix,
schema,
codec,
row_group_buffer_size,
record_batch_size,
use_deprecated_int96_timestamps,
file_name_suffix,
num_shards,
shard_name_template,
mime_type,
):
super(_ParquetSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=None,
mime_type=mime_type,
# Compression happens at the block level using the supplied codec, and
# not at the file level.
compression_type=CompressionTypes.UNCOMPRESSED,
)
self._schema = schema
self._codec = codec
self._row_group_buffer_size = row_group_buffer_size
self._use_deprecated_int96_timestamps = use_deprecated_int96_timestamps
self._buffer = [[] for _ in range(len(schema.names))]
self._buffer_size = record_batch_size
self._record_batches = []
self._record_batches_byte_size = 0
self._file_handle = None
def open(self, temp_path):
self._file_handle = super(_ParquetSink, self).open(temp_path)
return pq.ParquetWriter(
self._file_handle,
self._schema,
compression=self._codec,
use_deprecated_int96_timestamps=self._use_deprecated_int96_timestamps,
)
def write_record(self, writer, value):
if len(self._buffer[0]) >= self._buffer_size:
self._flush_buffer()
if self._record_batches_byte_size >= self._row_group_buffer_size:
self._write_batches(writer)
# reorder the data in columnar format.
for i, n in enumerate(self._schema.names):
self._buffer[i].append(value[n])
def close(self, writer):
if len(self._buffer[0]) > 0:
self._flush_buffer()
if self._record_batches_byte_size > 0:
self._write_batches(writer)
writer.close()
if self._file_handle:
self._file_handle.close()
self._file_handle = None
def display_data(self):
res = super(_ParquetSink, self).display_data()
res["codec"] = str(self._codec)
res["schema"] = str(self._schema)
res["row_group_buffer_size"] = str(self._row_group_buffer_size)
return res
def _write_batches(self, writer):
table = pa.Table.from_batches(self._record_batches)
self._record_batches = []
self._record_batches_byte_size = 0
writer.write_table(table)
def _flush_buffer(self):
arrays = [[] for _ in range(len(self._schema.names))]
for x, y in enumerate(self._buffer):
arrays[x] = pa.array(y, type=self._schema.types[x])
self._buffer[x] = []
rb = pa.RecordBatch.from_arrays(arrays, self._schema.names)
self._record_batches.append(rb)
size = 0
for x in arrays:
for b in x.buffers():
if b is not None: # only this line is different
size = size + b.size
self._record_batches_byte_size = self._record_batches_byte_size + size
|
nlp-master
|
src/nlp/utils/beam_utils.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
from .download_manager import DownloadManager, GenerateMode
from .file_utils import (
HF_DATASETS_CACHE,
HF_METRICS_CACHE,
DownloadConfig,
cached_path,
hf_bucket_url,
is_remote_url,
is_tf_available,
is_torch_available,
)
from .mock_download_manager import MockDownloadManager
from .py_utils import *
from .tqdm_utils import *
from .version import Version
|
nlp-master
|
src/nlp/utils/__init__.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Download manager interface."""
import enum
import logging
import os
from .file_utils import HF_DATASETS_CACHE, cached_path, get_from_cache, hash_url_to_filename
from .info_utils import get_size_checksum_dict
from .py_utils import flatten_nested, map_nested, size_str
logger = logging.getLogger(__name__)
class GenerateMode(enum.Enum):
"""`Enum` for how to treat pre-existing downloads and data.
The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
raw downloads and the prepared dataset if they exist.
The generation modes:
| | Downloads | Dataset |
| -----------------------------------|-----------|---------|
| `REUSE_DATASET_IF_EXISTS` (default)| Reuse | Reuse |
| `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
| `FORCE_REDOWNLOAD` | Fresh | Fresh |
"""
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload"
class DownloadManager(object):
def __init__(
self, dataset_name=None, data_dir=None, download_config=None,
):
"""Download manager constructor.
Args:
dataset_name: `str`, name of the dataset this instance will be used for. If
provided, downloads will be associated with this dataset name.
data_dir: `str`, can be used to specify a manual directory to get the files from.
download_config: `DownloadConfig`, configuration for the downloads (cache directory,
proxies, resume, user agent, etc.).
"""
self._dataset_name = dataset_name
self._data_dir = data_dir
self._download_config = download_config
# To record what is being used: {url: {num_bytes: int, checksum: str}}
self._recorded_sizes_checksums = {}
@property
def manual_dir(self):
return self._data_dir
@property
def downloaded_size(self):
"""Returns the total size of downloaded files."""
return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
def ship_files_with_pipeline(self, downloaded_path_or_paths, pipeline):
"""
Ship the files using Beam FileSystems to the pipeline temp dir.
"""
from nlp.utils.beam_utils import upload_local_to_remote
remote_dir = pipeline._options.get_all_options().get("temp_location")
if remote_dir is None:
raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
def upload(local_file_path):
remote_file_path = os.path.join(remote_dir, "downloads", os.path.basename(local_file_path))
logger.info(
"Uploading {} ({}) to {}.".format(
local_file_path, size_str(os.path.getsize(local_file_path)), remote_file_path
)
)
upload_local_to_remote(local_file_path, remote_file_path)
return remote_file_path
uploaded_path_or_paths = map_nested(lambda local_file_path: upload(local_file_path), downloaded_path_or_paths,)
return uploaded_path_or_paths
def _record_sizes_checksums(self, url_or_urls, downloaded_path_or_paths):
"""Record size/checksum of downloaded files."""
flattened_urls_or_urls = flatten_nested(url_or_urls)
flattened_downloaded_path_or_paths = flatten_nested(downloaded_path_or_paths)
for url, path in zip(flattened_urls_or_urls, flattened_downloaded_path_or_paths):
self._recorded_sizes_checksums[url] = get_size_checksum_dict(path)
def download_custom(self, url_or_urls, custom_download):
"""
Download given url(s) by calling `custom_download`.
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url is a `str`.
custom_download: Callable with signature `(src_url: str, dst_path: str) -> Any`,
for example `tf.io.gfile.copy`, which lets you download from Google Storage.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
"""
cache_dir = self._download_config.cache_dir or os.path.join(HF_DATASETS_CACHE, "downloads")
def url_to_downloaded_path(url):
return os.path.join(cache_dir, hash_url_to_filename(url))
downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
flattened_urls_or_urls = flatten_nested(url_or_urls)
flattened_downloaded_path_or_paths = flatten_nested(downloaded_path_or_paths)
for url, path in zip(flattened_urls_or_urls, flattened_downloaded_path_or_paths):
try:
get_from_cache(url, cache_dir=cache_dir, local_files_only=True)
cached = True
except FileNotFoundError:
cached = False
if not cached or self._download_config.force_download:
custom_download(url, path)
get_from_cache(url, cache_dir=cache_dir, local_files_only=True)
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
return downloaded_path_or_paths
def download(self, url_or_urls):
"""Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url is a `str`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
"""
downloaded_path_or_paths = map_nested(
lambda url: cached_path(url, download_config=self._download_config,), url_or_urls,
)
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
return downloaded_path_or_paths
def iter_archive(self, path):
"""Returns iterator over files within archive.
Args:
path: path to archive.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
File objects are opened in byte mode (`io.BufferedReader`).
"""
logger.info("Extracting archive at %s", str(path))
extracted_path = self.extract(path)
if os.path.isfile(extracted_path):
with open(extracted_path, "rb") as file_obj:
yield (extracted_path, file_obj)
# We do this complex absolute/relative scheme to reproduce the API of iter_tar of tfds
for root, dirs, files in os.walk(extracted_path, topdown=False):
relative_dir_path = root.replace(os.path.abspath(extracted_path) + "/", "")
for name in files:
relative_file_path = os.path.join(relative_dir_path, name)
absolute_file_path = os.path.join(root, name)
with open(absolute_file_path, "rb") as file_obj:
yield (relative_file_path, file_obj)
def extract(self, path_or_paths):
"""Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path is a `str`.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
"""
return map_nested(
lambda path: cached_path(path, extract_compressed_file=True, force_extract=False), path_or_paths,
)
def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url is a `str`.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
return self.extract(self.download(url_or_urls))
def get_recorded_sizes_checksums(self):
return self._recorded_sizes_checksums.copy()
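# Minimal usage sketch of the DownloadManager (illustrative only; the url and dataset
# name below are hypothetical placeholders): download and extract a url, then inspect
# the recorded sizes and checksums.
def _download_manager_example():  # illustrative helper
    from nlp.utils.file_utils import DownloadConfig

    dl_manager = DownloadManager(dataset_name="my_dataset", download_config=DownloadConfig())
    extracted_path = dl_manager.download_and_extract("https://example.com/data.tar.gz")
    # {url: {"num_bytes": ..., "checksum": ...}} for every downloaded file
    sizes_checksums = dl_manager.get_recorded_sizes_checksums()
    return extracted_path, sizes_checksums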
|
nlp-master
|
src/nlp/utils/download_manager.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Some python utils function and classes.
"""
import contextlib
import functools
import itertools
import os
from io import BytesIO as StringIO
from shutil import disk_usage
from types import CodeType
import dill
# NOTE: When used on an instance method, the cache is shared across all
# instances and IS NOT per-instance.
# See
# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance
# For @property methods, use @memoized_property below.
memoize = functools.lru_cache
def convert_tuples_in_lists(data_struct):
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {k: convert_tuples_in_lists(v) for k, v in data_struct.items()}
else:
if isinstance(data_struct, (list, tuple)):
return [convert_tuples_in_lists(v) for v in data_struct]
return data_struct
def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "Unknown size".
For example `size_str(1.5 * 2 ** 30) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "Unknown size"
_NAME_LIST = [("PiB", 2 ** 50), ("TiB", 2 ** 40), ("GiB", 2 ** 30), ("MiB", 2 ** 20), ("KiB", 2 ** 10)]
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes")
def is_notebook():
"""Returns True if running in a notebook (Colab, Jupyter) environement."""
# Inspired from the tfdm autonotebook code
try:
from IPython import get_ipython # pylint: disable=import-outside-toplevel,g-import-not-at-top
if "IPKernelApp" not in get_ipython().config:
return False  # Running in an IPython terminal
except: # noqa: E722
return False
else:
return True
@contextlib.contextmanager
def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
try:
yield
finally:
setattr(obj, attr, original)
def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)): # set merge all keys
# Will raise KeyError if the dicts don't have the same keys
yield key, tuple(d[key] for d in dicts)
class NonMutableDict(dict):
"""Dict where keys can only be added but not modified.
Will raise an error if the user tries to overwrite an existing key. The error
message can be customized during construction; it is formatted using {key} for
the overwritten key.
"""
def __init__(self, *args, **kwargs):
self._error_msg = kwargs.pop("error_msg", "Try to overwrite existing key: {key}",)
if kwargs:
raise ValueError("NonMutableDict cannot be initialized with kwargs.")
super(NonMutableDict, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key in self:
raise ValueError(self._error_msg.format(key=key))
return super(NonMutableDict, self).__setitem__(key, value)
def update(self, other):
if any(k in self for k in other):
raise ValueError(self._error_msg.format(key=set(self) & set(other)))
return super(NonMutableDict, self).update(other)
class classproperty(property): # pylint: disable=invalid-name
"""Descriptor to be used as decorator for @classmethods."""
def __get__(self, obj, objtype=None):
return self.fget.__get__(None, objtype)()
class memoized_property(property): # pylint: disable=invalid-name
"""Descriptor that mimics @property but caches output in member variable."""
def __get__(self, obj, objtype=None):
# See https://docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
attr = "__cached_" + self.fget.__name__
cached = getattr(obj, attr, None)
if cached is None:
cached = self.fget(obj)
setattr(obj, attr, cached)
return cached
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, dict):
return {k: map_nested(function, v, dict_only, map_tuple) for k, v in data_struct.items()}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple) for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
# Singleton
return function(data_struct)
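# Illustrative example: `map_nested` applies the function to every leaf of a nested
# dict/list structure and preserves the structure itself.
def _map_nested_example():  # illustrative helper
    data = {"a": [1, 2], "b": {"c": 3}}
    assert map_nested(lambda x: x * 10, data) == {"a": [10, 20], "b": {"c": 30}}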
def zip_nested(arg0, *args, **kwargs):
"""Zip data struct together and return a data struct with the same shape."""
# Python 2 does not support keyword-only arguments
dict_only = kwargs.pop("dict_only", False)
assert not kwargs
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(arg0, dict):
return {k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)}
elif not dict_only:
if isinstance(arg0, list):
return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]
# Singleton
return (arg0,) + args
def flatten_nest_dict(d):
"""Return the dict with all nested keys flattened joined with '/'."""
# Use NonMutableDict to ensure there is no collision between features keys
flat_dict = NonMutableDict()
for k, v in d.items():
if isinstance(v, dict):
flat_dict.update({"{}/{}".format(k, k2): v2 for k2, v2 in flatten_nest_dict(v).items()})
else:
flat_dict[k] = v
return flat_dict
def flatten_nested(data_struct):
"""Flatten data struct of obj or `list`/`dict` of obj"""
if isinstance(data_struct, dict):
data_struct = list(flatten_nest_dict(data_struct).values())
if data_struct and isinstance(data_struct[0], (list, tuple)):
data_struct = [x for sublist in data_struct for x in sublist]
if isinstance(data_struct, (list, tuple)):
return data_struct
# Singleton
return [data_struct]
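# Illustrative example: `flatten_nest_dict` joins nested keys with '/', while
# `flatten_nested` turns a nested obj / list / dict into a flat list of leaves.
def _flatten_example():  # illustrative helper
    assert flatten_nest_dict({"a": {"b": 1}}) == {"a/b": 1}
    assert flatten_nested({"train": ["a", "b"], "test": ["c"]}) == ["a", "b", "c"]
    assert flatten_nested("single") == ["single"]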
def nlp_dir():
"""Path to nlp directory."""
return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
class abstractclassmethod(classmethod): # pylint: disable=invalid-name
"""Decorate a method to mark it as an abstract @classmethod."""
__isabstractmethod__ = True
def __init__(self, fn):
fn.__isabstractmethod__ = True
super(abstractclassmethod, self).__init__(fn)
def get_nlp_path(relative_path):
"""Returns absolute path to file given path relative to nlp root."""
path = os.path.join(nlp_dir(), relative_path)
return path
def has_sufficient_disk_space(needed_bytes, directory="."):
try:
free_bytes = disk_usage(os.path.abspath(directory)).free
except OSError:
return True
return needed_bytes < free_bytes
class Pickler(dill.Pickler):
"""Same Pickler as the one from dill, but improved for notebooks and shells"""
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
def dump(obj, file):
"""pickle an object to a file"""
Pickler(file).dump(obj)
return
def dumps(obj):
"""pickle an object to a string"""
file = StringIO()
dump(obj, file)
return file.getvalue()
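# Illustrative example: because `dumps` goes through the dill-based Pickler above, it can
# serialize objects the standard pickle module cannot, e.g. a lambda defined in a shell
# or a notebook (assuming a dill version compatible with the hooks in this module).
def _dumps_example():  # illustrative helper
    pickled = dumps(lambda x: x + 1)
    assert dill.loads(pickled)(1) == 2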
def pklregister(t):
def proxy(func):
Pickler.dispatch[t] = func
return func
return proxy
@pklregister(CodeType)
def save_code(pickler, obj):
"""
From dill._dill.save_code
This is a modified version that removes the origin (filename + line no.)
of functions created, for example, in notebooks or shells.
"""
dill._dill.log.info("Co: %s" % obj)
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
# Only those two lines are different from the original implementation:
co_filename = "" if obj.co_filename.startswith("<") else obj.co_filename
co_firstlineno = 1 if obj.co_filename.startswith("<") else obj.co_firstlineno
# The rest is the same as in the original dill implementation
if dill._dill.PY3:
if hasattr(obj, "co_posonlyargcount"):
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(CodeType, args, obj=obj)
dill._dill.log.info("# Co")
return
|
nlp-master
|
src/nlp/utils/py_utils.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Wrapper around tqdm.
"""
import contextlib
from tqdm import auto as tqdm_lib
class EmptyTqdm(object):
"""Dummy tqdm which doesn't do anything."""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self._iterator = args[0] if args else None
def __iter__(self):
return iter(self._iterator)
def __getattr__(self, _):
"""Return empty function."""
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return
_active = True
def tqdm(*args, **kwargs):
if _active:
return tqdm_lib.tqdm(*args, **kwargs)
else:
return EmptyTqdm(*args, **kwargs)
def async_tqdm(*args, **kwargs):
if _active:
return _async_tqdm(*args, **kwargs)
else:
return EmptyTqdm(*args, **kwargs)
def disable_progress_bar():
"""Disabled Tqdm progress bar.
Usage:
nlp.disable_progress_bar()
"""
# Replace tqdm
global _active
_active = False
@contextlib.contextmanager
def _async_tqdm(*args, **kwargs):
"""Wrapper around Tqdm which can be updated in threads.
Usage:
```
with utils.async_tqdm(...) as pbar:
# pbar can then be modified inside a thread
# pbar.update_total(3)
# pbar.update()
```
Args:
*args: args of tqdm
**kwargs: kwargs of tqdm
Yields:
pbar: Async pbar which can be shared between threads.
"""
with tqdm_lib.tqdm(*args, **kwargs) as pbar:
pbar = _TqdmPbarAsync(pbar)
yield pbar
pbar.clear() # pop pbar from the active list of pbar
print()  # Avoid the next log overlapping with the bar
class _TqdmPbarAsync(object):
"""Wrapper around Tqdm pbar which be shared between thread."""
_tqdm_bars = []
def __init__(self, pbar):
self._lock = tqdm_lib.tqdm.get_lock()
self._pbar = pbar
self._tqdm_bars.append(pbar)
def update_total(self, n=1):
"""Increment total pbar value."""
with self._lock:
self._pbar.total += n
self.refresh()
def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh()
def refresh(self):
"""Refresh all."""
for pbar in self._tqdm_bars:
pbar.refresh()
def clear(self):
"""Remove the tqdm pbar from the update."""
self._tqdm_bars.pop()
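# Illustrative usage sketch: the module-level `tqdm` wrapper degrades to the no-op
# EmptyTqdm once progress bars have been disabled.
if __name__ == "__main__":
    for _ in tqdm(range(3), desc="with progress bar"):
        pass
    disable_progress_bar()
    for _ in tqdm(range(3), desc="silent"):
        pass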
|
nlp-master
|
src/nlp/utils/tqdm_utils.py
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import gzip
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from hashlib import sha256
from typing import Dict, Optional, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import requests
from filelock import FileLock
from tqdm.auto import tqdm
from .. import __version__
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
import torch
_torch_available = True # pylint: disable=invalid-name
logger.info("PyTorch version {} available.".format(torch.__version__))
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
except ImportError:
_torch_available = False # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
import tensorflow as tf
assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
_tf_available = True # pylint: disable=invalid-name
logger.info("TensorFlow version {} available.".format(tf.__version__))
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
except (ImportError, AssertionError):
_tf_available = False # pylint: disable=invalid-name
hf_cache_home = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
default_datasets_cache_path = os.path.join(hf_cache_home, "datasets")
try:
from pathlib import Path
HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", default_datasets_cache_path))
except (AttributeError, ImportError):
HF_DATASETS_CACHE = os.getenv("HF_DATASETS_CACHE", default_datasets_cache_path)
S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets"
CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/nlp/datasets"
default_metrics_cache_path = os.path.join(hf_cache_home, "metrics")
try:
from pathlib import Path
HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", default_metrics_cache_path))
except (AttributeError, ImportError):
HF_METRICS_CACHE = os.getenv("HF_METRICS_CACHE", default_metrics_cache_path)
S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/nlp/metrics"
CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/nlp/metric"
INCOMPLETE_SUFFIX = ".incomplete"
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3", "gs", "hdfs")
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
if dataset:
endpoint = CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else S3_DATASETS_BUCKET_PREFIX
else:
endpoint = CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else S3_METRICS_BUCKET_PREFIX
return "/".join((endpoint, identifier, filename))
def hash_url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
If the url ends with .py, '.py' is appended to the filename so that the
cached file can still be identified (and loaded) as a Python script.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".py"):
filename += ".py"
return filename
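# Illustrative example: the cached filename is the sha256 hex digest of the url,
# optionally followed by '.' plus the hex digest of the etag, and keeps a '.py'
# suffix for Python scripts. The urls below are hypothetical placeholders.
def _hash_url_to_filename_example():  # illustrative helper
    name = hash_url_to_filename("https://example.com/data.csv", etag='"abc"')
    assert all(len(part) == 64 for part in name.split("."))
    assert hash_url_to_filename("https://example.com/script.py").endswith(".py")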
@dataclass
class DownloadConfig:
""" Configuration for our cached path manager
Args:
cache_dir: specify a cache directory to save the file to (overrides the default cache dir).
force_download: if True, re-download the file even if it's already cached in the cache dir.
resume_download: if True, resume the download if an incompletely received file is found.
local_files_only: if True, only look in the local cache and never try to download.
proxies: optional dict of proxies passed to the requests library.
user_agent: optional string or dict that will be appended to the user-agent on remote requests.
extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
file in a folder alongside the archive.
force_extract: if True, when extract_compressed_file is True and the archive was already extracted,
re-extract the archive and override the folder where it was extracted.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
def cached_path(url_or_filename, download_config=None, **download_kwargs,) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
ValueError: if it couldn't parse the url or filename correctly
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
cache_dir = download_config.cache_dir or HF_DATASETS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=download_config.force_download,
proxies=download_config.proxies,
resume_download=download_config.resume_download,
user_agent=download_config.user_agent,
local_files_only=download_config.local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError("Local file {} doesn't exist".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if download_config.extract_compressed_file and output_path is not None:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path) and not is_gzip(output_path):
return output_path
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
abs_output_path = os.path.abspath(output_path)
output_path_extracted = os.path.join(cache_dir, hash_url_to_filename(abs_output_path))
if (
os.path.isdir(output_path_extracted)
and os.listdir(output_path_extracted)
and not download_config.force_extract
) or (os.path.isfile(output_path_extracted) and not download_config.force_extract):
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted, exist_ok=True)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
elif is_gzip(output_path):
os.rmdir(output_path_extracted)
with gzip.open(output_path, "rb") as gzip_file:
with open(output_path_extracted, "wb") as extracted_file:
shutil.copyfileobj(gzip_file, extracted_file)
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
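# Minimal usage sketch of `cached_path` with a DownloadConfig (illustrative only; the
# url and cache directory below are hypothetical placeholders): download into a custom
# cache directory and extract the archive on the fly.
def _cached_path_example():  # illustrative helper
    config = DownloadConfig(cache_dir="/tmp/nlp_cache", extract_compressed_file=True)
    return cached_path("https://example.com/archive.zip", download_config=config)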
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None, cookies=None):
ua = "datasets/{}; python/{}".format(__version__, sys.version.split()[0])
if is_torch_available():
ua += "; torch/{}".format(torch.__version__)
if is_tf_available():
ua += "; tensorflow/{}".format(tf.__version__)
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
headers = {"user-agent": ua}
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = requests.get(url, stream=True, proxies=proxies, headers=headers, cookies=cookies)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
progress = tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc="Downloading",
disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent=None,
local_files_only=False,
) -> Optional[str]:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
"""
if cache_dir is None:
cache_dir = HF_DATASETS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
original_url = url # Some parameters may be added
connected = False
cookies = None
etag = None
if not local_files_only:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200: # ok
etag = response.headers.get("ETag")
for k, v in response.cookies.items():
# In some edge cases, we need to get a confirmation token
if k.startswith("download_warning") and "drive.google.com" in url:
url += "&confirm=" + v
cookies = response.cookies
connected = True
# In some edge cases, head request returns 400 but the connection is actually ok
elif (response.status_code == 400 and "firebasestorage.googleapis.com" in url) or (
response.status_code == 405 and "drive.google.com" in url
):
connected = True
logger.info("Couldn't get ETag version for url {}".format(url))
except (EnvironmentError, requests.exceptions.Timeout):
# not connected
pass
filename = hash_url_to_filename(original_url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# connected == False means we don't have a connection, or the url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if not connected:
if os.path.exists(cache_path):
return cache_path
if local_files_only:
raise FileNotFoundError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
raise ConnectionError("Couldn't reach {}".format(url))
# From now on, connected is True.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
# GET file object
http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, cookies=cookies)
logger.info("storing %s in cache at %s", url, cache_path)
shutil.move(temp_file.name, cache_path)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
def is_gzip(path: str) -> bool:
"""from https://stackoverflow.com/a/60634210"""
with gzip.open(path, "r") as fh:
try:
fh.read(1)
return True
except OSError:
return False
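# Illustrative example: `hf_bucket_url` builds the remote location of a dataset or
# metric script, either on the S3 bucket or on the CloudFront mirror ("squad" and
# "bleu" are just example identifiers).
if __name__ == "__main__":
    print(hf_bucket_url("squad", "squad.py"))
    print(hf_bucket_url("squad", "squad.py", use_cdn=True))
    print(hf_bucket_url("bleu", "bleu.py", dataset=False))  # metric script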
|
nlp-master
|
src/nlp/utils/file_utils.py
|
import logging
import os
from hashlib import sha256
from typing import Optional
logger = logging.getLogger(__name__)
class ChecksumVerificationException(Exception):
"""Exceptions during checksums verifications of downloaded files."""
class UnexpectedDownloadedFile(ChecksumVerificationException):
"""Some downloaded files were not expected."""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
"""Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationException):
"""The downloaded file checksum don't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict):
if expected_checksums is None:
logger.info("Unable to verify checksums.")
return
if len(set(expected_checksums) - set(recorded_checksums)) > 0:
raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
if len(set(recorded_checksums) - set(expected_checksums)) > 0:
raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
if len(bad_urls) > 0:
raise NonMatchingChecksumError(str(bad_urls))
logger.info("All the checksums matched successfully.")
class SplitsVerificationException(Exception):
"""Exceptions during splis verifications"""
class UnexpectedSplits(SplitsVerificationException):
"""The expected splits of the downloaded file is missing."""
class ExpectedMoreSplits(SplitsVerificationException):
"""Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationException):
"""The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
if expected_splits is None:
logger.info("Unable to verify splits sizes.")
return
if len(set(expected_splits) - set(recorded_splits)) > 0:
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
if len(set(recorded_splits) - set(expected_splits)) > 0:
raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
bad_splits = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(bad_splits) > 0:
raise NonMatchingSplitsSizesError(str(bad_splits))
logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str) -> dict:
"""Compute the file size and the sha256 checksum of a file"""
m = sha256()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
m.update(chunk)
return {"num_bytes": os.path.getsize(path), "checksum": m.hexdigest()}
|
nlp-master
|
src/nlp/utils/info_utils.py
|
import logging
import os
from argparse import ArgumentParser
from nlp.commands import BaseTransformersCLICommand
from nlp.load import import_main_class, prepare_module
from nlp.utils import MockDownloadManager
logger = logging.getLogger(__name__)
def test_command_factory(args):
return DummyDataCommand(args.path_to_dataset, args.requires_manual,)
class DummyDataCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("dummy_data")
test_parser.add_argument("--requires_manual", action="store_true", help="Dataset requires manual data")
test_parser.add_argument("path_to_dataset", type=str, help="Name of the dataset to download")
test_parser.set_defaults(func=test_command_factory)
def __init__(
self, path_to_dataset: str, requires_manual: bool,
):
self._path_to_dataset = path_to_dataset
self._requires_manual = requires_manual
self._dataset_name = path_to_dataset.split("/")[-2]
def run(self):
module_path = prepare_module(self._path_to_dataset)
builder_cls = import_main_class(module_path)
# use `None` as config if no configs
configs = builder_cls.BUILDER_CONFIGS or [None]
for config in configs:
if config is None:
name = None
version = builder_cls.VERSION
else:
version = config.version
name = config.name
dataset_builder = builder_cls(name=name)
mock_dl_manager = MockDownloadManager(
dataset_name=self._dataset_name, config=config, version=version, is_local=True
)
dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
os.makedirs(dummy_data_folder, exist_ok=True)
try:
generator_splits = dataset_builder._split_generators(mock_dl_manager)
except FileNotFoundError as e:
print(
f"Dataset {self._dataset_name} with config {config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
)
files_to_create = set()
split_names = []
dummy_file_name = mock_dl_manager.dummy_file_name
for split in generator_splits:
logger.info(f"Collecting dummy data file paths to create for {split.name}")
split_names.append(split.name)
gen_kwargs = split.gen_kwargs
generator = dataset_builder._generate_examples(**gen_kwargs)
try:
dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
config_string = f"config {config.name} of " if config is not None else ""
dummy_data_guidance_print += (
"- In order to create the dummy data for "
+ config_string
+ f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
)
# trigger generate function
for key, record in generator:
pass
dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"
except FileNotFoundError as e:
files_to_create.add(e.filename)
split_names = ", ".join(split_names)
if len(files_to_create) > 0:
# no glob.glob(...) in `_generate_examples(...)`
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
files_string = dummy_file_name
else:
files_string = ", ".join(files_to_create)
dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"
dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"
dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"-After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"
dummy_data_guidance_print += (
f"-You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
else:
dummy_data_guidance_print += f"-After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"
dummy_data_guidance_print += f"-You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
dummy_data_guidance_print += (
f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
)
dummy_data_guidance_print += 83 * "=" + "\n"
print(dummy_data_guidance_print)
|
nlp-master
|
src/nlp/commands/dummy_data.py
|
import os
import sys
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from requests.exceptions import HTTPError
from nlp.commands import BaseTransformersCLICommand
from nlp.hf_api import HfApi, HfFolder
UPLOAD_MAX_FILES = 15
class UserCommands(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
login_parser.set_defaults(func=lambda args: LoginCommand(args))
whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
logout_parser = parser.add_parser("logout", help="Log out")
logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
# s3 dataset
s3_parser = parser.add_parser(
"s3_datasets", help="{ls, rm} Commands to interact with the files you upload on S3."
)
s3_subparsers = s3_parser.add_subparsers(help="s3 related commands")
ls_parser = s3_subparsers.add_parser("ls")
ls_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
ls_parser.set_defaults(func=lambda args: ListObjsCommand(args, file_types="datasets"))
rm_parser = s3_subparsers.add_parser("rm")
rm_parser.add_argument("filename", type=str, help="individual object filename to delete from S3.")
rm_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args, file_types="datasets"))
# s3 metrics
s3_parser = parser.add_parser(
"s3_metrics", help="{ls, rm} Commands to interact with the files you upload on S3."
)
s3_subparsers = s3_parser.add_subparsers(help="s3 related commands")
ls_parser = s3_subparsers.add_parser("ls")
ls_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
ls_parser.set_defaults(func=lambda args: ListObjsCommand(args, file_types="metrics"))
rm_parser = s3_subparsers.add_parser("rm")
rm_parser.add_argument("filename", type=str, help="individual object filename to delete from S3.")
rm_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args, file_types="metrics"))
# upload dataset
upload_dataset_parser = parser.add_parser("upload_dataset", help="Upload a dataset to S3.")
upload_dataset_parser.add_argument(
"path", type=str, help="Local path of the dataset folder or individual file to upload."
)
upload_dataset_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
upload_dataset_parser.add_argument(
"--filename", type=str, default=None, help="Optional: override individual object filename on S3."
)
upload_dataset_parser.set_defaults(func=lambda args: UploadCommand(args, file_types="datasets"))
# upload metric
upload_metric_parser = parser.add_parser("upload_metric", help="Upload a metric to S3.")
upload_metric_parser.add_argument(
"path", type=str, help="Local path of the metric folder or individual file to upload."
)
upload_metric_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
upload_metric_parser.add_argument(
"--filename", type=str, default=None, help="Optional: override individual object filename on S3."
)
upload_metric_parser.set_defaults(func=lambda args: UploadCommand(args, file_types="metrics"))
class ANSI:
"""
Helper for en.wikipedia.org/wiki/ANSI_escape_code
"""
_bold = "\u001b[1m"
_red = "\u001b[31m"
_reset = "\u001b[0m"
@classmethod
def bold(cls, s):
return "{}{}{}".format(cls._bold, s, cls._reset)
@classmethod
def red(cls, s):
return "{}{}{}".format(cls._bold + cls._red, s, cls._reset)
class BaseUserCommand:
def __init__(self, args):
self.args = args
self._api = HfApi()
class LoginCommand(BaseUserCommand):
def run(self):
print(
"""
_| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
_| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
_| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
"""
)
username = input("Username: ")
password = getpass()
try:
token = self._api.login(username, password)
except HTTPError as e:
# probably invalid credentials, display error message.
print(e)
print(ANSI.red(e.response.text))
exit(1)
HfFolder.save_token(token)
print("Login successful")
print("Your token:", token, "\n")
print("Your token has been saved to", HfFolder.path_token)
class WhoamiCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
try:
user, orgs = self._api.whoami(token)
print(user)
if orgs:
print(ANSI.bold("orgs: "), ",".join(orgs))
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
class LogoutCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
HfFolder.delete_token()
self._api.logout(token)
print("Successfully logged out.")
class ListObjsCommand(BaseUserCommand):
def __init__(self, args, file_types: str):
super().__init__(args)
self.file_types = file_types
def tabulate(self, rows: List[List[Union[str, int]]], headers: List[str]) -> str:
"""
Inspired by:
stackoverflow.com/a/8356620/593036
stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
"""
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*["-" * w for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return "\n".join(lines)
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
objs = self._api.list_objs(token, organization=self.args.organization, file_types=self.file_types)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
if len(objs) == 0:
print("No shared file yet")
exit()
rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]
print(self.tabulate(rows, headers=["Filename", "LastModified", "ETag", "Size"]))
class DeleteObjCommand(BaseUserCommand):
def __init__(self, args, file_types):
super().__init__(args)
self.file_types = file_types
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
self._api.delete_obj(
token, filename=self.args.filename, organization=self.args.organization, file_types=self.file_types
)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
print("Done")
class UploadCommand(BaseUserCommand):
def __init__(self, args, file_types):
super().__init__(args)
self.file_types = file_types
def walk_dir(self, rel_path: str):
"""
Recursively list all files in a folder.
"""
entries: List[os.DirEntry] = list(os.scandir(rel_path))
files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()] # (filepath, filename)
for f in entries:
if f.is_dir():
files += self.walk_dir(f.path)
return files
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
user, _ = self._api.whoami(token)
namespace = self.args.organization if self.args.organization is not None else user
local_path = os.path.abspath(self.args.path)
if os.path.isdir(local_path):
if self.args.filename is not None:
raise ValueError("Cannot specify a filename override when uploading a folder.")
rel_path = os.path.basename(local_path)
files = self.walk_dir(rel_path)
elif os.path.isfile(local_path):
filename = self.args.filename if self.args.filename is not None else os.path.basename(local_path)
files = [(local_path, filename)]
else:
raise ValueError("Not a valid file or directory: {}".format(local_path))
if sys.platform == "win32":
files = [(filepath, filename.replace(os.sep, "/")) for filepath, filename in files]
if len(files) > UPLOAD_MAX_FILES:
print(
"About to upload {} files to S3. This is probably wrong. Please filter files before uploading.".format(
ANSI.bold(len(files))
)
)
exit(1)
for filepath, filename in files:
print(
"About to upload file {} to S3 under filename {} and namespace {}".format(
ANSI.bold(filepath), ANSI.bold(filename), ANSI.bold(namespace)
)
)
choice = input("Proceed? [Y/n] ").lower()
if not (choice == "" or choice == "y" or choice == "yes"):
print("Abort")
exit()
print(ANSI.bold("Uploading... This might take a while if files are large"))
for filepath, filename in files:
try:
access_url = self._api.presign_and_upload(
token=token,
filename=filename,
filepath=filepath,
organization=self.args.organization,
file_types=self.file_types,
)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
print("Your file now lives at:")
print(access_url)
|
nlp-master
|
src/nlp/commands/user.py
|
import os
from argparse import ArgumentParser
from shutil import copyfile
from typing import List
from nlp.builder import FORCE_REDOWNLOAD, HF_DATASETS_CACHE, REUSE_CACHE_IF_EXISTS, DatasetBuilder, DownloadConfig
from nlp.commands import BaseTransformersCLICommand
from nlp.info import DATASET_INFOS_DICT_FILE_NAME
from nlp.load import import_main_class, prepare_module
def run_beam_command_factory(args):
return RunBeamCommand(
args.dataset,
args.name,
args.cache_dir,
args.beam_pipeline_options,
args.data_dir,
args.all_configs,
args.save_infos,
args.ignore_verifications,
args.force_redownload,
)
class RunBeamCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_beam_parser = parser.add_parser("run_beam")
run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
run_beam_parser.add_argument(
"--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored.",
)
run_beam_parser.add_argument(
"--beam_pipeline_options",
type=str,
default="",
help="Beam pipeline options, separated by commas. Example: `--beam_pipeline_options=job_name=my-job,project=my-project`",
)
run_beam_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from.",
)
run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
run_beam_parser.add_argument("--save_infos", action="store_true", help="Save the dataset infos file")
run_beam_parser.add_argument(
"--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
)
run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
run_beam_parser.set_defaults(func=run_beam_command_factory)
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
beam_pipeline_options: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._beam_pipeline_options = beam_pipeline_options
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
def run(self):
import apache_beam as beam
if self._name is not None and self._all_configs:
print("Both parameters `name` and `all_configs` can't be used at once.")
exit(1)
path, name = self._dataset, self._name
module_path = prepare_module(path)
builder_cls = import_main_class(module_path)
builders: List[DatasetBuilder] = []
if self._beam_pipeline_options:
beam_options = beam.options.pipeline_options.PipelineOptions(
flags=["--%s" % opt.strip() for opt in self._beam_pipeline_options.split(",") if opt]
)
else:
beam_options = None
if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
for config in builder_cls.BUILDER_CONFIGS:
builders.append(
builder_cls(
name=config.name, data_dir=self._data_dir, beam_options=beam_options, cache_dir=self._cache_dir
)
)
else:
builders.append(
builder_cls(name=name, data_dir=self._data_dir, beam_options=beam_options, cache_dir=self._cache_dir)
)
for builder in builders:
builder.download_and_prepare(
download_mode=REUSE_CACHE_IF_EXISTS if not self._force_redownload else FORCE_REDOWNLOAD,
download_config=DownloadConfig(cache_dir=os.path.join(HF_DATASETS_CACHE, "downloads")),
save_infos=self._save_infos,
ignore_verifications=self._ignore_verifications,
try_from_hf_gcs=False,
)
print("Apache beam run successful.")
# If save_infos=True, the dataset infos file is created next to the loaded module file.
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), DATASET_INFOS_DICT_FILE_NAME)
name = list(filter(lambda x: x, path.split("/")))[-1] + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
else: # in case of a remote dataset
print("Dataset Infos file saved at {}".format(dataset_infos_path))
exit(1)
# Move datasetinfo back to the user
user_dataset_infos_path = os.path.join(dataset_dir, DATASET_INFOS_DICT_FILE_NAME)
copyfile(dataset_infos_path, user_dataset_infos_path)
print("Dataset Infos file saved at {}".format(user_dataset_infos_path))
|
nlp-master
|
src/nlp/commands/run_beam.py
|
import platform
from argparse import ArgumentParser
from nlp import __version__ as version
from nlp import is_tf_available, is_torch_available
from nlp.commands import BaseTransformersCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env")
download_parser.set_defaults(func=info_command_factory)
def run(self):
pt_version = "not installed"
pt_cuda_available = "NA"
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
tf_version = "not installed"
tf_cuda_available = "NA"
if is_tf_available():
import tensorflow as tf
tf_version = tf.__version__
try:
# deprecated in v2.1
tf_cuda_available = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
info = {
"`nlp` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": "{} ({})".format(pt_version, pt_cuda_available),
"Tensorflow version (GPU?)": "{} ({})".format(tf_version, tf_cuda_available),
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join(["- {}: {}".format(prop, val) for prop, val in d.items()]) + "\n"
|
nlp-master
|
src/nlp/commands/env.py
|
from argparse import ArgumentParser
from nlp.commands import BaseTransformersCLICommand
def download_command_factory(args):
return DownloadCommand(args.model, args.cache_dir, args.force)
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("download")
download_parser.add_argument(
"--cache-dir", type=str, default=None, help="Path to location to store the models"
)
download_parser.add_argument(
"--force", action="store_true", help="Force the model to be download even if already in cache-dir"
)
download_parser.add_argument("model", type=str, help="Name of the model to download")
download_parser.set_defaults(func=download_command_factory)
def __init__(self, model: str, cache: str, force: bool):
self._model = model
self._cache = cache
self._force = force
def run(self):
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
AutoTokenizer.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
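# Hedged usage sketch (not part of the original file): invoking the command directly,
# bypassing argparse. The model name below is an arbitrary example; running this needs
# the `transformers` package and network access to actually download the weights.
if __name__ == "__main__":
    DownloadCommand("distilbert-base-uncased", cache=None, force=False).run()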
|
nlp-master
|
src/nlp/commands/download.py
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from logging import getLogger
from nlp.commands import BaseTransformersCLICommand
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"nlp"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"nlp.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"nlp.Value('string')"),
(r"tfds\.features\.Text\(", r"nlp.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=nlp.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace NLP Authors"),
(r"tfds\.", r"nlp."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
"""
    Factory function used to convert a TensorFlow Datasets dataset script into a HuggingFace NLP dataset script.
    :return: ConvertCommand
"""
return ConvertCommand(args.tfds_path, args.nlp_directory)
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
        Register this command to argparse so it's available for the nlp-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
train_parser = parser.add_parser(
"convert", help="CLI tool to convert a (nlp) TensorFlow-Dataset in a HuggingFace-NLP dataset.",
)
train_parser.add_argument(
"--tfds_path",
type=str,
required=True,
help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
)
train_parser.add_argument(
"--nlp_directory", type=str, required=True, help="Path to the HuggingFace NLP folder."
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(self, tfds_path: str, nlp_directory: str, *args):
self._logger = getLogger("nlp-cli/converting")
self._tfds_path = tfds_path
self._nlp_directory = nlp_directory
def run(self):
if os.path.isdir(self._tfds_path):
abs_tfds_path = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
abs_tfds_path = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
abs_nlp_path = os.path.abspath(self._nlp_directory)
self._logger.info("Converting datasets from %s to %s", abs_tfds_path, abs_nlp_path)
utils_files = []
with_manual_update = []
imports_to_builder_map = {}
if os.path.isdir(self._tfds_path):
file_names = os.listdir(abs_tfds_path)
else:
file_names = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info("Looking at file %s", f_name)
input_file = os.path.join(abs_tfds_path, f_name)
output_file = os.path.join(abs_nlp_path, f_name)
if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(input_file, "r") as f:
lines = f.readlines()
out_lines = []
is_builder = False
needs_manual_update = False
tfds_imports = []
for line in lines:
out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
out_line = "import nlp\n"
elif "import tensorflow" in out_line:
# order is important here
out_line = ""
continue
elif "from absl import logging" in out_line:
out_line = "import logging\n"
elif any(expression in out_line for expression in TO_HIGHLIGHT):
needs_manual_update = True
to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
out_lines.append(out_line)
out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
out_line = re.sub(pattern, replacement, out_line)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
out_line = "from . import " + match.group(1)
                # Check we have not forgotten anything
assert (
"tf." not in out_line and "tfds." not in out_line and "tensorflow_datasets" not in out_line
), f"Error converting {out_line.strip()}"
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
is_builder = True
out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
dir_name = f_name.replace(".py", "")
output_dir = os.path.join(abs_nlp_path, dir_name)
output_file = os.path.join(output_dir, f_name)
os.makedirs(output_dir, exist_ok=True)
self._logger.info("Adding directory %s", output_dir)
imports_to_builder_map.update(dict((imp, output_dir) for imp in tfds_imports))
else:
# Utilities will be moved at the end
utils_files.append(output_file)
if needs_manual_update:
with_manual_update.append(output_file)
with open(output_file, "w") as f:
f.writelines(out_lines)
self._logger.info("Converted in %s", output_file)
for utils_file in utils_files:
try:
f_name = os.path.basename(utils_file)
dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
self._logger.info("Moving %s to %s", utils_file, dest_folder)
shutil.copy(utils_file, dest_folder)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
)
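# Hedged usage sketch (not part of the original file): applying the TO_CONVERT regex pairs
# to one line of a hypothetical tfds dataset script, to show the kind of rewriting that
# run() performs line by line. The sample string below is an illustrative assumption.
if __name__ == "__main__":
    sample = 'features=tfds.features.FeaturesDict({"text": tfds.features.Text()}),'
    for pattern, replacement in TO_CONVERT:
        sample = re.sub(pattern, replacement, sample)
    print(sample)  # features=nlp.Features({"text": nlp.Value('string')}),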
|
nlp-master
|
src/nlp/commands/convert.py
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
|
nlp-master
|
src/nlp/commands/__init__.py
|
import os
from argparse import ArgumentParser
from shutil import copyfile
from typing import List
from nlp.builder import FORCE_REDOWNLOAD, REUSE_CACHE_IF_EXISTS, DatasetBuilder, DownloadConfig
from nlp.commands import BaseTransformersCLICommand
from nlp.info import DATASET_INFOS_DICT_FILE_NAME
from nlp.load import import_main_class, prepare_module
def test_command_factory(args):
return TestCommand(
args.dataset,
args.name,
args.cache_dir,
args.data_dir,
args.all_configs,
args.save_infos,
args.ignore_verifications,
args.force_redownload,
)
class TestCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("test")
test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
test_parser.add_argument(
"--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored.",
)
test_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from.",
)
test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
test_parser.add_argument("--save_infos", action="store_true", help="Save the dataset infos file")
test_parser.add_argument(
"--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
)
test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
test_parser.set_defaults(func=test_command_factory)
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
def run(self):
if self._name is not None and self._all_configs:
print("Both parameters `config` and `all_configs` can't be used at once.")
exit(1)
path, name = self._dataset, self._name
module_path = prepare_module(path)
builder_cls = import_main_class(module_path)
builders: List[DatasetBuilder] = []
if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
for config in builder_cls.BUILDER_CONFIGS:
builders.append(builder_cls(name=config.name, data_dir=self._data_dir))
else:
builders.append(builder_cls(name=name, data_dir=self._data_dir))
for builder in builders:
builder.download_and_prepare(
download_config=DownloadConfig(cache_dir=self._cache_dir),
download_mode=REUSE_CACHE_IF_EXISTS if not self._force_redownload else FORCE_REDOWNLOAD,
save_infos=self._save_infos,
ignore_verifications=self._ignore_verifications,
)
print("Test successful.")
# If save_infos=True, the dataset infos file is created next to the loaded module file.
# Let's move it to the original directory of the dataset script, to allow the user to
        # upload it to S3 at the same time afterwards.
if self._save_infos:
dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), DATASET_INFOS_DICT_FILE_NAME)
name = list(filter(lambda x: x, path.split("/")))[-1] + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
else: # in case of a remote dataset
print("Dataset Infos file saved at {}".format(dataset_infos_path))
exit(1)
            # Move the dataset infos file back to the user's dataset directory
user_dataset_infos_path = os.path.join(dataset_dir, DATASET_INFOS_DICT_FILE_NAME)
copyfile(dataset_infos_path, user_dataset_infos_path)
print("Dataset Infos file saved at {}".format(user_dataset_infos_path))
|
nlp-master
|
src/nlp/commands/test.py
|
from setuptools import setup, find_packages
setup(
name = 'video-diffusion-pytorch',
packages = find_packages(exclude=[]),
version = '0.6.0',
license='MIT',
description = 'Video Diffusion - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/video-diffusion-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion probabilistic models',
'video generation'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'rotary-embedding-torch',
'torch>=1.10',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
video-diffusion-pytorch-main
|
setup.py
|
from video_diffusion_pytorch.video_diffusion_pytorch import Unet3D, GaussianDiffusion, Trainer
|
video-diffusion-pytorch-main
|
video_diffusion_pytorch/__init__.py
|
import math
import copy
import torch
from torch import nn, einsum
import torch.nn.functional as F
from functools import partial
from torch.utils import data
from pathlib import Path
from torch.optim import Adam
from torchvision import transforms as T, utils
from torch.cuda.amp import autocast, GradScaler
from PIL import Image
from tqdm import tqdm
from einops import rearrange
from einops_exts import check_shape, rearrange_many
from rotary_embedding_torch import RotaryEmbedding
from video_diffusion_pytorch.text import tokenize, bert_embed, BERT_MODEL_DIM
# helpers functions
def exists(x):
return x is not None
def noop(*args, **kwargs):
pass
def is_odd(n):
return (n % 2) == 1
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
def is_list_str(x):
if not isinstance(x, (list, tuple)):
return False
    return all(isinstance(el, str) for el in x)
# relative positional bias
class RelativePositionBias(nn.Module):
def __init__(
self,
heads = 8,
num_buckets = 32,
max_distance = 128
):
super().__init__()
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, n, device):
q_pos = torch.arange(n, dtype = torch.long, device = device)
k_pos = torch.arange(n, dtype = torch.long, device = device)
rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
return rearrange(values, 'i j h -> h i j')
# small helper modules
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_model_average(self, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.update_average(old_weight, up_weight)
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
def Upsample(dim):
return nn.ConvTranspose3d(dim, dim, (1, 4, 4), (1, 2, 2), (0, 1, 1))
def Downsample(dim):
return nn.Conv3d(dim, dim, (1, 4, 4), (1, 2, 2), (0, 1, 1))
class LayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
def forward(self, x):
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (var + self.eps).sqrt() * self.gamma
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv3d(dim, dim_out, (1, 3, 3), padding = (0, 1, 1))
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
return self.act(x)
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv3d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp):
assert exists(time_emb), 'time emb must be passed in'
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class SpatialLinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, f, h, w = x.shape
x = rearrange(x, 'b c f h w -> (b f) c h w')
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = rearrange_many(qkv, 'b (h c) x y -> b h c (x y)', h = self.heads)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
out = self.to_out(out)
return rearrange(out, '(b f) c h w -> b c f h w', b = b)
# attention along space and time
class EinopsToAndFrom(nn.Module):
def __init__(self, from_einops, to_einops, fn):
super().__init__()
self.from_einops = from_einops
self.to_einops = to_einops
self.fn = fn
def forward(self, x, **kwargs):
shape = x.shape
reconstitute_kwargs = dict(tuple(zip(self.from_einops.split(' '), shape)))
x = rearrange(x, f'{self.from_einops} -> {self.to_einops}')
x = self.fn(x, **kwargs)
x = rearrange(x, f'{self.to_einops} -> {self.from_einops}', **reconstitute_kwargs)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 4,
dim_head = 32,
rotary_emb = None
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.rotary_emb = rotary_emb
self.to_qkv = nn.Linear(dim, hidden_dim * 3, bias = False)
self.to_out = nn.Linear(hidden_dim, dim, bias = False)
def forward(
self,
x,
pos_bias = None,
focus_present_mask = None
):
n, device = x.shape[-2], x.device
qkv = self.to_qkv(x).chunk(3, dim = -1)
if exists(focus_present_mask) and focus_present_mask.all():
# if all batch samples are focusing on present
# it would be equivalent to passing that token's values through to the output
values = qkv[-1]
return self.to_out(values)
# split out heads
q, k, v = rearrange_many(qkv, '... n (h d) -> ... h n d', h = self.heads)
# scale
q = q * self.scale
# rotate positions into queries and keys for time attention
if exists(self.rotary_emb):
q = self.rotary_emb.rotate_queries_or_keys(q)
k = self.rotary_emb.rotate_queries_or_keys(k)
# similarity
sim = einsum('... h i d, ... h j d -> ... h i j', q, k)
# relative positional bias
if exists(pos_bias):
sim = sim + pos_bias
if exists(focus_present_mask) and not (~focus_present_mask).all():
attend_all_mask = torch.ones((n, n), device = device, dtype = torch.bool)
attend_self_mask = torch.eye(n, device = device, dtype = torch.bool)
mask = torch.where(
rearrange(focus_present_mask, 'b -> b 1 1 1 1'),
rearrange(attend_self_mask, 'i j -> 1 1 1 i j'),
rearrange(attend_all_mask, 'i j -> 1 1 1 i j'),
)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# numerical stability
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
# aggregate values
out = einsum('... h i j, ... h j d -> ... h i d', attn, v)
out = rearrange(out, '... h n d -> ... n (h d)')
return self.to_out(out)
# model
class Unet3D(nn.Module):
def __init__(
self,
dim,
cond_dim = None,
out_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
attn_heads = 8,
attn_dim_head = 32,
use_bert_text_cond = False,
init_dim = None,
init_kernel_size = 7,
use_sparse_linear_attn = True,
block_type = 'resnet',
resnet_groups = 8
):
super().__init__()
self.channels = channels
# temporal attention and its relative positional encoding
rotary_emb = RotaryEmbedding(min(32, attn_dim_head))
temporal_attn = lambda dim: EinopsToAndFrom('b c f h w', 'b (h w) f c', Attention(dim, heads = attn_heads, dim_head = attn_dim_head, rotary_emb = rotary_emb))
self.time_rel_pos_bias = RelativePositionBias(heads = attn_heads, max_distance = 32) # realistically will not be able to generate that many frames of video... yet
# initial conv
init_dim = default(init_dim, dim)
assert is_odd(init_kernel_size)
init_padding = init_kernel_size // 2
self.init_conv = nn.Conv3d(channels, init_dim, (1, init_kernel_size, init_kernel_size), padding = (0, init_padding, init_padding))
self.init_temporal_attn = Residual(PreNorm(init_dim, temporal_attn(init_dim)))
# dimensions
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
# time conditioning
time_dim = dim * 4
self.time_mlp = nn.Sequential(
SinusoidalPosEmb(dim),
nn.Linear(dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# text conditioning
self.has_cond = exists(cond_dim) or use_bert_text_cond
cond_dim = BERT_MODEL_DIM if use_bert_text_cond else cond_dim
self.null_cond_emb = nn.Parameter(torch.randn(1, cond_dim)) if self.has_cond else None
cond_dim = time_dim + int(cond_dim or 0)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
# block type
block_klass = partial(ResnetBlock, groups = resnet_groups)
block_klass_cond = partial(block_klass, time_emb_dim = cond_dim)
# modules for all layers
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass_cond(dim_in, dim_out),
block_klass_cond(dim_out, dim_out),
Residual(PreNorm(dim_out, SpatialLinearAttention(dim_out, heads = attn_heads))) if use_sparse_linear_attn else nn.Identity(),
Residual(PreNorm(dim_out, temporal_attn(dim_out))),
Downsample(dim_out) if not is_last else nn.Identity()
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass_cond(mid_dim, mid_dim)
spatial_attn = EinopsToAndFrom('b c f h w', 'b f (h w) c', Attention(mid_dim, heads = attn_heads))
self.mid_spatial_attn = Residual(PreNorm(mid_dim, spatial_attn))
self.mid_temporal_attn = Residual(PreNorm(mid_dim, temporal_attn(mid_dim)))
self.mid_block2 = block_klass_cond(mid_dim, mid_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind >= (num_resolutions - 1)
self.ups.append(nn.ModuleList([
block_klass_cond(dim_out * 2, dim_in),
block_klass_cond(dim_in, dim_in),
Residual(PreNorm(dim_in, SpatialLinearAttention(dim_in, heads = attn_heads))) if use_sparse_linear_attn else nn.Identity(),
Residual(PreNorm(dim_in, temporal_attn(dim_in))),
Upsample(dim_in) if not is_last else nn.Identity()
]))
out_dim = default(out_dim, channels)
self.final_conv = nn.Sequential(
block_klass(dim * 2, dim),
nn.Conv3d(dim, out_dim, 1)
)
def forward_with_cond_scale(
self,
*args,
cond_scale = 2.,
**kwargs
):
logits = self.forward(*args, null_cond_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_cond:
return logits
null_logits = self.forward(*args, null_cond_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
time,
cond = None,
null_cond_prob = 0.,
focus_present_mask = None,
prob_focus_present = 0. # probability at which a given batch sample will focus on the present (0. is all off, 1. is completely arrested attention across time)
):
assert not (self.has_cond and not exists(cond)), 'cond must be passed in if cond_dim specified'
batch, device = x.shape[0], x.device
focus_present_mask = default(focus_present_mask, lambda: prob_mask_like((batch,), prob_focus_present, device = device))
time_rel_pos_bias = self.time_rel_pos_bias(x.shape[2], device = x.device)
x = self.init_conv(x)
x = self.init_temporal_attn(x, pos_bias = time_rel_pos_bias)
r = x.clone()
t = self.time_mlp(time) if exists(self.time_mlp) else None
# classifier free guidance
if self.has_cond:
batch, device = x.shape[0], x.device
mask = prob_mask_like((batch,), null_cond_prob, device = device)
cond = torch.where(rearrange(mask, 'b -> b 1'), self.null_cond_emb, cond)
t = torch.cat((t, cond), dim = -1)
h = []
for block1, block2, spatial_attn, temporal_attn, downsample in self.downs:
x = block1(x, t)
x = block2(x, t)
x = spatial_attn(x)
x = temporal_attn(x, pos_bias = time_rel_pos_bias, focus_present_mask = focus_present_mask)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_spatial_attn(x)
x = self.mid_temporal_attn(x, pos_bias = time_rel_pos_bias, focus_present_mask = focus_present_mask)
x = self.mid_block2(x, t)
for block1, block2, spatial_attn, temporal_attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = block2(x, t)
x = spatial_attn(x)
x = temporal_attn(x, pos_bias = time_rel_pos_bias, focus_present_mask = focus_present_mask)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.9999)
class GaussianDiffusion(nn.Module):
def __init__(
self,
denoise_fn,
*,
image_size,
num_frames,
text_use_bert_cls = False,
channels = 3,
timesteps = 1000,
loss_type = 'l1',
use_dynamic_thres = False, # from the Imagen paper
dynamic_thres_percentile = 0.9
):
super().__init__()
self.channels = channels
self.image_size = image_size
self.num_frames = num_frames
self.denoise_fn = denoise_fn
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, axis=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.loss_type = loss_type
# register buffer helper function that casts float64 to float32
register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# text conditioning parameters
self.text_use_bert_cls = text_use_bert_cls
# dynamic thresholding when sampling
self.use_dynamic_thres = use_dynamic_thres
self.dynamic_thres_percentile = dynamic_thres_percentile
def q_mean_variance(self, x_start, t):
mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, clip_denoised: bool, cond = None, cond_scale = 1.):
x_recon = self.predict_start_from_noise(x, t=t, noise = self.denoise_fn.forward_with_cond_scale(x, t, cond = cond, cond_scale = cond_scale))
if clip_denoised:
s = 1.
if self.use_dynamic_thres:
s = torch.quantile(
rearrange(x_recon, 'b ... -> b (...)').abs(),
self.dynamic_thres_percentile,
dim = -1
)
s.clamp_(min = 1.)
s = s.view(-1, *((1,) * (x_recon.ndim - 1)))
# clip by threshold, depending on whether static or dynamic
x_recon = x_recon.clamp(-s, s) / s
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.inference_mode()
def p_sample(self, x, t, cond = None, cond_scale = 1., clip_denoised = True):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x = x, t = t, clip_denoised = clip_denoised, cond = cond, cond_scale = cond_scale)
noise = torch.randn_like(x)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.inference_mode()
def p_sample_loop(self, shape, cond = None, cond_scale = 1.):
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='sampling loop time step', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), cond = cond, cond_scale = cond_scale)
return unnormalize_img(img)
@torch.inference_mode()
def sample(self, cond = None, cond_scale = 1., batch_size = 16):
device = next(self.denoise_fn.parameters()).device
if is_list_str(cond):
cond = bert_embed(tokenize(cond)).to(device)
batch_size = cond.shape[0] if exists(cond) else batch_size
image_size = self.image_size
channels = self.channels
num_frames = self.num_frames
return self.p_sample_loop((batch_size, channels, num_frames, image_size, image_size), cond = cond, cond_scale = cond_scale)
@torch.inference_mode()
def interpolate(self, x1, x2, t = None, lam = 0.5):
b, *_, device = *x1.shape, x1.device
t = default(t, self.num_timesteps - 1)
assert x1.shape == x2.shape
t_batched = torch.stack([torch.tensor(t, device=device)] * b)
xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2))
img = (1 - lam) * xt1 + lam * xt2
for i in tqdm(reversed(range(0, t)), desc='interpolation sample time step', total=t):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))
return img
def q_sample(self, x_start, t, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, cond = None, noise = None, **kwargs):
b, c, f, h, w, device = *x_start.shape, x_start.device
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
if is_list_str(cond):
cond = bert_embed(tokenize(cond), return_cls_repr = self.text_use_bert_cls)
cond = cond.to(device)
x_recon = self.denoise_fn(x_noisy, t, cond = cond, **kwargs)
if self.loss_type == 'l1':
loss = F.l1_loss(noise, x_recon)
elif self.loss_type == 'l2':
loss = F.mse_loss(noise, x_recon)
else:
raise NotImplementedError()
return loss
def forward(self, x, *args, **kwargs):
b, device, img_size, = x.shape[0], x.device, self.image_size
check_shape(x, 'b c f h w', c = self.channels, f = self.num_frames, h = img_size, w = img_size)
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
x = normalize_img(x)
return self.p_losses(x, t, *args, **kwargs)
# trainer class
CHANNELS_TO_MODE = {
1 : 'L',
3 : 'RGB',
4 : 'RGBA'
}
def seek_all_images(img, channels = 3):
assert channels in CHANNELS_TO_MODE, f'channels {channels} invalid'
mode = CHANNELS_TO_MODE[channels]
i = 0
while True:
try:
img.seek(i)
yield img.convert(mode)
except EOFError:
break
i += 1
# tensor of shape (channels, frames, height, width) -> gif
def video_tensor_to_gif(tensor, path, duration = 120, loop = 0, optimize = True):
images = map(T.ToPILImage(), tensor.unbind(dim = 1))
first_img, *rest_imgs = images
first_img.save(path, save_all = True, append_images = rest_imgs, duration = duration, loop = loop, optimize = optimize)
return images
# gif -> (channels, frame, height, width) tensor
def gif_to_tensor(path, channels = 3, transform = T.ToTensor()):
img = Image.open(path)
tensors = tuple(map(transform, seek_all_images(img, channels = channels)))
return torch.stack(tensors, dim = 1)
def identity(t, *args, **kwargs):
return t
def normalize_img(t):
return t * 2 - 1
def unnormalize_img(t):
return (t + 1) * 0.5
def cast_num_frames(t, *, frames):
f = t.shape[1]
if f == frames:
return t
if f > frames:
return t[:, :frames]
return F.pad(t, (0, 0, 0, 0, 0, frames - f))
class Dataset(data.Dataset):
def __init__(
self,
folder,
image_size,
channels = 3,
num_frames = 16,
horizontal_flip = False,
force_num_frames = True,
exts = ['gif']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.channels = channels
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
self.cast_num_frames_fn = partial(cast_num_frames, frames = num_frames) if force_num_frames else identity
self.transform = T.Compose([
T.Resize(image_size),
T.RandomHorizontalFlip() if horizontal_flip else T.Lambda(identity),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
tensor = gif_to_tensor(path, self.channels, transform = self.transform)
return self.cast_num_frames_fn(tensor)
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
ema_decay = 0.995,
num_frames = 16,
train_batch_size = 32,
train_lr = 1e-4,
train_num_steps = 100000,
gradient_accumulate_every = 2,
amp = False,
step_start_ema = 2000,
update_ema_every = 10,
save_and_sample_every = 1000,
results_folder = './results',
num_sample_rows = 4,
max_grad_norm = None
):
super().__init__()
self.model = diffusion_model
self.ema = EMA(ema_decay)
self.ema_model = copy.deepcopy(self.model)
self.update_ema_every = update_ema_every
self.step_start_ema = step_start_ema
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.image_size = diffusion_model.image_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
image_size = diffusion_model.image_size
channels = diffusion_model.channels
num_frames = diffusion_model.num_frames
self.ds = Dataset(folder, image_size, channels = channels, num_frames = num_frames)
print(f'found {len(self.ds)} videos as gif files at {folder}')
assert len(self.ds) > 0, 'need to have at least 1 video to start training (although 1 is not great, try 100k)'
self.dl = cycle(data.DataLoader(self.ds, batch_size = train_batch_size, shuffle=True, pin_memory=True))
self.opt = Adam(diffusion_model.parameters(), lr = train_lr)
self.step = 0
self.amp = amp
self.scaler = GradScaler(enabled = amp)
self.max_grad_norm = max_grad_norm
self.num_sample_rows = num_sample_rows
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True, parents = True)
self.reset_parameters()
def reset_parameters(self):
self.ema_model.load_state_dict(self.model.state_dict())
def step_ema(self):
if self.step < self.step_start_ema:
self.reset_parameters()
return
self.ema.update_model_average(self.ema_model, self.model)
def save(self, milestone):
data = {
'step': self.step,
'model': self.model.state_dict(),
'ema': self.ema_model.state_dict(),
'scaler': self.scaler.state_dict()
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone, **kwargs):
if milestone == -1:
all_milestones = [int(p.stem.split('-')[-1]) for p in Path(self.results_folder).glob('**/*.pt')]
assert len(all_milestones) > 0, 'need to have at least one milestone to load from latest checkpoint (milestone == -1)'
milestone = max(all_milestones)
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'))
self.step = data['step']
self.model.load_state_dict(data['model'], **kwargs)
self.ema_model.load_state_dict(data['ema'], **kwargs)
self.scaler.load_state_dict(data['scaler'])
def train(
self,
prob_focus_present = 0.,
focus_present_mask = None,
log_fn = noop
):
assert callable(log_fn)
while self.step < self.train_num_steps:
for i in range(self.gradient_accumulate_every):
data = next(self.dl).cuda()
with autocast(enabled = self.amp):
loss = self.model(
data,
prob_focus_present = prob_focus_present,
focus_present_mask = focus_present_mask
)
self.scaler.scale(loss / self.gradient_accumulate_every).backward()
print(f'{self.step}: {loss.item()}')
log = {'loss': loss.item()}
if exists(self.max_grad_norm):
self.scaler.unscale_(self.opt)
nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.scaler.step(self.opt)
self.scaler.update()
self.opt.zero_grad()
if self.step % self.update_ema_every == 0:
self.step_ema()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
milestone = self.step // self.save_and_sample_every
num_samples = self.num_sample_rows ** 2
batches = num_to_groups(num_samples, self.batch_size)
all_videos_list = list(map(lambda n: self.ema_model.sample(batch_size=n), batches))
all_videos_list = torch.cat(all_videos_list, dim = 0)
all_videos_list = F.pad(all_videos_list, (2, 2, 2, 2))
one_gif = rearrange(all_videos_list, '(i j) c f h w -> c f (i h) (j w)', i = self.num_sample_rows)
video_path = str(self.results_folder / str(f'{milestone}.gif'))
video_tensor_to_gif(one_gif, video_path)
log = {**log, 'sample': video_path}
self.save(milestone)
log_fn(log)
self.step += 1
print('training completed')
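# Hedged usage sketch (not part of the original file): the smallest training/sampling
# round-trip for Unet3D + GaussianDiffusion. All sizes below are arbitrary toy values,
# and sampling runs the full 1000-step reverse loop, so it is slow on CPU.
if __name__ == '__main__':
    model = Unet3D(dim = 64, dim_mults = (1, 2, 4, 8))
    diffusion = GaussianDiffusion(
        model,
        image_size = 32,
        num_frames = 5,
        timesteps = 1000,
        loss_type = 'l1'
    )
    videos = torch.randn(1, 3, 5, 32, 32)  # (batch, channels, frames, height, width)
    loss = diffusion(videos)
    loss.backward()
    sampled_videos = diffusion.sample(batch_size = 2)  # (2, 3, 5, 32, 32), values in [0, 1]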
|
video-diffusion-pytorch-main
|
video_diffusion_pytorch/video_diffusion_pytorch.py
|
import torch
from einops import rearrange
def exists(val):
return val is not None
# singleton globals
MODEL = None
TOKENIZER = None
BERT_MODEL_DIM = 768
def get_tokenizer():
global TOKENIZER
if not exists(TOKENIZER):
TOKENIZER = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-cased')
return TOKENIZER
def get_bert():
global MODEL
if not exists(MODEL):
MODEL = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-cased')
if torch.cuda.is_available():
MODEL = MODEL.cuda()
return MODEL
# tokenize
def tokenize(texts, add_special_tokens = True):
if not isinstance(texts, (list, tuple)):
texts = [texts]
tokenizer = get_tokenizer()
encoding = tokenizer.batch_encode_plus(
texts,
add_special_tokens = add_special_tokens,
padding = True,
return_tensors = 'pt'
)
token_ids = encoding.input_ids
return token_ids
# embedding function
@torch.no_grad()
def bert_embed(
token_ids,
return_cls_repr = False,
eps = 1e-8,
pad_id = 0.
):
model = get_bert()
mask = token_ids != pad_id
if torch.cuda.is_available():
token_ids = token_ids.cuda()
mask = mask.cuda()
outputs = model(
input_ids = token_ids,
attention_mask = mask,
output_hidden_states = True
)
hidden_state = outputs.hidden_states[-1]
if return_cls_repr:
return hidden_state[:, 0] # return [cls] as representation
if not exists(mask):
return hidden_state.mean(dim = 1)
mask = mask[:, 1:] # mean all tokens excluding [cls], accounting for length
mask = rearrange(mask, 'b n -> b n 1')
numer = (hidden_state[:, 1:] * mask).sum(dim = 1)
denom = mask.sum(dim = 1)
masked_mean = numer / (denom + eps)
return masked_mean
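# Hedged usage sketch (not part of the original file): embedding a couple of captions.
# The first call pulls the bert-base-cased weights through torch.hub, so it needs network
# access (and the `transformers` package behind that hub entry point).
if __name__ == '__main__':
    token_ids = tokenize(['a video of a dog chasing a ball', 'an empty room'])
    embeddings = bert_embed(token_ids)                          # (2, 768) masked-mean token embeddings
    cls_repr = bert_embed(token_ids, return_cls_repr = True)   # (2, 768) [cls] representations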
|
video-diffusion-pytorch-main
|
video_diffusion_pytorch/text.py
|
from setuptools import setup, find_packages
setup(
name = 'mlp-mixer-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.1',
license='MIT',
description = 'MLP Mixer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/mlp-mixer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'image recognition'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
mlp-mixer-pytorch-main
|
setup.py
|
from mlp_mixer_pytorch.mlp_mixer_pytorch import MLPMixer
from mlp_mixer_pytorch.permutator import Permutator
|
mlp-mixer-pytorch-main
|
mlp_mixer_pytorch/__init__.py
|
from torch import nn
from functools import partial
from einops.layers.torch import Rearrange, Reduce
pair = lambda x: x if isinstance(x, tuple) else (x, x)
class PreNormResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
return self.fn(self.norm(x)) + x
def FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):
inner_dim = int(dim * expansion_factor)
return nn.Sequential(
dense(dim, inner_dim),
nn.GELU(),
nn.Dropout(dropout),
dense(inner_dim, dim),
nn.Dropout(dropout)
)
def MLPMixer(*, image_size, channels, patch_size, dim, depth, num_classes, expansion_factor = 4, expansion_factor_token = 0.5, dropout = 0.):
image_h, image_w = pair(image_size)
assert (image_h % patch_size) == 0 and (image_w % patch_size) == 0, 'image must be divisible by patch size'
num_patches = (image_h // patch_size) * (image_w // patch_size)
chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear((patch_size ** 2) * channels, dim),
*[nn.Sequential(
PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, chan_first)),
PreNormResidual(dim, FeedForward(dim, expansion_factor_token, dropout, chan_last))
) for _ in range(depth)],
nn.LayerNorm(dim),
Reduce('b n c -> b c', 'mean'),
nn.Linear(dim, num_classes)
)
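# Hedged usage sketch (not part of the original file): classifying a random image with a
# small mixer. All hyperparameters below are arbitrary example values.
if __name__ == '__main__':
    import torch
    model = MLPMixer(
        image_size = 256,
        channels = 3,
        patch_size = 16,
        dim = 512,
        depth = 12,
        num_classes = 1000
    )
    img = torch.randn(1, 3, 256, 256)
    pred = model(img)  # (1, 1000) class logits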
|
mlp-mixer-pytorch-main
|
mlp_mixer_pytorch/mlp_mixer_pytorch.py
|
from torch import nn
from functools import partial
from einops.layers.torch import Rearrange, Reduce
class PreNormResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
return self.fn(self.norm(x)) + x
class ParallelSum(nn.Module):
def __init__(self, *fns):
super().__init__()
self.fns = nn.ModuleList(fns)
def forward(self, x):
return sum(map(lambda fn: fn(x), self.fns))
def Permutator(*, image_size, patch_size, dim, depth, num_classes, segments, expansion_factor = 4, dropout = 0.):
assert (image_size % patch_size) == 0, 'image must be divisible by patch size'
assert (dim % segments) == 0, 'dimension must be divisible by the number of segments'
height = width = image_size // patch_size
s = segments
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b h w (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear((patch_size ** 2) * 3, dim),
*[nn.Sequential(
PreNormResidual(dim, nn.Sequential(
ParallelSum(
nn.Sequential(
Rearrange('b h w (c s) -> b w c (h s)', s = s),
nn.Linear(height * s, height * s),
Rearrange('b w c (h s) -> b h w (c s)', s = s),
),
nn.Sequential(
Rearrange('b h w (c s) -> b h c (w s)', s = s),
nn.Linear(width * s, width * s),
Rearrange('b h c (w s) -> b h w (c s)', s = s),
),
nn.Linear(dim, dim)
),
nn.Linear(dim, dim)
)),
PreNormResidual(dim, nn.Sequential(
nn.Linear(dim, dim * expansion_factor),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * expansion_factor, dim),
nn.Dropout(dropout)
))
) for _ in range(depth)],
nn.LayerNorm(dim),
Reduce('b h w c -> b c', 'mean'),
nn.Linear(dim, num_classes)
)
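# Hedged usage sketch (not part of the original file): the Permutator on a random image.
# Note the patch embedding hard-codes 3 input channels, image_size must be divisible by
# patch_size and dim by segments; all values below are arbitrary examples.
if __name__ == '__main__':
    import torch
    model = Permutator(
        image_size = 256,
        patch_size = 16,
        dim = 512,
        depth = 4,
        segments = 8,
        num_classes = 1000
    )
    img = torch.randn(1, 3, 256, 256)
    pred = model(img)  # (1, 1000) class logits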
|
mlp-mixer-pytorch-main
|
mlp_mixer_pytorch/permutator.py
|
from setuptools import setup, find_packages
setup(
name = 'robotic-transformer-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.17',
license='MIT',
description = 'Robotic Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/robotic-transformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'robotics'
],
install_requires=[
'classifier-free-guidance-pytorch>=0.1.4',
'einops>=0.6',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
robotic-transformer-pytorch-main
|
setup.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from typing import List, Optional, Callable, Tuple
from beartype import beartype
from einops import pack, unpack, repeat, reduce, rearrange
from einops.layers.torch import Rearrange, Reduce
from functools import partial
from classifier_free_guidance_pytorch import TextConditioner, AttentionTextConditioner, classifier_free_guidance
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def pack_one(x, pattern):
return pack([x], pattern)
def unpack_one(x, ps, pattern):
return unpack(x, ps, pattern)[0]
# sinusoidal positions
def posemb_sincos_1d(seq, dim, temperature = 10000, device = None, dtype = torch.float32):
n = torch.arange(seq, device = device)
omega = torch.arange(dim // 2, device = device) / (dim // 2 - 1)
omega = 1. / (temperature ** omega)
n = n[:, None] * omega[None, :]
pos_emb = torch.cat((n.sin(), n.cos()), dim = 1)
return pos_emb.type(dtype)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
inner_dim = int(dim * mult)
self.norm = LayerNorm(dim)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, cond_fn = None):
x = self.norm(x)
if exists(cond_fn):
# adaptive layernorm
x = cond_fn(x)
return self.net(x)
# MBConv
class SqueezeExcitation(nn.Module):
def __init__(self, dim, shrinkage_rate = 0.25):
super().__init__()
hidden_dim = int(dim * shrinkage_rate)
self.gate = nn.Sequential(
Reduce('b c h w -> b c', 'mean'),
nn.Linear(dim, hidden_dim, bias = False),
nn.SiLU(),
nn.Linear(hidden_dim, dim, bias = False),
nn.Sigmoid(),
Rearrange('b c -> b c 1 1')
)
def forward(self, x):
return x * self.gate(x)
class MBConvResidual(nn.Module):
def __init__(self, fn, dropout = 0.):
super().__init__()
self.fn = fn
self.dropsample = Dropsample(dropout)
def forward(self, x):
out = self.fn(x)
out = self.dropsample(out)
return out + x
class Dropsample(nn.Module):
def __init__(self, prob = 0):
super().__init__()
self.prob = prob
def forward(self, x):
device = x.device
if self.prob == 0. or (not self.training):
return x
        # torch.FloatTensor would treat the shape tuple as data and does not accept a device
        # keyword, so sample the keep mask with torch.empty at the intended shape instead
        keep_mask = torch.empty((x.shape[0], 1, 1, 1), device = device).uniform_() > self.prob
return x * keep_mask / (1 - self.prob)
def MBConv(
dim_in,
dim_out,
*,
downsample,
expansion_rate = 4,
shrinkage_rate = 0.25,
dropout = 0.
):
hidden_dim = int(expansion_rate * dim_out)
stride = 2 if downsample else 1
net = nn.Sequential(
nn.Conv2d(dim_in, hidden_dim, 1),
nn.BatchNorm2d(hidden_dim),
nn.GELU(),
nn.Conv2d(hidden_dim, hidden_dim, 3, stride = stride, padding = 1, groups = hidden_dim),
nn.BatchNorm2d(hidden_dim),
nn.GELU(),
SqueezeExcitation(hidden_dim, shrinkage_rate = shrinkage_rate),
nn.Conv2d(hidden_dim, dim_out, 1),
nn.BatchNorm2d(dim_out)
)
if dim_in == dim_out and not downsample:
net = MBConvResidual(net, dropout = dropout)
return net
# attention related classes
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
dropout = 0.,
window_size = 7
):
super().__init__()
assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head'
self.norm = LayerNorm(dim)
self.heads = dim // dim_head
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, dim * 3, bias = False)
self.attend = nn.Sequential(
nn.Softmax(dim = -1),
nn.Dropout(dropout)
)
self.to_out = nn.Sequential(
nn.Linear(dim, dim, bias = False),
nn.Dropout(dropout)
)
# relative positional bias
self.rel_pos_bias = nn.Embedding((2 * window_size - 1) ** 2, self.heads)
pos = torch.arange(window_size)
grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij'))
grid = rearrange(grid, 'c i j -> (i j) c')
rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...')
rel_pos += window_size - 1
rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1)
self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False)
def forward(self, x):
batch, height, width, window_height, window_width, _, device, h = *x.shape, x.device, self.heads
x = self.norm(x)
# flatten
x = rearrange(x, 'b x y w1 w2 d -> (b x y) (w1 w2) d')
# project for queries, keys, values
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
# split heads
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# scale
q = q * self.scale
# sim
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# add positional bias
bias = self.rel_pos_bias(self.rel_pos_indices)
sim = sim + rearrange(bias, 'i j h -> h i j')
# attention
attn = self.attend(sim)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h (w1 w2) d -> b w1 w2 (h d)', w1 = window_height, w2 = window_width)
# combine heads out
out = self.to_out(out)
return rearrange(out, '(b x y) ... -> b x y ...', x = height, y = width)
class MaxViT(nn.Module):
def __init__(
self,
*,
num_classes,
dim,
depth,
dim_head = 32,
dim_conv_stem = None,
window_size = 7,
mbconv_expansion_rate = 4,
mbconv_shrinkage_rate = 0.25,
dropout = 0.1,
channels = 3
):
super().__init__()
        assert isinstance(depth, tuple), 'depth needs to be a tuple of integers indicating the number of transformer blocks at each stage'
# convolutional stem
dim_conv_stem = default(dim_conv_stem, dim)
self.conv_stem = nn.Sequential(
nn.Conv2d(channels, dim_conv_stem, 3, stride = 2, padding = 1),
nn.Conv2d(dim_conv_stem, dim_conv_stem, 3, padding = 1)
)
# variables
num_stages = len(depth)
dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages)))
dims = (dim_conv_stem, *dims)
dim_pairs = tuple(zip(dims[:-1], dims[1:]))
self.layers = nn.ModuleList([])
# shorthand for window size for efficient block - grid like attention
w = window_size
# iterate through stages
cond_hidden_dims = []
for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(dim_pairs, depth)):
for stage_ind in range(layer_depth):
is_first = stage_ind == 0
stage_dim_in = layer_dim_in if is_first else layer_dim
cond_hidden_dims.append(stage_dim_in)
block = nn.Sequential(
MBConv(
stage_dim_in,
layer_dim,
downsample = is_first,
expansion_rate = mbconv_expansion_rate,
shrinkage_rate = mbconv_shrinkage_rate
),
Rearrange('b d (x w1) (y w2) -> b x y w1 w2 d', w1 = w, w2 = w), # block-like attention
Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)),
Residual(FeedForward(dim = layer_dim, dropout = dropout)),
Rearrange('b x y w1 w2 d -> b d (x w1) (y w2)'),
Rearrange('b d (w1 x) (w2 y) -> b x y w1 w2 d', w1 = w, w2 = w), # grid-like attention
Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)),
Residual(FeedForward(dim = layer_dim, dropout = dropout)),
Rearrange('b x y w1 w2 d -> b d (w1 x) (w2 y)'),
)
self.layers.append(block)
embed_dim = dims[-1]
self.embed_dim = dims[-1]
self.cond_hidden_dims = cond_hidden_dims
# mlp head out
self.mlp_head = nn.Sequential(
Reduce('b d h w -> b d', 'mean'),
LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
@beartype
def forward(
self,
x,
texts: Optional[List[str]] = None,
cond_fns: Optional[Tuple[Callable, ...]] = None,
cond_drop_prob = 0.,
return_embeddings = False
):
x = self.conv_stem(x)
if not exists(cond_fns):
cond_fns = (None,) * len(self.layers)
for stage, cond_fn in zip(self.layers, cond_fns):
if exists(cond_fn):
x = cond_fn(x)
x = stage(x)
if return_embeddings:
return x
return self.mlp_head(x)
# attention
class TransformerAttention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False,
dropout = 0.1
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
self.causal = causal
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity()
self.attn_dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
attn_mask = None,
cond_fn: Optional[Callable] = None
):
b = x.shape[0]
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
x = self.norm(x)
if exists(cond_fn):
# adaptive layer-norm
x = cond_fn(x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
q = q * self.scale
sim = einsum('b h i d, b j d -> b h i j', q, k)
if exists(attn_bias):
sim = sim + attn_bias
if exists(attn_mask):
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = x.device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
@beartype
class Transformer(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
depth = 6,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
TransformerAttention(dim = dim, heads = heads, dropout = attn_dropout),
FeedForward(dim = dim, dropout = ff_dropout)
]))
def forward(
self,
x,
cond_fns: Optional[Tuple[Callable, ...]] = None,
attn_mask = None
):
if not exists(cond_fns):
cond_fns = (None,) * len(self.layers) * 2
cond_fns = iter(cond_fns)
for attn, ff in self.layers:
x = attn(x, attn_mask = attn_mask, cond_fn = next(cond_fns)) + x
x = ff(x, cond_fn = next(cond_fns)) + x
return x
# token learner module
class TokenLearner(nn.Module):
"""
https://arxiv.org/abs/2106.11297
    using the 1.1 version, with an MLP (2 dense layers with GELU) for generating the attention maps
"""
def __init__(
self,
*,
dim,
ff_mult = 2,
num_output_tokens = 8,
num_layers = 2
):
super().__init__()
inner_dim = dim * ff_mult * num_output_tokens
self.num_output_tokens = num_output_tokens
self.net = nn.Sequential(
nn.Conv2d(dim * num_output_tokens, inner_dim, 1, groups = num_output_tokens),
nn.GELU(),
nn.Conv2d(inner_dim, num_output_tokens, 1, groups = num_output_tokens),
)
def forward(self, x):
x, ps = pack_one(x, '* c h w')
x = repeat(x, 'b c h w -> b (g c) h w', g = self.num_output_tokens)
attn = self.net(x)
attn = rearrange(attn, 'b g h w -> b 1 g h w')
x = rearrange(x, 'b (g c) h w -> b c g h w', g = self.num_output_tokens)
x = reduce(x * attn, 'b c g h w -> b c g', 'mean')
x = unpack_one(x, ps, '* c n')
return x
# Robotic Transformer
@beartype
class RT1(nn.Module):
def __init__(
self,
*,
vit: MaxViT,
num_actions = 11,
action_bins = 256,
depth = 6,
heads = 8,
dim_head = 64,
token_learner_ff_mult = 2,
token_learner_num_layers = 2,
token_learner_num_output_tokens = 8,
cond_drop_prob = 0.2,
use_attn_conditioner = False,
conditioner_kwargs: dict = dict()
):
super().__init__()
self.vit = vit
self.num_vit_stages = len(vit.cond_hidden_dims)
conditioner_klass = AttentionTextConditioner if use_attn_conditioner else TextConditioner
self.conditioner = conditioner_klass(
hidden_dims = (*tuple(vit.cond_hidden_dims), *((vit.embed_dim,) * depth * 2)),
hiddens_channel_first = (*((True,) * self.num_vit_stages), *((False,) * depth * 2)),
cond_drop_prob = cond_drop_prob,
**conditioner_kwargs
)
self.token_learner = TokenLearner(
dim = vit.embed_dim,
ff_mult = token_learner_ff_mult,
num_output_tokens = token_learner_num_output_tokens,
num_layers = token_learner_num_layers
)
self.num_learned_tokens = token_learner_num_output_tokens
self.transformer_depth = depth
self.transformer = Transformer(
dim = vit.embed_dim,
dim_head = dim_head,
heads = heads,
depth = depth
)
self.cond_drop_prob = cond_drop_prob
self.to_logits = nn.Sequential(
LayerNorm(vit.embed_dim),
nn.Linear(vit.embed_dim, num_actions * action_bins),
Rearrange('... (a b) -> ... a b', b = action_bins)
)
@classifier_free_guidance
def forward(
self,
video,
texts: Optional[List[str]] = None,
cond_drop_prob = 0.
):
depth = self.transformer_depth
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
frames, device = video.shape[2], video.device
cond_fns = self.conditioner(
texts,
cond_drop_prob = cond_drop_prob,
repeat_batch = (*((frames,) * self.num_vit_stages), *((1,) * self.transformer_depth * 2))
)
vit_cond_fns, transformer_cond_fns = cond_fns[:-(depth * 2)], cond_fns[-(depth * 2):]
video = rearrange(video, 'b c f h w -> b f c h w')
images, packed_shape = pack_one(video, '* c h w')
tokens = self.vit(
images,
texts = texts,
cond_fns = vit_cond_fns,
cond_drop_prob = cond_drop_prob,
return_embeddings = True
)
tokens = unpack_one(tokens, packed_shape, '* c h w')
learned_tokens = self.token_learner(tokens)
learned_tokens = rearrange(learned_tokens, 'b f c n -> b (f n) c')
# causal attention mask
attn_mask = torch.ones((frames, frames), dtype = torch.bool, device = device).triu(1)
attn_mask = repeat(attn_mask, 'i j -> (i r1) (j r2)', r1 = self.num_learned_tokens, r2 = self.num_learned_tokens)
# sinusoidal positional embedding
pos_emb = posemb_sincos_1d(frames, learned_tokens.shape[-1], dtype = learned_tokens.dtype, device = learned_tokens.device)
learned_tokens = learned_tokens + repeat(pos_emb, 'n d -> (n r) d', r = self.num_learned_tokens)
# attention
attended_tokens = self.transformer(learned_tokens, cond_fns = transformer_cond_fns, attn_mask = ~attn_mask)
pooled = reduce(attended_tokens, 'b (f n) d -> b f d', 'mean', f = frames)
logits = self.to_logits(pooled)
return logits
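# usage sketch (not part of the original file): a minimal forward pass through RT1;
# the MaxViT constructor arguments below are assumptions based on typical settings for
# this class and may need adjusting to match the definition earlier in this file
if __name__ == '__main__':
    vit = MaxViT(
        num_classes = 1000,
        dim_conv_stem = 64,
        dim = 96,
        dim_head = 32,
        depth = (2, 2, 5, 2),
        window_size = 7,
        mbconv_expansion_rate = 4,
        mbconv_shrinkage_rate = 0.25,
        dropout = 0.1
    )
    model = RT1(
        vit = vit,
        num_actions = 11,
        depth = 6,
        heads = 8,
        dim_head = 64,
        cond_drop_prob = 0.2
    )
    video = torch.randn(2, 3, 6, 224, 224)  # (batch, channels, frames, height, width)
    instructions = ['bring me that apple sitting on the table', 'please pass the butter']
    logits = model(video, instructions)      # (batch, frames, num_actions, action_bins)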
|
robotic-transformer-pytorch-main
|
robotic_transformer_pytorch/robotic_transformer_pytorch.py
|
from robotic_transformer_pytorch.robotic_transformer_pytorch import RT1, TokenLearner, MaxViT
|
robotic-transformer-pytorch-main
|
robotic_transformer_pytorch/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'memory_compressed_attention',
packages = find_packages(),
version = '0.0.7',
license='MIT',
description = 'Memory-Compressed Self Attention',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/memory-compressed-attention',
keywords = ['transformers', 'artificial intelligence', 'attention mechanism'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
memory-compressed-attention-master
|
setup.py
|
from memory_compressed_attention.memory_compressed_attention import MemoryCompressedAttention
|
memory-compressed-attention-master
|
memory_compressed_attention/__init__.py
|
import torch
from torch import nn
import torch.nn.functional as F
# convolutional compression class
class ConvCompress(nn.Module):
def __init__(self, dim, ratio = 3, groups = 1):
super().__init__()
self.conv = nn.Conv1d(dim, dim, ratio, stride = ratio, groups = groups)
def forward(self, mem):
mem = mem.transpose(1, 2)
compressed_mem = self.conv(mem)
return compressed_mem.transpose(1, 2)
# main class
class MemoryCompressedAttention(nn.Module):
def __init__(
self,
dim,
heads = 8,
causal = False,
compression_factor = 3,
dropout = 0.):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
self.heads = heads
self.causal = causal
self.compression_factor = compression_factor
self.compress_fn = ConvCompress(dim, compression_factor, groups = heads)
self.to_qkv = nn.Linear(dim, dim * 3, bias = False)
self.to_out = nn.Linear(dim, dim)
self.dropout = nn.Dropout(dropout)
self.null_k = nn.Parameter(torch.zeros(1, 1, dim))
self.null_v = nn.Parameter(torch.zeros(1, 1, dim))
def forward(self, x, input_mask = None):
b, t, d, h, cf, device = *x.shape, self.heads, self.compression_factor, x.device
q, k, v = self.to_qkv(x).chunk(3, dim=-1)
# make sure keys and values sequence lengths
# are divisible by the compression factor
padding = cf - (t % cf)
if padding < cf:
k, v = map(lambda t: F.pad(t, (0, 0, padding, 0)), (k, v))
# compress keys and values
k, v = map(self.compress_fn, (k, v))
# attach a null key and value, in the case that the first query has no keys to pay attention to
nk, nv = map(lambda t: t.expand(b, -1, -1), (self.null_k, self.null_v))
k = torch.cat((nk, k), dim=1)
v = torch.cat((nv, v), dim=1)
# merge heads
q, k, v = map(lambda t: t.reshape(*t.shape[:2], h, -1).transpose(1, 2), (q, k, v))
# attention
dots = torch.einsum('bhid,bhjd->bhij', q, k) * d ** -0.5
mask_value = -torch.finfo(dots.dtype).max
# causal masking, if needed
if self.causal:
mask_q = mask_k = torch.arange(t, device=device)
if padding < cf:
mask_k = F.pad(mask_k, (padding, 0))
mask_k, _ = mask_k.reshape(-1, cf).max(dim=-1)
mask = mask_q[:, None] < mask_k[None, :]
mask = F.pad(mask, (1, 0), value=False)
dots.masked_fill_(mask[None, None, ...], mask_value)
del mask
# input masking
if input_mask is not None:
mask_q = mask_k = input_mask
if padding < cf:
mask_k = F.pad(mask_k, (padding, 0), value=True)
mask_k = mask_k.reshape(b, -1, cf).sum(dim=-1) > 0
            mask = mask_q[:, None, :, None] * mask_k[:, None, None, :]
mask = F.pad(mask, (1, 0), value=True)
dots.masked_fill_(~mask, mask_value)
del mask
# attention
attn = dots.softmax(dim=-1)
# dropout
attn = self.dropout(attn)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
# split heads and combine
out = out.transpose(1, 2).reshape(b, t, d)
return self.to_out(out)
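# usage sketch (not part of the original file): a minimal self-attention call over a
# sequence of 1024 tokens, matching the defaults of the class above
if __name__ == '__main__':
    attn = MemoryCompressedAttention(
        dim = 512,
        heads = 8,
        causal = False,
        compression_factor = 3,
        dropout = 0.1
    )
    x = torch.randn(1, 1024, 512)
    out = attn(x)  # (1, 1024, 512)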
|
memory-compressed-attention-master
|
memory_compressed_attention/memory_compressed_attention.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'raveforce'
DESCRIPTION = 'An OpenAI Gym style toolkit for music generation experiments.'
URL = 'https://github.com/chaosprint/RaveForce'
EMAIL = 'chaosprint@gmail.com'
AUTHOR = 'Qichao Lan (chaosprint)'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.2.3'
# What packages are required for this module to be executed?
REQUIRED = [
'wasmer',
'numpy',
'wasmer_compiler_cranelift'
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
RaveForce-main
|
setup.py
|
from wasmer import engine, Store, Module, Instance, Memory, ImportObject, Function, FunctionType, Type
from wasmer_compiler_cranelift import Compiler
from urllib.request import urlopen
import numpy as np
import sys
import datetime
import struct, random
def make(code="", target=[], total_step=16, step_len=0.125, criteria="raw",
action_space=[]):
return Env(code=code, target=target, total_step=total_step, step_len=step_len,
criteria=criteria, action_space=action_space)
#end
def now():
return datetime.datetime.now()
class Env(object):
def __init__(self, code="", target=[], total_step=16, step_len=0.125, criteria="raw",
action_space=[]):
self.elapse_block = 0
self.code = code
self.target = target
self.total_step = total_step
self.step_len = step_len
self.criteria = criteria
self.action_space = ActionSpace(action_space)
self.loaded = False
# some calculation
self.para_num = code.count('{}')
def reset(self, plot=False):
# use wasmer-python
if self.loaded:
self.instance.exports.reset()
else:
import_object = ImportObject()
store = Store(engine.JIT(Compiler))
import_object.register(
"env",
{
"now": Function(store, now, FunctionType([], [Type.F64]))
}
)
# self.module = Module(self.store, open('glicol_wasm.wasm', 'rb').read())
# self.module = Module(self.store, urlopen('https://cdn.jsdelivr.net/gh/chaosprint/glicol/js/src/glicol_wasm.wasm', 'rb').read())
binary = urlopen('https://cdn.jsdelivr.net/gh/chaosprint/glicol@v0.9.22/js/src/glicol_wasm.wasm').read()
module = Module(store, binary)
self.instance = Instance(module, import_object)
self.loaded = True
# empty_observation = np.zeros(int(self.step_len * self.total_step))
self.audio = [[], []]
empty_observation = np.zeros(len(self.target))
# print(self.action_space.actions)
# if plot:
# plt.plot(empty_observation)
return empty_observation
def step(self, action):
inPtr = self.instance.exports.alloc(128)
resultPtr = self.instance.exports.alloc_uint8array(256)
self.audioBufPtr = self.instance.exports.alloc_uint8array(256)
# send the code to the wasm
code = self.code.format(*action)
# print(self.criteria)
code = bytes(code, "utf-8")
codeLen = len(code)
codePtr = self.instance.exports.alloc_uint8array(codeLen)
self.memory = self.instance.exports.memory.uint8_view(codePtr)
self.memory[0:codeLen] = code
self.instance.exports.update(codePtr, codeLen)
audioBufPtr = self.instance.exports.alloc(256)
# start the engine
# self.instance.exports.run_without_samples(codePtr, codeLen)
self.num_block = int(self.step_len * 44100 / 128)
# self.elapse_block += self.num_block
# self.audio = []
for _ in range(self.num_block):
self.instance.exports.process(inPtr, audioBufPtr, 256, resultPtr)
bufuint8 = self.instance.exports.memory.uint8_view(offset=int(audioBufPtr))[:1024]
nth = 0
buf = []
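            # reinterpret the 1024 raw bytes as 256 little-endian float32 samples (two channels of 128 per block)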
while nth < 1024:
byte_arr = bytearray(bufuint8[nth: nth+4])
num = struct.unpack('<f', byte_arr)
buf.append(num[0])
nth += 4
result = self.instance.exports.memory.uint8_view(offset=resultPtr)
result_str = "".join(map(chr, filter(lambda x : x != 0, result[:256])))
# deprecated in 2022
# self.instance.exports.process_u8(self.audioBufPtr)
# self.memory = self.instance.exports.memory.uint8_view(self.audioBufPtr)
# self.buf = [struct.unpack('<f', bytearray(
# self.memory[i: i+4]))[0] for i in range(0,256,4)]
self.audio[0].extend(buf[:128])
# self.audio[1].extend(buf[128:256])
padded_observation = self.padding_to_total()
reward = self.calc_reward(padded_observation)
print("step {} in total {}".format(self.step, self.total_step))
done = self.step == self.total_step
info = 0 if result_str == "" else result_str
return padded_observation, reward, done, info
def render(self):
pass
def padding_to_total(self):
# pad_width = len(self.target) - len(self.audio[0])
# print(len(self.target), len(self.audio))
# padded = np.pad(self.audio, pad_width, "constant", constant_values=0)
# print(padded)
padded = np.zeros(len(self.target))
padded[:len(self.audio[0])] = self.audio[0]
return padded
def calc_reward(self, padded_observation):
if self.criteria == "raw":
# print(padded_observation)
# print(len(padded_observation), len(self.target))
# print((padded_observation - self.target))
# mse = 0.1
mse = (np.square(padded_observation - self.target)).mean(axis=0)
return -mse
else:
return 0
class ActionSpace(object):
def __init__(self, actions):
self.actions = actions
def sample(self):
result = list(range(len(self.actions)))
for i, action in enumerate(self.actions):
if action[0] == "lin":
result[i] = action[1] + random.random() * (action[2] - action[1])
elif action[0] == "exp":
                point_a = np.log(action[1] + sys.float_info.min)
point_b = np.log(action[2])
result[i] = np.exp(point_a + random.random()*(point_b - point_a))
elif action[0] == "rel":
func = action[2]
by = result[action[1]]
result[i] = func(by)
elif action[0] == "choose":
result[i] = random.choice(action[1])
else:
pass
return result
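# usage sketch (not part of the original file): sampling from an ActionSpace; the action
# specs below are illustrative only - 'lin' and 'exp' draw within a range, 'choose' picks
# from a list, and 'rel' derives a value from a previously sampled entry
if __name__ == '__main__':
    space = ActionSpace([
        ("lin", 0.1, 0.9),
        ("exp", 20.0, 2000.0),
        ("choose", [60, 64, 67]),
        ("rel", 0, lambda x: x * 2),
    ])
    print(space.sample())  # e.g. [0.53, 312.7, 64, 1.06]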
|
RaveForce-main
|
raveforce/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'conformer',
packages = find_packages(),
version = '0.3.2',
license='MIT',
description = 'The convolutional module from the Conformer paper',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/conformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'audio'
],
install_requires=[
'einops>=0.6.1',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
conformer-master
|
setup.py
|
from conformer.conformer import ConformerConvModule, ConformerBlock, Conformer
|
conformer-master
|
conformer/__init__.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def calc_same_padding(kernel_size):
pad = kernel_size // 2
return (pad, pad - (kernel_size + 1) % 2)
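# e.g. calc_same_padding(31) == (15, 15) and calc_same_padding(4) == (2, 1):
# symmetric padding for odd kernels, one less on the right for even kernels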
# helper classes
class Swish(nn.Module):
def forward(self, x):
return x * x.sigmoid()
class GLU(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
out, gate = x.chunk(2, dim=self.dim)
return out * gate.sigmoid()
class DepthWiseConv1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, padding):
super().__init__()
self.padding = padding
self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups = chan_in)
def forward(self, x):
x = F.pad(x, self.padding)
return self.conv(x)
# attention, feedforward, and conv module
class Scale(nn.Module):
def __init__(self, scale, fn):
super().__init__()
self.fn = fn
self.scale = scale
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
max_pos_emb = 512
):
super().__init__()
inner_dim = dim_head * heads
        self.heads = heads
self.scale = dim_head ** -0.5
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.max_pos_emb = max_pos_emb
self.rel_pos_emb = nn.Embedding(2 * max_pos_emb + 1, dim_head)
self.dropout = nn.Dropout(dropout)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None
):
n, device, h, max_pos_emb, has_context = x.shape[-2], x.device, self.heads, self.max_pos_emb, exists(context)
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# shaw's relative positional embedding
seq = torch.arange(n, device = device)
dist = rearrange(seq, 'i -> i ()') - rearrange(seq, 'j -> () j')
dist = dist.clamp(-max_pos_emb, max_pos_emb) + max_pos_emb
rel_pos_emb = self.rel_pos_emb(dist).to(q)
pos_attn = einsum('b h n d, n r d -> b h n r', q, rel_pos_emb) * self.scale
dots = dots + pos_attn
if exists(mask) or exists(context_mask):
mask = default(mask, lambda: torch.ones(*x.shape[:2], device = device))
context_mask = default(context_mask, mask) if not has_context else default(context_mask, lambda: torch.ones(*context.shape[:2], device = device))
mask_value = -torch.finfo(dots.dtype).max
mask = rearrange(mask, 'b i -> b () i ()') * rearrange(context_mask, 'b j -> b () () j')
dots.masked_fill_(~mask, mask_value)
attn = dots.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return self.dropout(out)
class FeedForward(nn.Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
Swish(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class ConformerConvModule(nn.Module):
def __init__(
self,
dim,
causal = False,
expansion_factor = 2,
kernel_size = 31,
dropout = 0.
):
super().__init__()
inner_dim = dim * expansion_factor
padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
self.net = nn.Sequential(
nn.LayerNorm(dim),
Rearrange('b n c -> b c n'),
nn.Conv1d(dim, inner_dim * 2, 1),
GLU(dim=1),
DepthWiseConv1d(inner_dim, inner_dim, kernel_size = kernel_size, padding = padding),
nn.BatchNorm1d(inner_dim) if not causal else nn.Identity(),
Swish(),
nn.Conv1d(inner_dim, dim, 1),
Rearrange('b c n -> b n c'),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
# Conformer Block
class ConformerBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
ff_mult = 4,
conv_expansion_factor = 2,
conv_kernel_size = 31,
attn_dropout = 0.,
ff_dropout = 0.,
conv_dropout = 0.,
conv_causal = False
):
super().__init__()
self.ff1 = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
self.attn = Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)
self.conv = ConformerConvModule(dim = dim, causal = conv_causal, expansion_factor = conv_expansion_factor, kernel_size = conv_kernel_size, dropout = conv_dropout)
self.ff2 = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
self.attn = PreNorm(dim, self.attn)
self.ff1 = Scale(0.5, PreNorm(dim, self.ff1))
self.ff2 = Scale(0.5, PreNorm(dim, self.ff2))
self.post_norm = nn.LayerNorm(dim)
def forward(self, x, mask = None):
x = self.ff1(x) + x
x = self.attn(x, mask = mask) + x
x = self.conv(x) + x
x = self.ff2(x) + x
x = self.post_norm(x)
return x
# Conformer
class Conformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
conv_expansion_factor = 2,
conv_kernel_size = 31,
attn_dropout = 0.,
ff_dropout = 0.,
conv_dropout = 0.,
conv_causal = False
):
super().__init__()
self.dim = dim
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(ConformerBlock(
dim = dim,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
conv_expansion_factor = conv_expansion_factor,
conv_kernel_size = conv_kernel_size,
conv_causal = conv_causal
))
def forward(self, x):
for block in self.layers:
x = block(x)
return x
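# usage sketch (not part of the original file): a single ConformerBlock and a small
# Conformer stack applied to a batch of 1024 feature vectors of dimension 512
if __name__ == '__main__':
    block = ConformerBlock(
        dim = 512,
        dim_head = 64,
        heads = 8,
        ff_mult = 4,
        conv_expansion_factor = 2,
        conv_kernel_size = 31,
        attn_dropout = 0.,
        ff_dropout = 0.,
        conv_dropout = 0.
    )
    x = torch.randn(1, 1024, 512)
    out = block(x)                             # (1, 1024, 512)
    conformer = Conformer(dim = 512, depth = 2)
    out = conformer(x)                         # (1, 1024, 512)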
|
conformer-master
|
conformer/conformer.py
|
from tqdm import tqdm
import sidechainnet as scn
from ddpm_proteins.utils import get_msa_attention_embeddings, get_msa_transformer
# sidechainnet data
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = 1,
dynamic_batching = False
)
# constants
LENGTH_THRES = 256
# function for fetching MSAs, fill-in depending on your setup
def fetch_msas_fn(aa_str):
"""
    given a protein as an amino acid string, fill in a function that returns its MSAs
    as a list of aligned sequence strings (by default it returns nothing, so only the
    primary sequence is fed into the MSA Transformer); a hypothetical disk-backed variant is sketched after this function
"""
return []
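# hypothetical sketch (not part of the original file): one way fetch_msas_fn could be
# implemented if aligned sequences are already cached on disk, one per line, keyed by a
# hash of the primary sequence - the directory layout and file format here are assumptions
def example_fetch_msas_from_disk(aa_str, msa_dir = './msas', max_msas = 16):
    import os, hashlib
    path = os.path.join(msa_dir, hashlib.md5(aa_str.encode()).hexdigest() + '.txt')
    if not os.path.exists(path):
        return []
    with open(path) as f:
        msas = [line.strip() for line in f if line.strip()]
    return msas[:max_msas]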
# caching loop
model, batch_converter = get_msa_transformer()
for batch in tqdm(data['train']):
if batch.seqs.shape[1] > LENGTH_THRES:
continue
pids = batch.pids
seqs = batch.seqs.argmax(dim = -1)
_ = get_msa_attention_embeddings(
model,
batch_converter,
seqs,
batch.pids,
fetch_msas_fn
)
print('caching complete')
|
ddpm-proteins-main
|
cache.py
|
from setuptools import setup, find_packages
setup(
name = 'ddpm-proteins',
packages = find_packages(),
version = '0.0.11',
license='MIT',
description = 'Denoising Diffusion Probabilistic Models - for Proteins - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/ddpm-proteins',
keywords = [
'artificial intelligence',
'generative models',
'proteins'
],
install_requires=[
'einops',
'matplotlib',
'numpy',
'pillow',
'proDy',
'scipy',
'sidechainnet',
'seaborn',
'torch',
'torchvision',
'tqdm',
'wandb'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
ddpm-proteins-main
|
setup.py
|
import os
import torch
import sidechainnet as scn
from PIL import Image
from random import randrange
import torch
import torch.nn.functional as F
from torch import optim
from ddpm_proteins import Unet, GaussianDiffusion
from ddpm_proteins.utils import save_heatmap, broadcat, get_msa_attention_embeddings, symmetrize, get_msa_transformer, pad_image_to
from einops import rearrange
os.makedirs('./.tmps', exist_ok = True)
# constants
NUM_ITERATIONS = int(2e6)
IMAGE_SIZE = 256
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 8
LEARNING_RATE = 2e-5
SAMPLE_EVERY = 200
SCALE_DISTANCE_BY = 1e2
# experiment tracker
import wandb
wandb.init(project = 'ddpm-proteins')
wandb.run.name = f'proteins of length {IMAGE_SIZE} or less'
wandb.run.save()
# model
model = Unet(
dim = 32,
dim_mults = (1, 2, 4, 8),
channels = 1,
condition_dim = 1 + 144 # mask (1) + attention embedding size (144)
)
diffusion = GaussianDiffusion(
model,
image_size = IMAGE_SIZE,
timesteps = 1000, # number of steps
loss_type = 'l1' # L1 or L2
)
def cycle(loader, thres = 256):
while True:
for data in loader:
if data.seqs.shape[1] <= thres:
yield data
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = BATCH_SIZE,
dynamic_batching = False
)
model, batch_converter = get_msa_transformer()
model = model.cuda(1) # put msa transformer on cuda device 1
opt = optim.Adam(diffusion.parameters(), lr = LEARNING_RATE)
train_dl = cycle(data['train'], thres = IMAGE_SIZE)
valid_dl = cycle(data['test'], thres = IMAGE_SIZE)
diffusion = diffusion.cuda()
upper_triangular_mask = torch.ones(IMAGE_SIZE, IMAGE_SIZE).triu_(1).bool().cuda()
for ind in range(NUM_ITERATIONS):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(train_dl)
ids, seqs, coords, masks = batch.pids, batch.seqs, batch.crds, batch.msks
seqs = seqs.argmax(dim = -1)
coords = coords.reshape(BATCH_SIZE, -1, 14, 3)
coords = coords[:, :, 1].cuda() # pick off alpha carbon
dist = torch.cdist(coords, coords)
data = dist[:, None, :, :]
crossed_mask = (masks[:, None, :, None] * masks[:, None, None, :]).cuda()
data.masked_fill_(~crossed_mask.bool(), 0.)
data = pad_image_to(data, IMAGE_SIZE, value = 0.)
crossed_mask = pad_image_to(crossed_mask, IMAGE_SIZE, value = 0.)
data = (data / SCALE_DISTANCE_BY).clamp(0., 1.)
data = data * upper_triangular_mask[None, None, :, :]
msa_attention_embeds = get_msa_attention_embeddings(model, batch_converter, seqs, ids)
msa_attention_embeds = pad_image_to(msa_attention_embeds, IMAGE_SIZE)
condition_tensor = broadcat((msa_attention_embeds.cuda(0), crossed_mask.float()), dim = 1)
loss = diffusion(data, condition_tensor = condition_tensor)
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(loss.item())
wandb.log({'loss': loss.item()})
opt.step()
opt.zero_grad()
if (ind % SAMPLE_EVERY) == 0:
batch = next(valid_dl)
ids, seqs, coords, masks = batch.pids, batch.seqs, batch.crds, batch.msks
seqs = seqs.argmax(dim = -1)
coords = coords.reshape(BATCH_SIZE, -1, 14, 3)
coords = coords[:, :, 1].cuda() # pick off alpha carbon
dist = torch.cdist(coords, coords)
data = dist[:, None, :, :]
crossed_mask = (masks[:, None, :, None] * masks[:, None, None, :]).cuda()
data.masked_fill_(~crossed_mask.bool(), 0.)
data = pad_image_to(data, IMAGE_SIZE, value = 0.)
valid_data = (data / SCALE_DISTANCE_BY).clamp(0., 1.)
crossed_mask = pad_image_to(crossed_mask, IMAGE_SIZE, value = 0.)[:1].float()
msa_attention_embeds = get_msa_attention_embeddings(model, batch_converter, seqs[:1], ids[:1])
msa_attention_embeds = pad_image_to(msa_attention_embeds, IMAGE_SIZE)
condition_tensor = broadcat((msa_attention_embeds.cuda(0), crossed_mask.float()), dim = 1)
sampled = diffusion.sample(batch_size = 1, condition_tensor = condition_tensor)[0][0]
sampled = sampled.clamp(0., 1.) * upper_triangular_mask
sampled = symmetrize(sampled)
img = save_heatmap(sampled, './.tmps/validation.png', dpi = 100, return_image = True)
crossed_mask_img = save_heatmap(crossed_mask[0][0], './.tmps/mask.png', dpi = 100, return_image = True)
truth_img = save_heatmap(valid_data[0][0], './.tmps/truth.png', dpi = 100, return_image = True)
wandb.log({'sample': wandb.Image(img), 'mask': wandb.Image(crossed_mask_img), 'truth': wandb.Image(truth_img)})
|
ddpm-proteins-main
|
train.py
|
from ddpm_proteins.ddpm_proteins import GaussianDiffusion, Unet, Trainer
|
ddpm-proteins-main
|
ddpm_proteins/__init__.py
|
import os
from PIL import Image
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sidechainnet.utils.sequence import ProteinVocabulary
from einops import rearrange
# general functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
# singleton msa transformer
msa_instances = None
def get_msa_transformer():
global msa_instances
if not exists(msa_instances):
msa_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S")
batch_converter = alphabet.get_batch_converter()
        msa_instances = (msa_model, batch_converter)  # cache the singleton so the hub model is only loaded once
    return msa_instances
# MSA embedding related functions
VOCAB = ProteinVocabulary()
def ids_to_aa_str(x):
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
is_char = lambda c: isinstance(c, str) and len(c) == 1
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_aa_str(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(is_char, out)):
return ''.join(out)
return out
def aa_str_to_embed_input(x):
assert isinstance(x, list), 'input must be a list'
out = []
for el in x:
if isinstance(el, list):
out.append(aa_str_to_embed_input(el))
elif isinstance(el, str):
out.append((None, el))
else:
raise TypeError('type must be either list or string')
return out
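# average product correction (APC), as commonly applied to MSA attention / coupling maps
# before contact prediction; subtracts (row sum x column sum) / total sum from each entry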
def apc(x):
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12)
normalized = x - avg
return normalized
def symmetrize(x):
return x + x.transpose(-1, -2)
def pad_image_to(tensor, size, value = 0.):
remainder = size - tensor.shape[-1]
tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value)
return tensor
# getting a single MSA attention embedding, with caching
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))
FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))
os.makedirs(CACHE_PATH, exist_ok = True)
@torch.no_grad()
def get_msa_attention_embedding(
model,
batch_converter,
aa_str,
id,
fetch_msas_fn = lambda t: [],
cache = True
):
device = next(model.parameters()).device
cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt')
if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path):
try:
loaded = torch.load(cache_full_path).to(device)
        except Exception:  # treat a corrupt / unreadable cache entry as a cache miss
loaded = None
if exists(loaded):
return loaded
msas = default(fetch_msas_fn(aa_str), [])
seq_with_msas = [aa_str, *msas]
embed_inputs = aa_str_to_embed_input(seq_with_msas)
_, _, msa_batch_tokens = batch_converter(embed_inputs)
results = model(msa_batch_tokens.to(device), need_head_weights = True)
attentions = results['row_attentions']
attentions = attentions[..., 1:, 1:]
attentions = rearrange(attentions, 'b l h m n -> b (l h) m n')
attentions = apc(symmetrize(attentions))
if cache:
print(f'caching to {cache_full_path}')
torch.save(attentions, cache_full_path)
return attentions
def get_msa_attention_embeddings(
model,
batch_converter,
seqs,
ids,
fetch_msas_fn = lambda t: [],
cache = True
):
n = seqs.shape[1]
seqs = rearrange(seqs, 'b n -> b () n')
aa_strs = ids_to_aa_str(seqs.cpu().tolist())
embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)]
embeds_list = [pad_image_to(embed, n) for embed in embeds_list]
embeds = torch.cat(embeds_list, dim = 0)
return embeds
# training utils
def cycle(loader, thres = 256):
while True:
for data in loader:
if data.seqs.shape[1] <= thres:
yield data
def save_heatmap(tensor, filepath, dpi = 200, return_image = False):
heatmap = sn.heatmap(tensor.cpu().numpy())
figure = heatmap.get_figure()
figure.savefig(filepath, dpi = dpi)
plt.clf()
if not return_image:
return
return Image.open(filepath)
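# usage sketch (not part of the original file): the tensor helpers above in isolation
if __name__ == '__main__':
    a = torch.randn(2, 144, 64, 64)
    b = torch.randn(2, 1, 64, 64)
    cond = broadcat((a, b), dim = 1)           # (2, 145, 64, 64)
    padded = pad_image_to(cond, 128)           # (2, 145, 128, 128), zero padded bottom / right
    sym = symmetrize(torch.randn(64, 64))      # equals its own transpose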
|
ddpm-proteins-main
|
ddpm_proteins/utils.py
|
import math
from math import log, pi
import copy
import torch
from torch import nn, einsum
import torch.nn.functional as F
from inspect import isfunction
from functools import partial
from torch.utils import data
from pathlib import Path
from torch.optim import Adam
from torchvision import transforms, utils
from PIL import Image
import numpy as np
from tqdm import tqdm
from einops import rearrange, repeat
from ddpm_proteins.utils import broadcat
try:
from apex import amp
APEX_AVAILABLE = True
except:
APEX_AVAILABLE = False
# constants
SAVE_AND_SAMPLE_EVERY = 1000
UPDATE_EMA_EVERY = 10
EXTS = ['jpg', 'jpeg', 'png']
RESULTS_FOLDER = Path('./results')
RESULTS_FOLDER.mkdir(exist_ok = True)
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def loss_backwards(fp16, loss, optimizer, **kwargs):
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(**kwargs)
else:
loss.backward(**kwargs)
# small helper modules
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_model_average(self, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.update_average(old_weight, up_weight)
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class Mish(nn.Module):
def forward(self, x):
return x * torch.tanh(F.softplus(x))
class Upsample(nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1)
def forward(self, x):
return self.conv(x)
class Downsample(nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = nn.Conv2d(dim, dim, 3, 2, 1)
def forward(self, x):
return self.conv(x)
# building block modules
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
time_emb_dim,
hybrid_dim_conv = False,
groups = 8
):
super().__init__()
kernels = ((3, 3),)
paddings = ((1, 1),)
if hybrid_dim_conv:
kernels = (*kernels, (9, 1), (1, 9))
paddings = (*paddings, (4, 0), (0, 4))
self.mlp = nn.Sequential(
Mish(),
nn.Linear(time_emb_dim, dim_out)
)
self.blocks_in = nn.ModuleList([])
self.blocks_out = nn.ModuleList([])
for kernel, padding in zip(kernels, paddings):
self.blocks_in.append(nn.Sequential(
nn.Conv2d(dim, dim_out, kernel, padding = padding),
nn.GroupNorm(groups, dim_out),
Mish()
))
self.blocks_out.append(nn.Sequential(
nn.Conv2d(dim_out, dim_out, kernel, padding = padding),
nn.GroupNorm(groups, dim_out),
Mish()
))
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb):
hiddens = [fn(x) for fn in self.blocks_in]
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c () ()')
hiddens = [h + time_emb for h in hiddens]
hiddens = [fn(h) for fn, h in zip(self.blocks_out, hiddens)]
return sum(hiddens) + self.res_conv(x)
# rotary embeddings
def apply_rotary_emb(q, k, pos_emb):
sin, cos = pos_emb
dim_rotary = sin.shape[-1]
(q, q_pass), (k, k_pass) = map(lambda t: (t[..., :dim_rotary], t[..., dim_rotary:]), (q, k))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
q, k = map(lambda t: torch.cat(t, dim = -1), ((q, q_pass), (k, k_pass)))
return q, k
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
class AxialRotaryEmbedding(nn.Module):
def __init__(self, dim, max_freq = 10):
super().__init__()
self.dim = dim
scales = torch.logspace(0., log(max_freq / 2) / log(2), self.dim // 4, base = 2)
self.cached_pos_emb = None
self.register_buffer('scales', scales)
def forward(self, x):
device, dtype, h, w = x.device, x.dtype, *x.shape[-2:]
if exists(self.cached_pos_emb):
return self.cached_pos_emb
seq_x = torch.linspace(-1., 1., steps = h, device = device)
seq_x = seq_x.unsqueeze(-1)
seq_y = torch.linspace(-1., 1., steps = w, device = device)
seq_y = seq_y.unsqueeze(-1)
scales = self.scales[(*((None,) * (len(seq_x.shape) - 1)), Ellipsis)]
scales = scales.to(x)
scales = self.scales[(*((None,) * (len(seq_y.shape) - 1)), Ellipsis)]
scales = scales.to(x)
seq_x = seq_x * scales * pi
seq_y = seq_y * scales * pi
x_sinu = repeat(seq_x, 'i d -> i j d', j = w)
y_sinu = repeat(seq_y, 'j d -> i j d', i = h)
sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)
cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)
sin, cos = map(lambda t: rearrange(t, 'i j d -> i j d'), (sin, cos))
sin, cos = map(lambda t: repeat(t, 'i j d -> () (i j) (d r)', r = 2), (sin, cos))
self.cached_pos_emb = (sin, cos)
return sin, cos
# linear attention
def linear_attn_kernel(t):
return F.elu(t) + 1
def linear_attention(q, k, v):
k_sum = k.sum(dim = -2)
D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_sum.type_as(q))
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
return out
class LayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.pos_emb = AxialRotaryEmbedding(dim = dim_head)
self.norm = LayerNorm(dim)
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w, heads = *x.shape, self.heads
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h (x y) c', h = heads), (q, k, v))
sin, cos = self.pos_emb(x)
q, k = apply_rotary_emb(q, k, (sin, cos))
q = linear_attn_kernel(q)
k = linear_attn_kernel(k)
q = q * self.scale
out = linear_attention(q, k, v)
out = rearrange(out, 'b h (x y) c -> b (h c) x y', h = heads, x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
out_dim = None,
dim_mults=(1, 2, 4, 8),
groups = 8,
channels = 3,
condition_dim = 0,
hybrid_dim_conv = False,
):
super().__init__()
self.channels = channels
self.condition_dim = condition_dim
input_channels = channels + condition_dim # allow for conditioning, to prepare for MSA Transformers
dims = [input_channels, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
self.time_pos_emb = SinusoidalPosEmb(dim)
self.mlp = nn.Sequential(
nn.Linear(dim, dim * 4),
Mish(),
nn.Linear(dim * 4, dim)
)
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
get_resnet_block = partial(ResnetBlock, time_emb_dim = dim, hybrid_dim_conv = hybrid_dim_conv)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_first = ind == 0
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
get_resnet_block(dim_in, dim_out),
get_resnet_block(dim_out, dim_out),
Residual(LinearAttention(dim_out)),
Downsample(dim_out) if not is_last else nn.Identity()
]))
mid_dim = dims[-1]
self.mid_block1 = get_resnet_block(mid_dim, mid_dim)
self.mid_attn = Residual(LinearAttention(mid_dim))
self.mid_block2 = get_resnet_block(mid_dim, mid_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])):
is_last = ind >= (num_resolutions - 1)
self.ups.append(nn.ModuleList([
get_resnet_block(dim_out * 2, dim_in),
get_resnet_block(dim_in, dim_in),
Residual(LinearAttention(dim_in)),
Upsample(dim_in) if not is_last else nn.Identity()
]))
out_dim = default(out_dim, channels)
self.final_conv = nn.Sequential(
nn.Sequential(
nn.Conv2d(dim, dim, 3, padding = 1),
nn.GroupNorm(groups, dim),
Mish()
),
nn.Conv2d(dim, out_dim, 1)
)
def forward(self, x, time):
t = self.time_pos_emb(time)
t = self.mlp(t)
h = []
for resnet, resnet2, attn, downsample in self.downs:
x = resnet(x, t)
x = resnet2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for resnet, resnet2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim=1)
x = resnet(x, t)
x = resnet2(x, t)
x = attn(x)
x = upsample(x)
return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = np.linspace(0, timesteps, steps)
alphas_cumprod = np.cos(((x / timesteps) + s) / (1 + s) * np.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return np.clip(betas, a_min = 0, a_max = 0.999)
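# e.g. cosine_beta_schedule(1000) returns an array of 1000 betas that start near zero,
# increase over the chain, and are clipped at 0.999 near the final timesteps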
class GaussianDiffusion(nn.Module):
def __init__(
self,
denoise_model,
*,
image_size,
timesteps = 1000,
loss_type = 'l1',
betas = None
):
super().__init__()
self.channels = denoise_model.channels
self.condition_dim = denoise_model.condition_dim
self.image_size = image_size
self.denoise_model = denoise_model
if exists(betas):
betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas
else:
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.loss_type = loss_type
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
def q_mean_variance(self, x_start, t):
mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, clip_denoised = True, condition_tensor = None):
denoise_model_input = x
if exists(condition_tensor):
denoise_model_input = broadcat((condition_tensor, x), dim = 1)
denoise_model_output = self.denoise_model(denoise_model_input, t)
x_recon = self.predict_start_from_noise(x, t = t, noise = denoise_model_output)
if clip_denoised:
x_recon.clamp_(0., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, clip_denoised = True, repeat_noise = False, condition_tensor = None):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x = x, t = t, clip_denoised = clip_denoised, condition_tensor = condition_tensor)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_loop(self, shape, condition_tensor = None):
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='sampling loop time step', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), condition_tensor = condition_tensor)
return img
@torch.no_grad()
def sample(self, batch_size = 16, condition_tensor = None):
assert not (self.condition_dim > 0 and not exists(condition_tensor)), 'the conditioning tensor needs to be passed'
image_size = self.image_size
channels = self.channels
return self.p_sample_loop((batch_size, channels, image_size, image_size), condition_tensor = condition_tensor)
@torch.no_grad()
def interpolate(self, x1, x2, t = None, lam = 0.5):
b, *_, device = *x1.shape, x1.device
t = default(t, self.num_timesteps - 1)
assert x1.shape == x2.shape
t_batched = torch.stack([torch.tensor(t, device=device)] * b)
xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2))
img = (1 - lam) * xt1 + lam * xt2
for i in tqdm(reversed(range(0, t)), desc='interpolation sample time step', total=t):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))
return img
def q_sample(self, x_start, t, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, noise = None, condition_tensor = None):
b, c, h, w = x_start.shape
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start = x_start, t = t, noise = noise)
if exists(condition_tensor):
x_noisy = broadcat((condition_tensor, x_noisy), dim = 1)
x_recon = self.denoise_model(x_noisy, t)
if self.loss_type == 'l1':
loss = (noise - x_recon).abs().mean()
elif self.loss_type == 'l2':
loss = F.mse_loss(noise, x_recon)
else:
raise NotImplementedError()
return loss
def forward(self, x, *args, **kwargs):
        b, c, h, w, device, img_size = *x.shape, x.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
return self.p_losses(x, t, *args, **kwargs)
# dataset classes
class Dataset(data.Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(image_size),
transforms.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
ema_decay = 0.995,
image_size = 128,
train_batch_size = 32,
train_lr = 2e-5,
train_num_steps = 100000,
gradient_accumulate_every = 2,
fp16 = False,
step_start_ema = 2000
):
super().__init__()
self.model = diffusion_model
self.ema = EMA(ema_decay)
self.ema_model = copy.deepcopy(self.model)
self.step_start_ema = step_start_ema
self.batch_size = train_batch_size
self.image_size = diffusion_model.image_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
self.ds = Dataset(folder, image_size)
self.dl = cycle(data.DataLoader(self.ds, batch_size = train_batch_size, shuffle=True, pin_memory=True))
self.opt = Adam(diffusion_model.parameters(), lr=train_lr)
self.step = 0
assert not fp16 or fp16 and APEX_AVAILABLE, 'Apex must be installed in order for mixed precision training to be turned on'
self.fp16 = fp16
if fp16:
(self.model, self.ema_model), self.opt = amp.initialize([self.model, self.ema_model], self.opt, opt_level='O1')
self.reset_parameters()
def reset_parameters(self):
self.ema_model.load_state_dict(self.model.state_dict())
def step_ema(self):
if self.step < self.step_start_ema:
self.reset_parameters()
return
self.ema.update_model_average(self.ema_model, self.model)
def save(self, milestone):
data = {
'step': self.step,
'model': self.model.state_dict(),
'ema': self.ema_model.state_dict()
}
torch.save(data, str(RESULTS_FOLDER / f'model-{milestone}.pt'))
def load(self, milestone):
data = torch.load(str(RESULTS_FOLDER / f'model-{milestone}.pt'))
self.step = data['step']
self.model.load_state_dict(data['model'])
self.ema_model.load_state_dict(data['ema'])
def train(self):
backwards = partial(loss_backwards, self.fp16)
while self.step < self.train_num_steps:
for i in range(self.gradient_accumulate_every):
data = next(self.dl).cuda()
loss = self.model(data)
print(f'{self.step}: {loss.item()}')
backwards(loss / self.gradient_accumulate_every, self.opt)
self.opt.step()
self.opt.zero_grad()
if self.step % UPDATE_EMA_EVERY == 0:
self.step_ema()
if self.step != 0 and self.step % SAVE_AND_SAMPLE_EVERY == 0:
milestone = self.step // SAVE_AND_SAMPLE_EVERY
batches = num_to_groups(36, self.batch_size)
all_images_list = list(map(lambda n: self.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim=0)
utils.save_image(all_images, str(RESULTS_FOLDER / f'sample-{milestone}.png'), nrow=6)
self.save(milestone)
self.step += 1
print('training completed')
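# usage sketch (not part of the original file): an unconditional training step and sampling,
# with channels = 1 to mirror the distance-map setup used in train.py; sampling runs the
# full reverse chain, so it is slow on CPU
if __name__ == '__main__':
    model = Unet(dim = 32, dim_mults = (1, 2, 4, 8), channels = 1)
    diffusion = GaussianDiffusion(model, image_size = 64, timesteps = 1000, loss_type = 'l1')
    images = torch.rand(2, 1, 64, 64)            # values in [0, 1], as in the training script
    loss = diffusion(images)
    loss.backward()
    samples = diffusion.sample(batch_size = 2)   # (2, 1, 64, 64)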
|
ddpm-proteins-main
|
ddpm_proteins/ddpm_proteins.py
|
from setuptools import setup
__VERSION__ = '0.0.3'
setup(name='adamod',
version=__VERSION__,
description='AdaMod optimization algorithm, build on PyTorch.',
long_description=open("README.md", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords=['machine learning', 'deep learning'],
classifiers=[
'Intended Audience :: Science/Research',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
url='https://github.com/karrynest/AdaMod',
author='Jianbang Ding',
author_email='jianbangding@pku.edu.cn',
license='Apache',
packages=['adamod'],
install_requires=[
'torch>=0.4.0',
],
zip_safe=False,
python_requires='>=3.6.0')
|
AdaMod-master
|
setup.py
|