python_code | repo_name | file_path
---|---|---
"""Official evaluation script for SQuAD version 2.0.
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
OPTS = None
def parse_args():
parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
parser.add_argument(
"--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
)
parser.add_argument(
"--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
)
parser.add_argument(
"--na-prob-thresh",
"-t",
type=float,
default=1.0,
help='Predict "" if no-answer probability exceeds this (default = 1.0).',
)
parser.add_argument(
"--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
)
parser.add_argument("--verbose", "-v", action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid_to_has_ans[qa["id"]] = bool(qa["answers"])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
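# Illustrative behavior (comment added for clarity, not part of the official script):
#   normalize_answer("The Cat, sat!") -> "cat sat"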
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
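# Worked example (illustrative, not part of the official script):
#   gold "the cat sat" vs. pred "cat sat down" -> tokens ["cat", "sat"] vs. ["cat", "sat", "down"]
#   overlap = 2, precision = 2/3, recall = 2/2, F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8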
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid = qa["id"]
gold_answers = [a["text"] for a in qa["answers"] if normalize_answer(a["text"])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if qid not in preds:
print("Missing prediction for %s" % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
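# Illustrative behavior (comment added for clarity, not part of the official script):
#   with na_prob_thresh=0.5, a question whose na_prob is 0.7 is treated as predicted-unanswerable,
#   so its score becomes 1.0 if the gold answers are empty and 0.0 otherwise; questions at or
#   below the threshold keep their original EM/F1 score.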
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(f1_scores.values()) / total),
("total", total),
]
)
else:
total = len(qid_list)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i + 1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
plot_pr_curve(precisions, recalls, out_image, title)
return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
if out_image_dir and not os.path.exists(out_image_dir):
os.makedirs(out_image_dir)
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw,
na_probs,
num_true_pos,
qid_to_has_ans,
out_image=os.path.join(out_image_dir, "pr_exact.png"),
title="Precision-Recall curve for Exact Match score",
)
pr_f1 = make_precision_recall_eval(
f1_raw,
na_probs,
num_true_pos,
qid_to_has_ans,
out_image=os.path.join(out_image_dir, "pr_f1.png"),
title="Precision-Recall curve for F1 score",
)
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores,
na_probs,
num_true_pos,
qid_to_has_ans,
out_image=os.path.join(out_image_dir, "pr_oracle.png"),
title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
)
merge_eval(main_eval, pr_exact, "pr_exact")
merge_eval(main_eval, pr_f1, "pr_f1")
merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x) / float(len(x))
plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title("Histogram of no-answer probability: %s" % name)
plt.savefig(os.path.join(image_dir, "na_prob_hist_%s.png" % name))
plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
def main():
with open(OPTS.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json["data"]
with open(OPTS.pred_file) as f:
preds = json.load(f)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
na_probs = json.load(f)
else:
na_probs = {k: 0.0 for k in preds}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, preds)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, "HasAns")
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
if OPTS.out_file:
with open(OPTS.out_file, "w") as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| nlp-master | metrics/squad_v2/evaluate.py |
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SQuAD metric. """
import nlp
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict {'text': list of possible texts for the answer, as a list of strings}
Returns:
'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
"""
class Squad(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': {
"id": nlp.Value("string"),
"prediction_text": nlp.Value("string")
},
'references': {
'id': nlp.Value('string'),
'answers': nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
)
},
}),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"]
)
def _compute(self, predictions, references):
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
dataset = [{'paragraphs': [{'qas': [
{"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"]} for ref in references
]}]}]
score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| nlp-master | metrics/squad/squad.py |
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| nlp-master | metrics/squad/evaluate.py |
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ROUGE metric from Google Research github repo. """
import nlp
# The dependencies in https://github.com/google-research/google-research/blob/master/rouge/requirements.txt
import absl # Here to have a nice missing dependency error message early on
import nltk # Here to have a nice missing dependency error message early on
import numpy # Here to have a nice missing dependency error message early on
import six # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer
from rouge_score import scoring
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Returns:
rouge1: rouge_1 f1,
rouge2: rouge_2 f1,
rougeL: rouge_l f1,
rougeLsum: rouge_lsum f1
"""
class Rouge(nlp.Metric):
def _info(self):
return nlp.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=nlp.Features({
'predictions': nlp.Value('string', id='sequence'),
'references': nlp.Value('string', id='sequence'),
}),
codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
reference_urls=["https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge"]
)
def _compute(self, predictions, references, rouge_types=None, use_agregator=True, use_stemmer=False):
if rouge_types is None:
rouge_types = ['rouge1', 'rougeL']
scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
if use_agregator:
aggregator = scoring.BootstrapAggregator()
else:
scores = []
for ref, pred in zip(references, predictions):
score = scorer.score(ref, pred)
if use_agregator:
aggregator.add_scores(score)
else:
scores.append(score)
if use_agregator:
result = aggregator.aggregate()
else:
result = {}
for key in scores[0]:
result[key] = list(score[key] for score in scores)
return result
| nlp-master | metrics/rouge/rouge.py |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The SuperGLUE benchmark."""
from __future__ import absolute_import, division, print_function
import json
import os
import six
import nlp
_SUPER_GLUE_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
Note that each SuperGLUE dataset has its own citation. Please see the source to
get the correct citation for each contained dataset.
"""
_GLUE_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_BOOLQ_DESCRIPTION = """\
BoolQ (Boolean Questions, Clark et al., 2019a) is a QA task where each example consists of a short
passage and a yes/no question about the passage. The questions are provided anonymously and
unsolicited by users of the Google search engine, and afterwards paired with a paragraph from a
Wikipedia article containing the answer. Following the original work, we evaluate with accuracy."""
_CB_DESCRIPTION = """\
The CommitmentBank (De Marneffe et al., 2019) is a corpus of short texts in which at least
one sentence contains an embedded clause. Each of these embedded clauses is annotated with the
degree to which we expect that the person who wrote the text is committed to the truth of the clause.
The resulting task is framed as three-class textual entailment on examples that are drawn from the Wall
Street Journal, fiction from the British National Corpus, and Switchboard. Each example consists
of a premise containing an embedded clause and the corresponding hypothesis is the extraction of
that clause. We use a subset of the data that had inter-annotator agreement above 0.85. The data is
imbalanced (relatively fewer neutral examples), so we evaluate using accuracy and F1, where for
multi-class F1 we compute the unweighted average of the F1 per class."""
_COPA_DESCRIPTION = """\
The Choice Of Plausible Alternatives (COPA, Roemmele et al., 2011) dataset is a causal
reasoning task in which a system is given a premise sentence and two possible alternatives. The
system must choose the alternative which has the more plausible causal relationship with the premise.
The method used for the construction of the alternatives ensures that the task requires causal reasoning
to solve. Examples either deal with alternative possible causes or alternative possible effects of the
premise sentence, accompanied by a simple question disambiguating between the two instance
types for the model. All examples are handcrafted and focus on topics from online blogs and a
photography-related encyclopedia. Following the recommendation of the authors, we evaluate using
accuracy."""
_RECORD_DESCRIPTION = """\
ReCoRD (Reading Comprehension with Commonsense Reasoning Dataset, Zhang et al., 2018) is a
multiple-choice QA task. Each example consists of a news article and a Cloze-style question about
the article in which one entity is masked out. The system must predict the masked out entity from a
given list of possible entities in the provided passage, where the same entity may be expressed using
multiple different surface forms, all of which are considered correct. Articles are drawn from CNN
and Daily Mail. Following the original work, we evaluate with max (over all mentions) token-level
F1 and exact match (EM)."""
_RTE_DESCRIPTION = """\
The Recognizing Textual Entailment (RTE) datasets come from a series of annual competitions
on textual entailment, the problem of predicting whether a given premise sentence entails a given
hypothesis sentence (also known as natural language inference, NLI). RTE was previously included
in GLUE, and we use the same data and format as before: We merge data from RTE1 (Dagan
et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli
et al., 2009). All datasets are combined and converted to two-class classification: entailment and
not_entailment. Of all the GLUE tasks, RTE was among those that benefited from transfer learning
the most, jumping from near random-chance performance (~56%) at the time of GLUE's launch to
85% accuracy (Liu et al., 2019c) at the time of writing. Given the eight point gap with respect to
human performance, however, the task is not yet solved by machines, and we expect the remaining
gap to be difficult to close."""
_MULTIRC_DESCRIPTION = """\
The Multi-Sentence Reading Comprehension dataset (MultiRC, Khashabi et al., 2018)
is a true/false question-answering task. Each example consists of a context paragraph, a question
about that paragraph, and a list of possible answers to that question which must be labeled as true or
false. Question-answering (QA) is a popular problem with many datasets. We use MultiRC because
of a number of desirable properties: (i) each question can have multiple possible correct answers,
so each question-answer pair must be evaluated independent of other pairs, (ii) the questions are
designed such that answering each question requires drawing facts from multiple context sentences,
and (iii) the question-answer pair format more closely matches the API of other SuperGLUE tasks
than span-based extractive QA does. The paragraphs are drawn from seven domains including news,
fiction, and historical text."""
_WIC_DESCRIPTION = """\
The Word-in-Context (WiC, Pilehvar and Camacho-Collados, 2019) dataset supports a word
sense disambiguation task cast as binary classification over sentence pairs. Given two sentences and a
polysemous (sense-ambiguous) word that appears in both sentences, the task is to determine whether
the word is used with the same sense in both sentences. Sentences are drawn from WordNet (Miller,
1995), VerbNet (Schuler, 2005), and Wiktionary. We follow the original work and evaluate using
accuracy."""
_WSC_DESCRIPTION = """\
The Winograd Schema Challenge (WSC, Levesque et al., 2012) is a reading comprehension
task in which a system must read a sentence with a pronoun and select the referent of that pronoun
from a list of choices. Given the difficulty of this task and the headroom still left, we have included
WSC in SuperGLUE and recast the dataset into its coreference form. The task is cast as a binary
classification problem, as opposed to N-multiple choice, in order to isolate the model's ability to
understand the coreference links within a sentence as opposed to various other strategies that may
come into play in multiple choice conditions. With that in mind, we create a split with 65% negative
majority class in the validation set, reflecting the distribution of the hidden test set, and 52% negative
class in the training set. The training and validation examples are drawn from the original Winograd
Schema dataset (Levesque et al., 2012), as well as those distributed by the affiliated organization
Commonsense Reasoning. The test examples are derived from fiction books and have been shared
with us by the authors of the original dataset. Previously, a version of WSC recast as NLI was included
in GLUE, known as WNLI. No substantial progress was made on WNLI, with many submissions
opting to submit only majority class predictions. WNLI was made especially difficult due to an
adversarial train/dev split: Premise sentences that appeared in the training set sometimes appeared
in the development set with a different hypothesis and a flipped label. If a system memorized the
training set without meaningfully generalizing, which was easy due to the small size of the training
set, it could perform far below chance on the development set. We remove this adversarial design
in the SuperGLUE version of WSC by ensuring that no sentences are shared between the training,
validation, and test sets.
However, the validation and test sets come from different domains, with the validation set consisting
of ambiguous examples such that changing one non-noun phrase word will change the coreference
dependencies in the sentence. The test set consists only of more straightforward examples, with a
high number of noun phrases (and thus more choices for the model), but low to no ambiguity."""
_AXB_DESCRIPTION = """\
An expert-constructed,
diagnostic dataset that automatically tests models for a broad range of linguistic, commonsense, and
world knowledge. Each example in this broad-coverage diagnostic is a sentence pair labeled with
a three-way entailment relation (entailment, neutral, or contradiction) and tagged with labels that
indicate the phenomena that characterize the relationship between the two sentences. Submissions
to the GLUE leaderboard are required to include predictions from the submission's MultiNLI
classifier on the diagnostic dataset, and analyses of the results were shown alongside the main
leaderboard. Since this broad-coverage diagnostic task has proved difficult for top models, we retain
it in SuperGLUE. However, since MultiNLI is not part of SuperGLUE, we collapse contradiction
and neutral into a single not_entailment label, and request that submissions include predictions
on the resulting set from the model used for the RTE task.
"""
_AXG_DESCRIPTION = """\
Winogender is designed to measure gender
bias in coreference resolution systems. We use the Diverse Natural Language Inference Collection
(DNC; Poliak et al., 2018) version that casts Winogender as a textual entailment task. Each example
consists of a premise sentence with a male or female pronoun and a hypothesis giving a possible
antecedent of the pronoun. Examples occur in minimal pairs, where the only difference between
an example and its pair is the gender of the pronoun in the premise. Performance on Winogender
is measured with both accuracy and the gender parity score: the percentage of minimal pairs for
which the predictions are the same. We note that a system can trivially obtain a perfect gender parity
score by guessing the same class for all examples, so a high gender parity score is meaningless unless
accompanied by high accuracy. As a diagnostic test of gender bias, we view the schemas as having high
positive predictive value and low negative predictive value; that is, they may demonstrate the presence
of gender bias in a system, but not prove its absence.
"""
_BOOLQ_CITATION = """\
@inproceedings{clark2019boolq,
title={BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
author={Clark, Christopher and Lee, Kenton and Chang, Ming-Wei and Kwiatkowski, Tom and Collins, Michael and Toutanova, Kristina},
booktitle={NAACL},
year={2019}
}"""
_CB_CITATION = """\
@article{de_marneff_simons_tonhauser_2019,
title={The CommitmentBank: Investigating projection in naturally occurring discourse},
journal={proceedings of Sinn und Bedeutung 23},
author={De Marneff, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},
year={2019}
}"""
_COPA_CITATION = """\
@inproceedings{roemmele2011choice,
title={Choice of plausible alternatives: An evaluation of commonsense causal reasoning},
author={Roemmele, Melissa and Bejan, Cosmin Adrian and Gordon, Andrew S},
booktitle={2011 AAAI Spring Symposium Series},
year={2011}
}"""
_RECORD_CITATION = """\
@article{zhang2018record,
title={Record: Bridging the gap between human and machine commonsense reading comprehension},
author={Zhang, Sheng and Liu, Xiaodong and Liu, Jingjing and Gao, Jianfeng and Duh, Kevin and Van Durme, Benjamin},
journal={arXiv preprint arXiv:1810.12885},
year={2018}
}"""
_RTE_CITATION = """\
@inproceedings{dagan2005pascal,
title={The PASCAL recognising textual entailment challenge},
author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
booktitle={Machine Learning Challenges Workshop},
pages={177--190},
year={2005},
organization={Springer}
}
@inproceedings{bar2006second,
title={The second pascal recognising textual entailment challenge},
author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
volume={6},
number={1},
pages={6--4},
year={2006},
organization={Venice}
}
@inproceedings{giampiccolo2007third,
title={The third pascal recognizing textual entailment challenge},
author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
pages={1--9},
year={2007},
organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
booktitle={TAC},
year={2009}
}"""
_MULTIRC_CITATION = """\
@inproceedings{MultiRC2018,
author = {Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth},
title = {Looking Beyond the Surface:A Challenge Set for Reading Comprehension over Multiple Sentences},
booktitle = {Proceedings of North American Chapter of the Association for Computational Linguistics (NAACL)},
year = {2018}
}"""
_WIC_CITATION = """\
@article{DBLP:journals/corr/abs-1808-09121,
author={Mohammad Taher Pilehvar and Jos{\'{e}} Camacho{-}Collados},
title={WiC: 10,000 Example Pairs for Evaluating Context-Sensitive Representations},
journal={CoRR},
volume={abs/1808.09121},
year={2018},
url={http://arxiv.org/abs/1808.09121},
archivePrefix={arXiv},
eprint={1808.09121},
timestamp={Mon, 03 Sep 2018 13:36:40 +0200},
biburl={https://dblp.org/rec/bib/journals/corr/abs-1808-09121},
bibsource={dblp computer science bibliography, https://dblp.org}
}"""
_WSC_CITATION = """\
@inproceedings{levesque2012winograd,
title={The winograd schema challenge},
author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
year={2012}
}"""
_AXG_CITATION = """\
@inproceedings{rudinger-EtAl:2018:N18,
author = {Rudinger, Rachel and Naradowsky, Jason and Leonard, Brian and {Van Durme}, Benjamin},
title = {Gender Bias in Coreference Resolution},
booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
month = {June},
year = {2018},
address = {New Orleans, Louisiana},
publisher = {Association for Computational Linguistics}
}
"""
class SuperGlueConfig(nlp.BuilderConfig):
"""BuilderConfig for SuperGLUE."""
def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
"""BuilderConfig for SuperGLUE.
Args:
features: `list[string]`, list of the features that will appear in the
feature dict. Should not include "label".
data_url: `string`, url to download the zip file from.
citation: `string`, citation for the data set.
url: `string`, url for information about the data set.
label_classes: `list[string]`, the list of classes for the label if the
label is present as a string. Non-string labels will be cast to either
'False' or 'True'.
**kwargs: keyword arguments forwarded to super.
"""
# Version history:
# 1.0.2: Fixed non-determinism in ReCoRD.
# 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
# the full release (v2.0).
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
# 0.0.2: Initial version.
super(SuperGlueConfig, self).__init__(version=nlp.Version("1.0.2"), **kwargs)
self.features = features
self.label_classes = label_classes
self.data_url = data_url
self.citation = citation
self.url = url
class SuperGlue(nlp.GeneratorBasedBuilder):
"""The SuperGLUE benchmark."""
BUILDER_CONFIGS = [
SuperGlueConfig(
name="boolq",
description=_BOOLQ_DESCRIPTION,
features=["question", "passage"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
citation=_BOOLQ_CITATION,
url="https://github.com/google-research-datasets/boolean-questions",
),
SuperGlueConfig(
name="cb",
description=_CB_DESCRIPTION,
features=["premise", "hypothesis"],
label_classes=["entailment", "contradiction", "neutral"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/CB.zip",
citation=_CB_CITATION,
url="https://github.com/mcdm/CommitmentBank",
),
SuperGlueConfig(
name="copa",
description=_COPA_DESCRIPTION,
label_classes=["choice1", "choice2"],
# Note that question will only be the X in the statement "What's
# the X for this?".
features=["premise", "choice1", "choice2", "question"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/COPA.zip",
citation=_COPA_CITATION,
url="http://people.ict.usc.edu/~gordon/copa.html",
),
SuperGlueConfig(
name="multirc",
description=_MULTIRC_DESCRIPTION,
features=["paragraph", "question", "answer"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/MultiRC.zip",
citation=_MULTIRC_CITATION,
url="https://cogcomp.org/multirc/",
),
SuperGlueConfig(
name="record",
description=_RECORD_DESCRIPTION,
# Note that entities and answers will be a sequences of strings. Query
# will contain @placeholder as a substring, which represents the word
# to be substituted in.
features=["passage", "query", "entities", "answers"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/ReCoRD.zip",
citation=_RECORD_CITATION,
url="https://sheng-z.github.io/ReCoRD-explorer/",
),
SuperGlueConfig(
name="rte",
description=_RTE_DESCRIPTION,
features=["premise", "hypothesis"],
label_classes=["entailment", "not_entailment"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/RTE.zip",
citation=_RTE_CITATION,
url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
),
SuperGlueConfig(
name="wic",
description=_WIC_DESCRIPTION,
# Note that start1, start2, end1, and end2 will be integers stored as
# nlp.Value('int32').
features=["word", "sentence1", "sentence2", "start1", "start2", "end1", "end2"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WiC.zip",
citation=_WIC_CITATION,
url="https://pilehvar.github.io/wic/",
),
SuperGlueConfig(
name="wsc",
description=_WSC_DESCRIPTION,
# Note that span1_index and span2_index will be integers stored as
# nlp.Value('int32').
features=["text", "span1_index", "span2_index", "span1_text", "span2_text"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
citation=_WSC_CITATION,
url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
),
SuperGlueConfig(
name="wsc.fixed",
description=(
_WSC_DESCRIPTION + "\n\nThis version fixes issues where the spans are not actually "
"substrings of the text."
),
# Note that span1_index and span2_index will be integers stored as
# nlp.Value('int32').
features=["text", "span1_index", "span2_index", "span1_text", "span2_text"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
citation=_WSC_CITATION,
url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
),
SuperGlueConfig(
name="axb",
description=_AXB_DESCRIPTION,
features=["sentence1", "sentence2"],
label_classes=["entailment", "not_entailment"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-b.zip",
citation="", # The GLUE citation is sufficient.
url="https://gluebenchmark.com/diagnostics",
),
SuperGlueConfig(
name="axg",
description=_AXG_DESCRIPTION,
features=["premise", "hypothesis"],
label_classes=["entailment", "not_entailment"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
citation=_AXG_CITATION,
url="https://github.com/rudinger/winogender-schemas",
),
]
def _info(self):
features = {feature: nlp.Value("string") for feature in self.config.features}
if self.config.name.startswith("wsc"):
features["span1_index"] = nlp.Value("int32")
features["span2_index"] = nlp.Value("int32")
if self.config.name == "wic":
features["start1"] = nlp.Value("int32")
features["start2"] = nlp.Value("int32")
features["end1"] = nlp.Value("int32")
features["end2"] = nlp.Value("int32")
if self.config.name == "multirc":
features["idx"] = dict(
{"paragraph": nlp.Value("int32"), "question": nlp.Value("int32"), "answer": nlp.Value("int32"),}
)
elif self.config.name == "record":
features["idx"] = dict({"passage": nlp.Value("int32"), "query": nlp.Value("int32"),})
else:
features["idx"] = nlp.Value("int32")
if self.config.name == "record":
# Entities are the set of possible choices for the placeholder.
features["entities"] = nlp.features.Sequence(nlp.Value("string"))
# Answers are the subset of entities that are correct.
features["answers"] = nlp.features.Sequence(nlp.Value("string"))
else:
features["label"] = nlp.features.ClassLabel(names=self.config.label_classes)
return nlp.DatasetInfo(
description=_GLUE_DESCRIPTION + self.config.description,
features=nlp.Features(features),
homepage=self.config.url,
citation=self.config.citation + "\n" + _SUPER_GLUE_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
task_name = _get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name)
if self.config.name in ["axb", "axg"]:
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"data_file": os.path.join(dl_dir, "{}.jsonl".format(task_name)),
"split": nlp.Split.TEST,
},
),
]
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"data_file": os.path.join(dl_dir, "train.jsonl"), "split": nlp.Split.TRAIN,},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={"data_file": os.path.join(dl_dir, "val.jsonl"), "split": nlp.Split.VALIDATION,},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"data_file": os.path.join(dl_dir, "test.jsonl"), "split": nlp.Split.TEST,},
),
]
def _generate_examples(self, data_file, split):
with open(data_file) as f:
for line in f:
row = json.loads(line)
if self.config.name == "multirc":
paragraph = row["passage"]
for question in paragraph["questions"]:
for answer in question["answers"]:
label = answer.get("label")
key = "%s_%s_%s" % (row["idx"], question["idx"], answer["idx"])
yield key, {
"paragraph": paragraph["text"],
"question": question["question"],
"answer": answer["text"],
"label": -1 if label is None else _cast_label(bool(label)),
"idx": {"paragraph": row["idx"], "question": question["idx"], "answer": answer["idx"]},
}
elif self.config.name == "record":
passage = row["passage"]
for qa in row["qas"]:
yield qa["idx"], {
"passage": passage["text"],
"query": qa["query"],
"entities": _get_record_entities(passage),
"answers": _get_record_answers(qa),
"idx": {"passage": row["idx"], "query": qa["idx"]},
}
else:
if self.config.name.startswith("wsc"):
row.update(row["target"])
example = {feature: row[feature] for feature in self.config.features}
if self.config.name == "wsc.fixed":
example = _fix_wst(example)
example["idx"] = row["idx"]
if "label" in row:
if self.config.name == "copa":
example["label"] = "choice2" if row["label"] else "choice1"
else:
example["label"] = _cast_label(row["label"])
else:
assert split == nlp.Split.TEST, row
example["label"] = -1
yield example["idx"], example
def _fix_wst(ex):
"""Fixes most cases where spans are not actually substrings of text."""
def _fix_span_text(k):
"""Fixes a single span."""
text = ex[k + "_text"]
index = ex[k + "_index"]
if text in ex["text"]:
return
if text in ("Kamenev and Zinoviev", "Kamenev, Zinoviev, and Stalin"):
# There is no way to correct these examples since the subjects have
# intervening text.
return
if "theyscold" in text:
ex["text"].replace("theyscold", "they scold")
ex["span2_index"] = 10
# Make sure case of the first words match.
first_word = ex["text"].split()[index]
if first_word[0].islower():
text = text[0].lower() + text[1:]
else:
text = text[0].upper() + text[1:]
# Remove punctuation in span.
text = text.rstrip(".")
# Replace incorrect whitespace character in span.
text = text.replace("\n", " ")
ex[k + "_text"] = text
assert ex[k + "_text"] in ex["text"], ex
_fix_span_text("span1")
_fix_span_text("span2")
return ex
def _cast_label(label):
"""Converts the label into the appropriate string version."""
if isinstance(label, six.string_types):
return label
elif isinstance(label, bool):
return "True" if label else "False"
elif isinstance(label, six.integer_types):
assert label in (0, 1)
return str(label)
else:
raise ValueError("Invalid label format.")
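# Illustrative behavior (comment added for clarity, not part of the original source):
#   _cast_label(True) -> "True", _cast_label(1) -> "1", _cast_label("entailment") -> "entailment"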
def _get_record_entities(passage):
"""Returns the unique set of entities."""
text = passage["text"]
entities = set()
for entity in passage["entities"]:
entities.add(text[entity["start"] : entity["end"] + 1])
return sorted(entities)
def _get_record_answers(qa):
"""Returns the unique set of answers."""
if "answers" not in qa:
return []
answers = set()
for answer in qa["answers"]:
answers.add(answer["text"])
return sorted(answers)
def _get_task_name_from_data_url(data_url):
return data_url.split("/")[-1].split(".")[0]
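# Illustrative behavior (comment added for clarity, not part of the original source):
#   _get_task_name_from_data_url("https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip") -> "BoolQ"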
| nlp-master | datasets/super_glue/super_glue.py |
"""TODO(math_qa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(math_qa): BibTeX citation
_CITATION = """
"""
# TODO(math_qa):
_DESCRIPTION = """
Our dataset is gathered by using a new representation language to annotate the AQuA-RAT dataset. AQuA-RAT has provided the questions, options, rationale, and the correct options.
"""
_URL = "https://math-qa.github.io/math-QA/data/MathQA.zip"
class MathQa(nlp.GeneratorBasedBuilder):
"""TODO(math_qa): Short description of my dataset."""
# TODO(math_qa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(math_qa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"Problem": nlp.Value("string"),
"Rationale": nlp.Value("string"),
"options": nlp.Value("string"),
"correct": nlp.Value("string"),
"annotated_formula": nlp.Value("string"),
"linear_formula": nlp.Value("string"),
"category": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://math-qa.github.io/math-QA/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(math_qa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_path = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_path, "train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_path, "test.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_path, "dev.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(math_qa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id_, row in enumerate(data):
yield id_, row
| nlp-master | datasets/math_qa/math_qa.py |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Adversarial NLI Corpus."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """\
@InProceedings{nie2019adversarial,
title={Adversarial NLI: A New Benchmark for Natural Language Understanding},
author={Nie, Yixin
and Williams, Adina
and Dinan, Emily
and Bansal, Mohit
and Weston, Jason
and Kiela, Douwe},
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
year = "2020",
publisher = "Association for Computational Linguistics",
}
"""
_DESCRIPTION = """\
The Adversarial Natural Language Inference (ANLI) dataset is a new large-scale NLI benchmark,
collected via an iterative, adversarial human-and-model-in-the-loop procedure.
ANLI is much more difficult than its predecessors including SNLI and MNLI.
It contains three rounds. Each round has train/dev/test splits.
"""
stdnli_label = {
"e": "entailment",
"n": "neutral",
"c": "contradiction",
}
class ANLIConfig(nlp.BuilderConfig):
"""BuilderConfig for ANLI."""
def __init__(self, **kwargs):
"""BuilderConfig for ANLI.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(ANLIConfig, self).__init__(
version=nlp.Version("0.1.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class ANLI(nlp.GeneratorBasedBuilder):
"""ANLI: The ANLI Dataset."""
BUILDER_CONFIGS = [
ANLIConfig(name="plain_text", description="Plain text",),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"uid": nlp.Value("string"),
"premise": nlp.Value("string"),
"hypothesis": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
"reason": nlp.Value("string"),
}
),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage="https://github.com/facebookresearch/anli/",
citation=_CITATION,
)
def _vocab_text_gen(self, filepath):
for _, ex in self._generate_examples(filepath):
yield " ".join([ex["premise"], ex["hypothesis"]])
def _split_generators(self, dl_manager):
downloaded_dir = dl_manager.download_and_extract("https://dl.fbaipublicfiles.com/anli/anli_v0.1.zip")
anli_path = os.path.join(downloaded_dir, "anli_v0.1")
path_dict = dict()
for round_tag in ["R1", "R2", "R3"]:
path_dict[round_tag] = dict()
for split_name in ["train", "dev", "test"]:
path_dict[round_tag][split_name] = os.path.join(anli_path, round_tag, f"{split_name}.jsonl")
return [
# Round 1
nlp.SplitGenerator(name="train_r1", gen_kwargs={"filepath": path_dict["R1"]["train"]}),
nlp.SplitGenerator(name="dev_r1", gen_kwargs={"filepath": path_dict["R1"]["dev"]}),
nlp.SplitGenerator(name="test_r1", gen_kwargs={"filepath": path_dict["R1"]["test"]}),
# Round 2
nlp.SplitGenerator(name="train_r2", gen_kwargs={"filepath": path_dict["R2"]["train"]}),
nlp.SplitGenerator(name="dev_r2", gen_kwargs={"filepath": path_dict["R2"]["dev"]}),
nlp.SplitGenerator(name="test_r2", gen_kwargs={"filepath": path_dict["R2"]["test"]}),
# Round 3
nlp.SplitGenerator(name="train_r3", gen_kwargs={"filepath": path_dict["R3"]["train"]}),
nlp.SplitGenerator(name="dev_r3", gen_kwargs={"filepath": path_dict["R3"]["dev"]}),
nlp.SplitGenerator(name="test_r3", gen_kwargs={"filepath": path_dict["R3"]["test"]}),
]
def _generate_examples(self, filepath):
"""Generate mnli examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
"""
for idx, line in enumerate(open(filepath, "rb")):
if line is not None:
line = line.strip().decode("utf-8")
item = json.loads(line)
reason_text = ""
if "reason" in item:
reason_text = item["reason"]
yield item["uid"], {
"uid": item["uid"],
"premise": item["context"],
"hypothesis": item["hypothesis"],
"label": stdnli_label[item["label"]],
"reason": reason_text,
}
| nlp-master | datasets/anli/anli.py |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exist for the different years using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
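# Illustrative template expansion (comment added for clarity; values taken from the
# "commoncrawl" entry defined below, which uses path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en")):
#   get_path("fr") -> ["commoncrawl.fr-en.fr", "commoncrawl.fr-en.en"]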
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
language_pair: pair of languages that will be used for translation. Should
contain 2 letter coded strings. For example: ("en", "de").
          subsets: Dict[split, list[str]]. List of the subsets to use for each of
            the splits. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
        if language_pair[1] in ["cs", "hi", "ru"]:
            # Note: this `assert` always passes (the exception instance is truthy);
            # the affected configs stay importable, and their manual-download
            # subsets are skipped later in `_split_generators`.
            assert NotImplementedError(
                "The dataset for {}-en is currently not fully supported.".format(language_pair[1])
            )
# +++++++++++++++++++++
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is a abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
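        # The filter script is expected to contain a Perl-style qw{ ... } list of the
        # removed block ids, e.g. "qw{ 1 5 12 }" (hypothetical values); the search
        # below collects those ids into `bad_blocks` as a set of strings.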
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt18/wmt_utils.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT18: Translate dataset."""
import nlp
from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig
_URL = "http://www.statmt.org/wmt18/translation-task.html"
_CITATION = """\
@InProceedings{bojar-EtAl:2018:WMT1,
author = {Bojar, Ond\v{r}ej and Federmann, Christian and Fishel, Mark
and Graham, Yvette and Haddow, Barry and Huck, Matthias and
Koehn, Philipp and Monz, Christof},
title = {Findings of the 2018 Conference on Machine Translation (WMT18)},
booktitle = {Proceedings of the Third Conference on Machine Translation,
Volume 2: Shared Task Papers},
month = {October},
year = {2018},
address = {Belgium, Brussels},
publisher = {Association for Computational Linguistics},
pages = {272--307},
url = {http://www.aclweb.org/anthology/W18-6401}
}
"""
_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "et", "fi", "kk", "ru", "tr", "zh"]]
class Wmt18(Wmt):
"""WMT 18 translation datasets for all {xx, "en"} language pairs."""
# Version history:
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2018 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=nlp.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def manual_download_instructions(self):
if self.config.language_pair[1] in ["cs", "hi", "ru"]:
return "Please download the data manually as explained. TODO(PVP)"
@property
def _subsets(self):
return {
nlp.Split.TRAIN: [
"europarl_v7",
"europarl_v8_18",
"paracrawl_v1",
"commoncrawl",
"newscommentary_v13",
"czeng_17",
"yandexcorpus",
"wikiheadlines_fi",
"wikiheadlines_ru",
"setimes_2",
"uncorpus_v1",
"rapid_2016",
]
+ CWMT_SUBSET_NAMES,
nlp.Split.VALIDATION: ["newsdev2018", "newstest2017", "newstestB2017"],
nlp.Split.TEST: ["newstest2018"],
}
|
nlp-master
|
datasets/wmt18/wmt18.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TED talk high/low-resource paired language data set from Qi, et al. 2018."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_DESCRIPTION = """\
Data sets derived from TED talk transcripts for comparing similar language pairs
where one is high resource and the other is low resource.
"""
_CITATION = """\
@inproceedings{Ye2018WordEmbeddings,
author = {Ye, Qi and Devendra, Sachan and Matthieu, Felix and Sarguna, Padmanabhan and Graham, Neubig},
title = {When and Why are pre-trained word embeddings useful for Neural Machine Translation},
booktitle = {HLT-NAACL},
year = {2018},
}
"""
_DATA_URL = "http://www.phontron.com/data/qi18naacl-dataset.tar.gz"
_VALID_LANGUAGE_PAIRS = (
("az", "en"),
("az_tr", "en"),
("be", "en"),
("be_ru", "en"),
("es", "pt"),
("fr", "pt"),
("gl", "en"),
("gl_pt", "en"),
("he", "pt"),
("it", "pt"),
("pt", "en"),
("ru", "en"),
("ru", "pt"),
("tr", "en"),
)
class TedHrlrConfig(nlp.BuilderConfig):
"""BuilderConfig for TED talk data comparing high/low resource languages."""
def __init__(self, language_pair=(None, None), **kwargs):
"""BuilderConfig for TED talk data comparing high/low resource languages.
The first language in `language_pair` should either be a 2-letter coded
string or two such strings joined by an underscore (e.g., "az" or "az_tr").
In cases where it contains two languages, the train data set will contain an
(unlabelled) mix of the two languages and the validation and test sets
will contain only the first language. This dataset will refer to the
source language by the 5-letter string with the underscore. The second
language in `language_pair` must be a 2-letter coded string.
For example, to get pairings between Russian and English, specify
`("ru", "en")` as `language_pair`. To get a mix of Belarusian and Russian in
the training set and purely Belarusian in the validation and test sets,
specify `("be_ru", "en")`.
Args:
language_pair: pair of languages that will be used for translation. The
first will be used as source and second as target in supervised mode.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s_to_%s" % (language_pair[0].replace("_", ""), language_pair[1])
description = ("Translation dataset from %s to %s in plain text.") % (language_pair[0], language_pair[1])
super(TedHrlrConfig, self).__init__(name=name, description=description, **kwargs)
# Validate language pair.
assert language_pair in _VALID_LANGUAGE_PAIRS, (
"Config language pair (%s, " "%s) not supported"
) % language_pair
self.language_pair = language_pair
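# Usage sketch (illustrative only): for the mixed pair ("be_ru", "en") the generated
# config name is "beru_to_en", and `_split_generators` below reads "be-ru.train" for
# training (the unlabelled Belarusian/Russian mix) but "be.dev" / "be.test" for
# validation and test, i.e. only the first, low-resource language.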
class TedHrlr(nlp.GeneratorBasedBuilder):
"""TED talk data set for comparing high and low resource languages."""
BUILDER_CONFIGS = [
TedHrlrConfig( # pylint: disable=g-complex-comprehension
language_pair=pair, version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
)
for pair in _VALID_LANGUAGE_PAIRS
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
homepage="https://github.com/neulab/word-embeddings-for-nmt",
supervised_keys=self.config.language_pair,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
source, target = self.config.language_pair
data_dir = os.path.join(dl_dir, "datasets", "%s_to_%s" % (source, target))
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"source_file": os.path.join(data_dir, "{}.train".format(source.replace("_", "-"))),
"target_file": os.path.join(data_dir, "{}.train".format(target)),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"source_file": os.path.join(data_dir, "{}.dev".format(source.split("_")[0])),
"target_file": os.path.join(data_dir, "{}.dev".format(target)),
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"source_file": os.path.join(data_dir, "{}.test".format(source.split("_")[0])),
"target_file": os.path.join(data_dir, "{}.test".format(target)),
},
),
]
def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with open(source_file) as f:
source_sentences = f.read().split("\n")
with open(target_file) as f:
target_sentences = f.read().split("\n")
assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(source_sentences),
len(target_sentences),
source_file,
target_file,
)
source, target = self.config.language_pair
for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
result = {"translation": {source: l1, target: l2}}
# Make sure that both translations are non-empty.
if all(result.values()):
yield idx, result
|
nlp-master
|
datasets/ted_hrlr/ted_hrlr.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# Copyright 2019 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yelp Polarity Reviews dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_DESCRIPTION = """\
Large Yelp Review Dataset.
This is a dataset for binary sentiment classification. \
We provide a set of 560,000 highly polar yelp reviews for training, and 38,000 for testing. \
ORIGIN
The Yelp reviews dataset consists of reviews from Yelp. It is extracted
from the Yelp Dataset Challenge 2015 data. For more information, please
refer to http://www.yelp.com/dataset_challenge
The Yelp reviews polarity dataset is constructed by
Xiang Zhang (xiang.zhang@nyu.edu) from the above dataset.
It is first used as a text classification benchmark in the following paper:
Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks
for Text Classification. Advances in Neural Information Processing Systems 28
(NIPS 2015).
DESCRIPTION
The Yelp reviews polarity dataset is constructed by considering stars 1 and 2
negative, and 3 and 4 positive. For each polarity 280,000 training samples and
19,000 testing samples are taken randomly. In total there are 560,000 training
samples and 38,000 testing samples. Negative polarity is class 1,
and positive class 2.
The files train.csv and test.csv contain all the training samples as
comma-separated values. There are 2 columns in them, corresponding to class
index (1 and 2) and review text. The review texts are escaped using double
quotes ("), and any internal double quote is escaped by 2 double quotes ("").
New lines are escaped by a backslash followed with an "n" character,
that is "\n".
"""
_CITATION = """\
@article{zhangCharacterlevelConvolutionalNetworks2015,
archivePrefix = {arXiv},
eprinttype = {arxiv},
eprint = {1509.01626},
primaryClass = {cs},
title = {Character-Level {{Convolutional Networks}} for {{Text Classification}}},
abstract = {This article offers an empirical exploration on the use of character-level convolutional networks (ConvNets) for text classification. We constructed several large-scale datasets to show that character-level convolutional networks could achieve state-of-the-art or competitive results. Comparisons are offered against traditional models such as bag of words, n-grams and their TFIDF variants, and deep learning models such as word-based ConvNets and recurrent neural networks.},
journal = {arXiv:1509.01626 [cs]},
author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},
month = sep,
year = {2015},
}
"""
_DOWNLOAD_URL = "https://s3.amazonaws.com/fast-ai-nlp/yelp_review_polarity_csv.tgz"
class YelpPolarityReviewsConfig(nlp.BuilderConfig):
"""BuilderConfig for YelpPolarityReviews."""
def __init__(self, **kwargs):
"""BuilderConfig for YelpPolarityReviews.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(YelpPolarityReviewsConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class YelpPolarity(nlp.GeneratorBasedBuilder):
"""Yelp Polarity reviews dataset."""
BUILDER_CONFIGS = [YelpPolarityReviewsConfig(name="plain_text", description="Plain text",)]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"text": nlp.Value("string"), "label": nlp.features.ClassLabel(names=["1", "2"]),}),
supervised_keys=None,
homepage="https://course.fast.ai/datasets",
citation=_CITATION,
)
def _vocab_text_gen(self, train_file):
for _, ex in self._generate_examples(train_file):
yield ex["text"]
def _split_generators(self, dl_manager):
arch_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
train_file = os.path.join(arch_path, "yelp_review_polarity_csv", "train.csv")
test_file = os.path.join(arch_path, "yelp_review_polarity_csv", "test.csv")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": train_file}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": test_file}),
]
def _generate_examples(self, filepath):
"""Generate Yelp examples."""
with open(filepath) as f:
for line_id, line in enumerate(f):
# The format of the line is:
# "1", "The text of the review."
yield line_id, {"text": line[5:-2].strip(), "label": line[1]}
|
nlp-master
|
datasets/yelp_polarity/yelp_polarity.py
|
from __future__ import absolute_import, division, print_function
import glob
import logging
import os
import nlp
_CITATION = """\
@inproceedings{schler2006effects,
title={Effects of age and gender on blogging.},
author={Schler, Jonathan and Koppel, Moshe and Argamon, Shlomo and Pennebaker, James W},
booktitle={AAAI spring symposium: Computational approaches to analyzing weblogs},
volume={6},
pages={199--205},
year={2006}
}
"""
_DESCRIPTION = """\
The Blog Authorship Corpus consists of the collected posts of 19,320 bloggers gathered from blogger.com in August 2004. The corpus incorporates a total of 681,288 posts and over 140 million words - or approximately 35 posts and 7250 words per person.
Each blog is presented as a separate file, the name of which indicates a blogger id# and the blogger’s self-provided gender, age, industry and astrological sign. (All are labeled for gender and age but for many, industry and/or sign is marked as unknown.)
All bloggers included in the corpus fall into one of three age groups:
· 8240 "10s" blogs (ages 13-17),
· 8086 "20s" blogs(ages 23-27)
· 2994 "30s" blogs (ages 33-47).
For each age group there are an equal number of male and female bloggers.
Each blog in the corpus includes at least 200 occurrences of common English words. All formatting has been stripped with two exceptions. Individual posts within a single blogger are separated by the date of the following post and links within a post are denoted by the label urllink.
The corpus may be freely used for non-commercial research purposes
"""
_URL = "https://u.cs.biu.ac.il/~koppel/BlogCorpus.htm"
_DATA_URL = "http://www.cs.biu.ac.il/~koppel/blogs/blogs.zip"
class BlogAuthorshipCorpusConfig(nlp.BuilderConfig):
"""BuilderConfig for BlogAuthorship."""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for BlogAuthorship
Args:
data_url: `string`, url to the dataset (word or raw level)
**kwargs: keyword arguments forwarded to super.
"""
super(BlogAuthorshipCorpusConfig, self).__init__(version=nlp.Version("1.0.0",), **kwargs)
self.data_url = data_url
class BlogAuthorshipCorpus(nlp.GeneratorBasedBuilder):
"""TODO(BlogAuthorship): Short description of my dataset."""
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
BlogAuthorshipCorpusConfig(
name="blog-authorship-corpus",
data_url=_DATA_URL,
description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
),
]
def _info(self):
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"text": nlp.Value("string"),
"date": nlp.Value("string"),
"gender": nlp.Value("string"),
"age": nlp.Value("int32"),
"horoscope": nlp.Value("string"),
"job": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if self.config.name == "blog-authorship-corpus":
data = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(data, "blogs")
files = sorted(glob.glob(os.path.join(data_dir, "*.xml")))
train_files = []
validation_files = []
for i, file_path in enumerate(files):
# 95% / 5% (train / val) split
if i % 20 == 0:
validation_files.append(file_path)
else:
train_files.append(file_path)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"files": train_files, "split": "train"},),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"files": validation_files, "split": "validation"},
),
]
else:
raise ValueError("{} does not exist".format(self.config.name))
def _generate_examples(self, files, split):
def parse_date(line):
# parse line to date
return line.strip().split("<date>")[-1].split("</date>")[0]
for file_path in files:
counter = 0
file_name = os.path.basename(file_path)
logging.info("generating examples from = %s", file_path)
file_id, gender, age, job, horoscope = tuple(file_name.split(".")[:-1])
            # Note: parsing these files with xml.etree.ElementTree does not work
            # (they cannot be parsed as XML), so we read them line by line instead.
            with open(file_path) as f:
                # Some files are corrupted, so guard the read with a try/except.
try:
date = ""
for line in f:
line = line.strip()
if "<date>" in line:
date = parse_date(line)
elif line != "" and not line.startswith("<"):
# need sub_id to be certain that no tf_records is identical
sub_id = counter
counter += 1
if date == "":
logging.warning("Date missing for {} in {}".format(line, file_name))
assert date is not None, "Date is missing before {}".format(line)
blog = {
"text": line,
"date": date,
"gender": gender,
"age": int(age),
"job": job,
"horoscope": horoscope,
}
yield "{}_{}_{}".format(file_id, sub_id, date), blog
else:
continue
except UnicodeDecodeError as e:
logging.warning("{} cannot be loaded. Error message: {}".format(file_path, e))
|
nlp-master
|
datasets/blog_authorship_corpus/blog_authorship_corpus.py
|
"""TODO(kor_nli): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
# TODO(kor_nli): BibTeX citation
_CITATION = """\
@article{ham2020kornli,
title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
journal={arXiv preprint arXiv:2004.03289},
year={2020}
}
"""
# TODO(kor_nli):
_DESCRIPTION = """ Korean Natural Language Inference datasets
"""
_URL = "https://github.com/kakaobrain/KorNLUDatasets/archive/master.zip"
class KorNLIConfig(nlp.BuilderConfig):
"""BuilderConfig for KorNLI."""
def __init__(self, **kwargs):
"""BuilderConfig for KorNLI.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(KorNLIConfig, self).__init__(version=nlp.Version("1.0.0"), **kwargs)
class KorNli(nlp.GeneratorBasedBuilder):
"""TODO(kor_nli): Short description of my dataset."""
# TODO(kor_nli): Set up version.
VERSION = nlp.Version("1.0.0")
BUILDER_CONFIGS = [
KorNLIConfig(name="multi_nli", description="Korean multi NLI datasets"),
KorNLIConfig(name="snli", description="Korean SNLI dataset"),
KorNLIConfig(name="xnli", description="Korean XNLI dataset"),
]
def _info(self):
# TODO(kor_nli): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"sentence1": nlp.Value("string"),
"sentence2": nlp.Value("string"),
"gold_label": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/kakaobrain/KorNLUDatasets",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(kor_nli): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
dl_dir = os.path.join(dl_dir, "KorNLUDatasets-master", "KorNLI")
if self.config.name == "multi_nli":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "multinli.train.ko.tsv")},
),
]
elif self.config.name == "snli":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "snli_1.0_train.ko.tsv")},
),
]
else:
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.dev.ko.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.test.ko.tsv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(kor_nli): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = csv.DictReader(f, dialect="excel-tab")
for id_, row in enumerate(data):
if len(row) != 3:
continue
yield id_, row
|
nlp-master
|
datasets/kor_nli/kor_nli.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""IMDB movie reviews dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_DESCRIPTION = """\
Movie Review Dataset.
This is a dataset containing 5,331 positive and 5,331 negative processed
sentences from Rotten Tomatoes movie reviews. This data was first used in Bo
Pang and Lillian Lee, ``Seeing stars: Exploiting class relationships for
sentiment categorization with respect to rating scales.'', Proceedings of the
ACL, 2005.
"""
_CITATION = """\
@InProceedings{Pang+Lee:05a,
author = {Bo Pang and Lillian Lee},
title = {Seeing stars: Exploiting class relationships for sentiment
categorization with respect to rating scales},
booktitle = {Proceedings of the ACL},
year = 2005
}
"""
_DOWNLOAD_URL = "http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz"
class RottenTomatoesMovieReview(nlp.GeneratorBasedBuilder):
"""Cornell Rotten Tomatoes movie reviews dataset."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{"text": nlp.Value("string"), "label": nlp.features.ClassLabel(names=["neg", "pos"])}
),
supervised_keys=[""],
homepage="http://www.cs.cornell.edu/people/pabo/movie-review-data/",
citation=_CITATION,
)
    def _vocab_text_gen(self, archive):
        # `archive` is the extracted folder path; build the vocabulary from the train split.
        for _, ex in self._generate_examples("train", archive):
            yield ex["text"]
def _split_generators(self, dl_manager):
""" Downloads Rotten Tomatoes sentences. """
extracted_folder_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"split_key": "train", "data_dir": extracted_folder_path},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"split_key": "validation", "data_dir": extracted_folder_path},
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"split_key": "test", "data_dir": extracted_folder_path},
),
]
def _get_examples_from_split(self, split_key, data_dir):
""" Reads Rotten Tomatoes sentences and splits into 80% train,
10% validation, and 10% test, as is the practice set out in Jinfeng
Li, ``TEXTBUGGER: Generating Adversarial Text Against Real-world
Applications.''
"""
data_dir = os.path.join(data_dir, "rt-polaritydata")
pos_samples = open(os.path.join(data_dir, "rt-polarity.pos"), encoding="latin-1").readlines()
pos_samples = list(map(lambda t: t.strip(), pos_samples))
neg_samples = open(os.path.join(data_dir, "rt-polarity.neg"), encoding="latin-1").readlines()
neg_samples = list(map(lambda t: t.strip(), neg_samples))
# 80/10/10 split
i1 = int(len(pos_samples) * 0.8 + 0.5)
i2 = int(len(pos_samples) * 0.9 + 0.5)
train_samples = pos_samples[:i1] + neg_samples[:i1]
train_labels = (["pos"] * i1) + (["neg"] * i1)
validation_samples = pos_samples[i1:i2] + neg_samples[i1:i2]
validation_labels = (["pos"] * (i2 - i1)) + (["neg"] * (i2 - i1))
test_samples = pos_samples[i2:] + neg_samples[i2:]
test_labels = (["pos"] * (len(pos_samples) - i2)) + (["neg"] * (len(pos_samples) - i2))
if split_key == "train":
return (train_samples, train_labels)
if split_key == "validation":
return (validation_samples, validation_labels)
if split_key == "test":
return (test_samples, test_labels)
else:
raise ValueError(f"Invalid split key {split_key}")
def _generate_examples(self, split_key, data_dir):
"""Yields examples for a given split of MR."""
split_text, split_labels = self._get_examples_from_split(split_key, data_dir)
for text, label in zip(split_text, split_labels):
data_key = split_key + "_" + text
feature_dict = {"text": text, "label": label}
yield data_key, feature_dict
|
nlp-master
|
datasets/rotten_tomatoes/rotten_tomatoes.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tiny Shakespeare dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """\
@misc{
author={Karpathy, Andrej},
title={char-rnn},
year={2015},
howpublished={\\url{https://github.com/karpathy/char-rnn}}
}"""
_DESCRIPTION = """\
40,000 lines of Shakespeare from a variety of Shakespeare's plays. \
Featured in Andrej Karpathy's blog post 'The Unreasonable Effectiveness of \
Recurrent Neural Networks': \
http://karpathy.github.io/2015/05/21/rnn-effectiveness/.
To use for e.g. character modelling:
```
d = nlp.load_dataset(name='tiny_shakespeare')['train']
d = d.map(lambda x: nlp.Value('strings').unicode_split(x['text'], 'UTF-8'))
# train split includes vocabulary for other splits
vocabulary = sorted(set(next(iter(d)).numpy()))
d = d.map(lambda x: {'cur_char': x[:-1], 'next_char': x[1:]})
d = d.unbatch()
seq_len = 100
batch_size = 2
d = d.batch(seq_len)
d = d.batch(batch_size)
```
"""
class TinyShakespeare(nlp.GeneratorBasedBuilder):
"""Tiny Shakespeare dataset builder."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"text": nlp.Value("string")}),
supervised_keys=None,
homepage="https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
download_path = dl_manager.download_and_extract(
"https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
)
if os.path.isdir(download_path):
# During testing the download manager mock gives us a directory
txt_path = os.path.join(download_path, "input.txt")
else:
txt_path = download_path
with open(txt_path, "r") as f:
text = f.read()
# 90/5/5 split
i = int(len(text) * 0.9)
train_text, text = text[:i], text[i:]
i = int(len(text) * 0.5)
validation_text, text = text[:i], text[i:]
test_text = text
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"split_key": "train", "split_text": train_text},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"split_key": "validation", "split_text": validation_text},
),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"split_key": "test", "split_text": test_text},),
]
def _generate_examples(self, split_key, split_text):
"""Yields examples."""
data_key = split_key # Should uniquely identify the thing yielded
feature_dict = {"text": split_text}
yield data_key, feature_dict
|
nlp-master
|
datasets/tiny_shakespeare/tiny_shakespeare.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT16: Translate dataset."""
import nlp
from .wmt_utils import Wmt, WmtConfig
_URL = "http://www.statmt.org/wmt16/translation-task.html"
_CITATION = """
@InProceedings{bojar-EtAl:2016:WMT1,
author = {Bojar, Ond\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huck, Matthias and Jimeno Yepes, Antonio and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Neveol, Aurelie and Neves, Mariana and Popel, Martin and Post, Matt and Rubino, Raphael and Scarton, Carolina and Specia, Lucia and Turchi, Marco and Verspoor, Karin and Zampieri, Marcos},
title = {Findings of the 2016 Conference on Machine Translation},
booktitle = {Proceedings of the First Conference on Machine Translation},
month = {August},
year = {2016},
address = {Berlin, Germany},
publisher = {Association for Computational Linguistics},
pages = {131--198},
url = {http://www.aclweb.org/anthology/W/W16/W16-2301}
}
"""
_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "ro", "ru", "tr"]]
class Wmt16(Wmt):
"""WMT 16 translation datasets for all {xx, "en"} language pairs."""
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2016 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=nlp.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def _subsets(self):
return {
nlp.Split.TRAIN: [
"europarl_v7",
"europarl_v8_16",
"commoncrawl",
"newscommentary_v11",
"czeng_16pre",
"yandexcorpus",
"wikiheadlines_fi",
"wikiheadlines_ru",
"setimes_2",
],
nlp.Split.VALIDATION: ["newsdev2016", "newstest2015"],
nlp.Split.TEST: ["newstest2016", "newstestB2016"],
}
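# --- Added usage sketch (not part of the original file) ---
# A hedged example, assuming this script is registered as the "wmt16" dataset:
# the BUILDER_CONFIGS above name each config "<src>-en", e.g. "ro-en".
if __name__ == "__main__":
    wmt = nlp.load_dataset("wmt16", "ro-en")
    print(wmt["train"][0]["translation"])  # e.g. {"en": "...", "ro": "..."}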
|
nlp-master
|
datasets/wmt16/wmt16.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exist for the different years, using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" in s and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
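# Added illustration (comment only, not part of the original module): resolving
# URLs/paths follows the template rules documented in the class docstring. For a
# hypothetical SubDataset with sources={"cs"} and
#   url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
# calling get_url("cs") would return
#   ["http://www.statmt.org/europarl/v9/training/europarl-v9.cs-en.tsv.gz"],
# while a (source_url, target_url) tuple of URLs yields a two-element list.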
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
language_pair: pair of languages that will be used for translation. Should
contain 2-letter coded strings, for example ("en", "de").
subsets: Dict[split, list[str]]. List of the subsets to use for each of the
splits. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
raise NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementations instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is an abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
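# Added note (comment only, not part of the original file): the loop below only
# creates generator objects without iterating them, so it is effectively a
# no-op, presumably left over from the TFDS code that built a subword
# vocabulary at this point.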
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3") or ss_name.startswith("setimes_2"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError(f"Unsupported file format: {fname} for {ss_name}")
else:
raise ValueError(f"Invalid number of files: {len(files)}")
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
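# Added illustration (comment only, not part of the original file): the seg_re
# pattern used in _parse_sgm above is meant to extract the payload from SGML
# lines such as
#   <seg id="7">Some sentence .</seg>
# capturing "Some sentence ." as the single group, while <doc>/<p> markup lines
# simply fail to match and are skipped.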
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt16/wmt_utils.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The General Language Understanding Evaluation (GLUE) benchmark."""
from __future__ import absolute_import, division, print_function
import csv
import os
import textwrap
import numpy as np
import six
import nlp
_GLUE_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
Note that each GLUE dataset has its own citation. Please see the source to see
the correct citation for each contained dataset."""
_GLUE_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
_MRPC_DEV_IDS = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc"
_MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
_MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
_MNLI_BASE_KWARGS = dict(
text_features={"premise": "sentence1", "hypothesis": "sentence2",},
label_classes=["entailment", "neutral", "contradiction"],
label_column="gold_label",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce",
data_dir="MNLI",
citation=textwrap.dedent(
"""\
@InProceedings{N18-1101,
author = "Williams, Adina
and Nangia, Nikita
and Bowman, Samuel",
title = "A Broad-Coverage Challenge Corpus for
Sentence Understanding through Inference",
booktitle = "Proceedings of the 2018 Conference of
the North American Chapter of the
Association for Computational Linguistics:
Human Language Technologies, Volume 1 (Long
Papers)",
year = "2018",
publisher = "Association for Computational Linguistics",
pages = "1112--1122",
location = "New Orleans, Louisiana",
url = "http://aclweb.org/anthology/N18-1101"
}
@article{bowman2015large,
title={A large annotated corpus for learning natural language inference},
author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
journal={arXiv preprint arXiv:1508.05326},
year={2015}
}"""
),
url="http://www.nyu.edu/projects/bowman/multinli/",
)
class GlueConfig(nlp.BuilderConfig):
"""BuilderConfig for GLUE."""
def __init__(
self,
text_features,
label_column,
data_url,
data_dir,
citation,
url,
label_classes=None,
process_label=lambda x: x,
**kwargs,
):
"""BuilderConfig for GLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`nlp.Value('float32')`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
"""
super(GlueConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.text_features = text_features
self.label_column = label_column
self.label_classes = label_classes
self.data_url = data_url
self.data_dir = data_dir
self.citation = citation
self.url = url
self.process_label = process_label
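# Added note (comment only, not part of the original file): `process_label` is
# applied to the raw tsv value in Glue._generate_examples below; e.g. the "stsb"
# config passes np.float32 so its similarity score is emitted as a float, while
# the classification configs keep the identity default defined above.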
class Glue(nlp.GeneratorBasedBuilder):
"""The General Language Understanding Evaluation (GLUE) benchmark."""
BUILDER_CONFIGS = [
GlueConfig(
name="cola",
description=textwrap.dedent(
"""\
The Corpus of Linguistic Acceptability consists of English
acceptability judgments drawn from books and journal articles on
linguistic theory. Each example is a sequence of words annotated
with whether it is a grammatical English sentence."""
),
text_features={"sentence": "sentence"},
label_classes=["unacceptable", "acceptable"],
label_column="is_acceptable",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4",
data_dir="CoLA",
citation=textwrap.dedent(
"""\
@article{warstadt2018neural,
title={Neural Network Acceptability Judgments},
author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
journal={arXiv preprint arXiv:1805.12471},
year={2018}
}"""
),
url="https://nyu-mll.github.io/CoLA/",
),
GlueConfig(
name="sst2",
description=textwrap.dedent(
"""\
The Stanford Sentiment Treebank consists of sentences from movie reviews and
human annotations of their sentiment. The task is to predict the sentiment of a
given sentence. We use the two-way (positive/negative) class split, and use only
sentence-level labels."""
),
text_features={"sentence": "sentence"},
label_classes=["negative", "positive"],
label_column="label",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8",
data_dir="SST-2",
citation=textwrap.dedent(
"""\
@inproceedings{socher2013recursive,
title={Recursive deep models for semantic compositionality over a sentiment treebank},
author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
pages={1631--1642},
year={2013}
}"""
),
url="https://nlp.stanford.edu/sentiment/index.html",
),
GlueConfig(
name="mrpc",
description=textwrap.dedent(
"""\
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
sentence pairs automatically extracted from online news sources, with human annotations
for whether the sentences in the pair are semantically equivalent."""
), # pylint: disable=line-too-long
text_features={"sentence1": "", "sentence2": ""},
label_classes=["not_equivalent", "equivalent"],
label_column="Quality",
data_url="", # MRPC isn't hosted by GLUE.
data_dir="MRPC",
citation=textwrap.dedent(
"""\
@inproceedings{dolan2005automatically,
title={Automatically constructing a corpus of sentential paraphrases},
author={Dolan, William B and Brockett, Chris},
booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
year={2005}
}"""
),
url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
),
GlueConfig(
name="qqp",
description=textwrap.dedent(
"""\
The Quora Question Pairs2 dataset is a collection of question pairs from the
community question-answering website Quora. The task is to determine whether a
pair of questions are semantically equivalent."""
),
text_features={"question1": "question1", "question2": "question2",},
label_classes=["not_duplicate", "duplicate"],
label_column="is_duplicate",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5",
data_dir="QQP",
citation=textwrap.dedent(
"""\
@online{WinNT,
author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
title = {First Quora Dataset Release: Question Pairs},
year = {2017},
url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
urldate = {2019-04-03}
}"""
),
url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
),
GlueConfig(
name="stsb",
description=textwrap.dedent(
"""\
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
sentence pairs drawn from news headlines, video and image captions, and natural
language inference data. Each pair is human-annotated with a similarity score
from 1 to 5."""
),
text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
label_column="score",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5",
data_dir="STS-B",
citation=textwrap.dedent(
"""\
@article{cer2017semeval,
title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
journal={arXiv preprint arXiv:1708.00055},
year={2017}
}"""
),
url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
process_label=np.float32,
),
GlueConfig(
name="mnli",
description=textwrap.dedent(
"""\
The Multi-Genre Natural Language Inference Corpus is a crowdsourced
collection of sentence pairs with textual entailment annotations. Given a premise sentence
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
gathered from ten different sources, including transcribed speech, fiction, and government reports.
We use the standard test set, for which we obtained private labels from the authors, and evaluate
on both the matched (in-domain) and mismatched (cross-domain) sections. We also use and recommend
the SNLI corpus as 550k examples of auxiliary training data."""
),
**_MNLI_BASE_KWARGS,
),
GlueConfig(
name="mnli_mismatched",
description=textwrap.dedent(
"""\
The mismatched validation and test splits from MNLI.
See the "mnli" BuilderConfig for additional information."""
),
**_MNLI_BASE_KWARGS,
),
GlueConfig(
name="mnli_matched",
description=textwrap.dedent(
"""\
The matched validation and test splits from MNLI.
See the "mnli" BuilderConfig for additional information."""
),
**_MNLI_BASE_KWARGS,
),
GlueConfig(
name="qnli",
description=textwrap.dedent(
"""\
The Stanford Question Answering Dataset is a question-answering
dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
convert the task into sentence pair classification by forming a pair between each question and each
sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
question and the context sentence. The task is to determine whether the context sentence contains
the answer to the question. This modified version of the original task removes the requirement that
the model select the exact answer, but also removes the simplifying assumptions that the answer
is always present in the input and that lexical overlap is a reliable cue."""
), # pylint: disable=line-too-long
text_features={"question": "question", "sentence": "sentence",},
label_classes=["entailment", "not_entailment"],
label_column="label",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601",
data_dir="QNLI",
citation=textwrap.dedent(
"""\
@article{rajpurkar2016squad,
title={Squad: 100,000+ questions for machine comprehension of text},
author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
journal={arXiv preprint arXiv:1606.05250},
year={2016}
}"""
),
url="https://rajpurkar.github.io/SQuAD-explorer/",
),
GlueConfig(
name="rte",
description=textwrap.dedent(
"""\
The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
), # pylint: disable=line-too-long
text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
label_classes=["entailment", "not_entailment"],
label_column="label",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb",
data_dir="RTE",
citation=textwrap.dedent(
"""\
@inproceedings{dagan2005pascal,
title={The PASCAL recognising textual entailment challenge},
author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
booktitle={Machine Learning Challenges Workshop},
pages={177--190},
year={2005},
organization={Springer}
}
@inproceedings{bar2006second,
title={The second pascal recognising textual entailment challenge},
author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
volume={6},
number={1},
pages={6--4},
year={2006},
organization={Venice}
}
@inproceedings{giampiccolo2007third,
title={The third pascal recognizing textual entailment challenge},
author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
pages={1--9},
year={2007},
organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
booktitle={TAC},
year={2009}
}"""
),
url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
),
GlueConfig(
name="wnli",
description=textwrap.dedent(
"""\
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
in which a system must read a sentence with a pronoun and select the referent of that pronoun from
a list of choices. The examples are manually constructed to foil simple statistical methods: Each
one is contingent on contextual information provided by a single word or phrase in the sentence.
To convert the problem into sentence pair classification, we construct sentence pairs by replacing
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
new examples derived from fiction books that was shared privately by the authors of the original
corpus. While the included training set is balanced between two classes, the test set is imbalanced
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
hypotheses are sometimes shared between training and development examples, so if a model memorizes the
training examples, it will predict the wrong label on the corresponding development set
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
between a model's score on this task and its score on the unconverted original task. We
call the converted dataset WNLI (Winograd NLI)."""
),
text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
label_classes=["not_entailment", "entailment"],
label_column="label",
data_url="https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf",
data_dir="WNLI",
citation=textwrap.dedent(
"""\
@inproceedings{levesque2012winograd,
title={The winograd schema challenge},
author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
year={2012}
}"""
),
url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
),
GlueConfig(
name="ax",
description=textwrap.dedent(
"""\
A manually-curated evaluation dataset for fine-grained analysis of
system performance on a broad range of linguistic phenomena. This
dataset evaluates sentence understanding through Natural Language
Inference (NLI) problems. Use a model trained on MultiNLI to produce
predictions for this dataset."""
),
text_features={"premise": "sentence1", "hypothesis": "sentence2",},
label_classes=["entailment", "neutral", "contradiction"],
label_column="", # No label since we only have test set.
# We must use a URL shortener since the URL from GLUE is very long and
# causes issues in TFDS.
data_url="https://bit.ly/2BOtOJ7",
data_dir="", # We are downloading a tsv.
citation="", # The GLUE citation is sufficient.
url="https://gluebenchmark.com/diagnostics",
),
]
def _info(self):
features = {text_feature: nlp.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
if self.config.label_classes:
features["label"] = nlp.features.ClassLabel(names=self.config.label_classes)
else:
features["label"] = nlp.Value("float32")
features["idx"] = nlp.Value("int32")
return nlp.DatasetInfo(
description=_GLUE_DESCRIPTION,
features=nlp.Features(features),
homepage=self.config.url,
citation=self.config.citation + "\n" + _GLUE_CITATION,
)
def _split_generators(self, dl_manager):
if self.config.name == "ax":
data_file = dl_manager.download(self.config.data_url)
return [nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"data_file": data_file, "split": "test",})]
if self.config.name == "mrpc":
data_dir = None
mrpc_files = dl_manager.download({"dev_ids": _MRPC_DEV_IDS, "train": _MRPC_TRAIN, "test": _MRPC_TEST,})
else:
dl_dir = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(dl_dir, self.config.data_dir)
mrpc_files = None
train_split = nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "train.tsv"),
"split": "train",
"mrpc_files": mrpc_files,
},
)
if self.config.name == "mnli":
return [
train_split,
_mnli_split_generator("validation_matched", data_dir, "dev", matched=True),
_mnli_split_generator("validation_mismatched", data_dir, "dev", matched=False),
_mnli_split_generator("test_matched", data_dir, "test", matched=True),
_mnli_split_generator("test_mismatched", data_dir, "test", matched=False),
]
elif self.config.name == "mnli_matched":
return [
_mnli_split_generator("validation", data_dir, "dev", matched=True),
_mnli_split_generator("test", data_dir, "test", matched=True),
]
elif self.config.name == "mnli_mismatched":
return [
_mnli_split_generator("validation", data_dir, "dev", matched=False),
_mnli_split_generator("test", data_dir, "test", matched=False),
]
else:
return [
train_split,
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "dev.tsv"),
"split": "dev",
"mrpc_files": mrpc_files,
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "test.tsv"),
"split": "test",
"mrpc_files": mrpc_files,
},
),
]
def _generate_examples(self, data_file, split, mrpc_files=None):
if self.config.name == "mrpc":
# We have to prepare the MRPC dataset from the original sources ourselves.
examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
for example in examples:
yield example["idx"], example
else:
process_label = self.config.process_label
label_classes = self.config.label_classes
# The train and dev files for CoLA are the only tsv files without a
# header.
is_cola_non_test = self.config.name == "cola" and split != "test"
with open(data_file, encoding="utf8") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
if is_cola_non_test:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
if is_cola_non_test:
row = {
"sentence": row[3],
"is_acceptable": row[1],
}
example = {feat: row[col] for feat, col in six.iteritems(self.config.text_features)}
example["idx"] = n
if self.config.label_column in row:
label = row[self.config.label_column]
# For some tasks, the label is represented as 0 and 1 in the tsv
# files and needs to be cast to integer to work with the feature.
if label_classes and label not in label_classes:
label = int(label) if label else None
example["label"] = process_label(label)
else:
example["label"] = process_label(-1)
# Filter out corrupted rows.
for value in six.itervalues(example):
if value is None:
break
else:
yield example["idx"], example
def _generate_example_mrpc_files(self, mrpc_files, split):
if split == "test":
with open(mrpc_files["test"]) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
yield {
"sentence1": row["#1 String"],
"sentence2": row["#2 String"],
"label": -1,
"idx": n,
}
else:
with open(mrpc_files["dev_ids"]) as f:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
dev_ids = [[row[0], row[1]] for row in reader]
with open(mrpc_files["train"]) as f:
# The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
# the Quality key.
f.seek(3)
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
if is_row_in_dev == (split == "dev"):
yield {
"sentence1": row["#1 String"],
"sentence2": row["#2 String"],
"label": int(row["Quality"]),
"idx": n,
}
def _mnli_split_generator(name, data_dir, split, matched):
return nlp.SplitGenerator(
name=name,
gen_kwargs={
"data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
"split": split,
"mrpc_files": None,
},
)
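# --- Added usage sketch (not part of the original file) ---
# A hedged example, assuming this script is registered as the "glue" dataset:
# any BuilderConfig above should be loadable by its name.
if __name__ == "__main__":
    cola = nlp.load_dataset("glue", "cola")
    print(cola["train"][0])  # e.g. {"sentence": "...", "label": 1, "idx": 0}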
|
nlp-master
|
datasets/glue/glue.py
|
"""TODO(blended_skill_talk): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(blended_skill_talk): BibTeX citation
_CITATION = """\
@misc{smith2020evaluating,
title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},
author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},
year={2020},
eprint={2004.08449},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
# TODO(blended_skill_talk):
_DESCRIPTION = """\
A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.
"""
_URL = "http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz"
_TASK = ["convai2", "empathetic_dialogues", "wizard_of_wikipedia"]
class BlendedSkillTalk(nlp.GeneratorBasedBuilder):
"""TODO(blended_skill_talk): Short description of my dataset."""
# TODO(blended_skill_talk): Set up version.
VERSION = nlp.Version("1.0.0")
def _info(self):
# TODO(blended_skill_talk): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"personas": nlp.features.Sequence({"persona": nlp.Value("string"),}),
"additional_context": nlp.Value("string"),
"previous_utterance": nlp.features.Sequence({"previous_utterance": nlp.Value("string"),}),
"context": nlp.Value("string"),
"free_messages": nlp.features.Sequence({"free_message": nlp.Value("string"),}),
"guided_messgaes": nlp.features.Sequence({"guided_messgae": nlp.Value("string"),}),
"suggestions": nlp.features.Sequence({task: nlp.Value("string") for task in _TASK})
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://parl.ai/projects/bst/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(blended_skill_talk): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
data_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "valid.json")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(blended_skill_talk): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id_, row in enumerate(data):
personas = [row["personas"][1][0], row["personas"][1][1]]
dialogs = [dialog[1] for dialog in row["dialog"]]
free_messages = []
guided_messages = []
for i in range(len(dialogs) // 2):
free_messages.append(dialogs[2 * i])
guided_messages.append(dialogs[2 * i + 1])
context = row["context_dataset"]
add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
suggestions = row["suggestions"]
convai_suggestions = []
empathetic_suggestions = []
wow_suggestions = []
for i in range(len(suggestions) // 2):
convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
yield id_, {
"personas": {"persona": personas,},
"additional_context": add_context,
"previous_utterance": {"previous_utterance": previous_utterance,},
"context": context,
"free_messages": {"free_message": free_messages,},
"guided_messgaes": {"guided_messgae": guided_messages,},
"suggestions": {
"convai2": convai_suggestions,
"empathetic_dialogues": empathetic_suggestions,
"wizard_of_wikipedia": wow_suggestions,
},
}
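# --- Illustrative sketch (not part of the original file) ---
# The loop in _generate_examples above assumes that each raw "dialog" list strictly
# alternates free-worker and guided-worker turns; the toy data below is made up
# purely to show how that interleaving is split into the two message lists.
if __name__ == "__main__":
    toy_dialog = ["hi there", "hello!", "how are you?", "great, thanks"]
    free = [toy_dialog[2 * i] for i in range(len(toy_dialog) // 2)]
    guided = [toy_dialog[2 * i + 1] for i in range(len(toy_dialog) // 2)]
    print(free)    # ['hi there', 'how are you?']
    print(guided)  # ['hello!', 'great, thanks']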
|
nlp-master
|
datasets/blended_skill_talk/blended_skill_talk.py
|
"""TODO(cosmos_qa): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import nlp
# TODO(cosmos_qa): BibTeX citation
_CITATION = """\
@inproceedings{cosmos,
title={COSMOS QA: Machine Reading Comprehension
with Contextual Commonsense Reasoning},
author={Lifu Huang and Ronan Le Bras and Chandra Bhagavatula and Yejin Choi},
booktitle ={arXiv:1909.00277v2},
year={2019}
}
"""
# TODO(cosmos_qa):
_DESCRIPTION = """\
Cosmos QA is a large-scale dataset of 35.6K problems that require commonsense-based reading comprehension, formulated as multiple-choice questions. It focuses on reading between the lines over a diverse collection of people's everyday narratives, asking questions concerning the likely causes or effects of events that require reasoning beyond the exact text spans in the context.
"""
_URL = "https://github.com/wilburOne/cosmosqa/raw/master/data/"
_TEST_FILE = "test.jsonl"
_TRAIN_FILE = "train.csv"
_DEV_FILE = "valid.csv"
class CosmosQa(nlp.GeneratorBasedBuilder):
"""TODO(cosmos_qa): Short description of my dataset."""
# TODO(cosmos_qa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(cosmos_qa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answer0": nlp.Value("string"),
"answer1": nlp.Value("string"),
"answer2": nlp.Value("string"),
"answer3": nlp.Value("string"),
"label": nlp.Value("int32")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://wilburone.github.io/cosmos/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(cosmos_qa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {
"train": os.path.join(_URL, _TRAIN_FILE),
"test": os.path.join(_URL, _TEST_FILE),
"dev": os.path.join(_URL, _DEV_FILE),
}
dl_dir = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["dev"], "split": "dev"},
),
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
# TODO(cosmos_qa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
if split == "test":
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"id": data["id"],
"context": data["context"],
"question": data["question"],
"answer0": data["answer0"],
"answer1": data["answer1"],
"answer2": data["answer2"],
"answer3": data["answer3"],
"label": int(data.get("label", -1)),
}
else:
data = csv.DictReader(f)
for id_, row in enumerate(data):
yield id_, {
"id": row["id"],
"context": row["context"],
"question": row["question"],
"answer0": row["answer0"],
"answer1": row["answer1"],
"answer2": row["answer2"],
"answer3": row["answer3"],
"label": int(row.get("label", -1)),
}
|
nlp-master
|
datasets/cosmos_qa/cosmos_qa.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BLiMP dataset with minimal pairs of grammatical phenomena in English."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@article{warstadt2019blimp,
title={BLiMP: A Benchmark of Linguistic Minimal Pairs for English},
author={Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei, and Wang, Sheng-Fu and Bowman, Samuel R},
journal={arXiv preprint arXiv:1912.00582},
year={2019}
}
"""
_DESCRIPTION = """
BLiMP is a challenge set for evaluating what language models (LMs) know about
major grammatical phenomena in English. BLiMP consists of 67 sub-datasets, each
containing 1000 minimal pairs isolating specific contrasts in syntax,
morphology, or semantics. The data is automatically generated according to
expert-crafted grammars.
"""
_PROJECT_URL = "https://github.com/alexwarstadt/blimp/tree/master/"
_DOWNLOAD_URL = "https://raw.githubusercontent.com/alexwarstadt/blimp/master/"
class BlimpConfig(nlp.BuilderConfig):
"""BuilderConfig for Blimp."""
def __init__(self, paradigm_uid, **kwargs):
"""BuilderConfig for Blimp.
Args:
paradigm_uid: string, UID of the linguistic paradigm
**kwargs: keyword arguments forwarded to super.
"""
name = paradigm_uid
description = _DESCRIPTION
description += ("This configuration includes the paradigm {}.").format(name)
super(BlimpConfig, self).__init__(name=name, description=description, version=nlp.Version("0.1.0"), **kwargs)
class Blimp(nlp.GeneratorBasedBuilder):
"""Minimal grammatical and ungrammatical pairs of 67 linguistic paradigms."""
all_paradigms = [
"adjunct_island",
"anaphor_gender_agreement",
"anaphor_number_agreement",
"animate_subject_passive",
"animate_subject_trans",
"causative",
"complex_NP_island",
"coordinate_structure_constraint_complex_left_branch",
"coordinate_structure_constraint_object_extraction",
"determiner_noun_agreement_1",
"determiner_noun_agreement_2",
"determiner_noun_agreement_irregular_1",
"determiner_noun_agreement_irregular_2",
"determiner_noun_agreement_with_adj_2",
"determiner_noun_agreement_with_adj_irregular_1",
"determiner_noun_agreement_with_adj_irregular_2",
"determiner_noun_agreement_with_adjective_1",
"distractor_agreement_relational_noun",
"distractor_agreement_relative_clause",
"drop_argument",
"ellipsis_n_bar_1",
"ellipsis_n_bar_2",
"existential_there_object_raising",
"existential_there_quantifiers_1",
"existential_there_quantifiers_2",
"existential_there_subject_raising",
"expletive_it_object_raising",
"inchoative",
"intransitive",
"irregular_past_participle_adjectives",
"irregular_past_participle_verbs",
"irregular_plural_subject_verb_agreement_1",
"irregular_plural_subject_verb_agreement_2",
"left_branch_island_echo_question",
"left_branch_island_simple_question",
"matrix_question_npi_licensor_present",
"npi_present_1",
"npi_present_2",
"only_npi_licensor_present",
"only_npi_scope",
"passive_1",
"passive_2",
"principle_A_c_command",
"principle_A_case_1",
"principle_A_case_2",
"principle_A_domain_1",
"principle_A_domain_2",
"principle_A_domain_3",
"principle_A_reconstruction",
"regular_plural_subject_verb_agreement_1",
"regular_plural_subject_verb_agreement_2",
"sentential_negation_npi_licensor_present",
"sentential_negation_npi_scope",
"sentential_subject_island",
"superlative_quantifiers_1",
"superlative_quantifiers_2",
"tough_vs_raising_1",
"tough_vs_raising_2",
"transitive",
"wh_island",
"wh_questions_object_gap",
"wh_questions_subject_gap",
"wh_questions_subject_gap_long_distance",
"wh_vs_that_no_gap",
"wh_vs_that_no_gap_long_distance",
"wh_vs_that_with_gap",
"wh_vs_that_with_gap_long_distance",
]
BUILDER_CONFIGS = [BlimpConfig(paradigm_uid=paradigm) for paradigm in all_paradigms]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"sentence_good": nlp.Value("string"),
"sentence_bad": nlp.Value("string"),
"field": nlp.Value("string"),
"linguistics_term": nlp.Value("string"),
"UID": nlp.Value("string"),
"simple_LM_method": nlp.Value("bool"),
"one_prefix_method": nlp.Value("bool"),
"two_prefix_method": nlp.Value("bool"),
"lexically_identical": nlp.Value("bool"),
"pair_id": nlp.Value("int32"),
}
),
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_PROJECT_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cfg = self.config
download_urls = {cfg.name: os.path.join(_DOWNLOAD_URL, "data", cfg.name + ".jsonl")}
downloaded_files = dl_manager.download_and_extract(download_urls)
return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files[cfg.name]})]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath, "rb") as f:
for line in f:
line_dict = json.loads(line)
id_ = line_dict["UID"] + "_" + line_dict["pairID"]
feats = {
"sentence_good": line_dict["sentence_good"],
"sentence_bad": line_dict["sentence_bad"],
"field": line_dict["field"],
"linguistics_term": line_dict["linguistics_term"],
"UID": line_dict["UID"],
"simple_LM_method": line_dict["simple_LM_method"],
"one_prefix_method": line_dict["one_prefix_method"],
"two_prefix_method": line_dict["two_prefix_method"],
"lexically_identical": line_dict["lexically_identical"],
"pair_id": int(line_dict["pairID"]),
}
yield id_, feats
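# --- Illustrative usage sketch (not part of the original blimp.py) ---
# Every paradigm listed in `all_paradigms` is exposed as its own config, so a single
# sub-dataset is selected by name. This assumes the `nlp` library is installed and
# the script is discoverable under the name "blimp"; the loading API may differ
# between library versions.
if __name__ == "__main__":
    paradigm = nlp.load_dataset("blimp", "anaphor_gender_agreement", split="train")
    example = paradigm[0]
    print(example["sentence_good"], "||", example["sentence_bad"])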
|
nlp-master
|
datasets/blimp/blimp.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT17: Translate dataset."""
import nlp
from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig
_URL = "http://www.statmt.org/wmt17/translation-task.html"
_CITATION = """
@InProceedings{bojar-EtAl:2017:WMT1,
author = {Bojar, Ond\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},
title = {Findings of the 2017 Conference on Machine Translation (WMT17)},
booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},
month = {September},
year = {2017},
address = {Copenhagen, Denmark},
publisher = {Association for Computational Linguistics},
pages = {169--214},
url = {http://www.aclweb.org/anthology/W17-4717}
}
"""
_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "lv", "ru", "tr", "zh"]]
class Wmt17(Wmt):
"""WMT 17 translation datasets for all {xx, "en"} language pairs."""
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2017 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=nlp.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def manual_download_instructions(self):
if self.config.language_pair[1] in ["cs", "hi", "ru"]:
return "Please download the data manually as explained. TODO(PVP)"
@property
def _subsets(self):
return {
nlp.Split.TRAIN: [
"europarl_v7",
"europarl_v8_16",
"commoncrawl",
"newscommentary_v12",
"czeng_16",
"yandexcorpus",
"wikiheadlines_fi",
"wikiheadlines_ru",
"setimes_2",
"uncorpus_v1",
"rapid_2016",
"leta_v1",
"dcep_v1",
"onlinebooks_v1",
]
+ CWMT_SUBSET_NAMES,
nlp.Split.VALIDATION: ["newsdev2017", "newstest2016", "newstestB2016"],
nlp.Split.TEST: ["newstest2017", "newstestB2017"],
}
|
nlp-master
|
datasets/wmt17/wmt17.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exist for the different years, using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
            specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
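# Illustrative sketch (not part of the original file) of the template expansion
# documented in the SubDataset docstring above, using a made-up sub-dataset:
#
#   demo = SubDataset(
#       name="demo",
#       target="en",
#       sources={"de"},
#       url="http://example.com/{0}-{1}/corpus.{src}.gz",  # hypothetical URL
#       path=("corpus.{src}-en.{src}", "corpus.{src}-en.en"),
#   )
#   demo.get_url("de")   # -> ["http://example.com/de-en/corpus.de.gz"]
#   demo.get_path("de")  # -> ["corpus.de-en.de", "corpus.de-en.en"]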
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
language_pair: pair of languages that will be used for translation. Should
contain 2 letter coded strings. For example: ("en", "de").
subsets: Dict[split, list[str]]. List of the subset to use for each of the
split. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
            raise NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is a abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt17/wmt_utils.py
|
"""TODO(sciQ): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(sciQ): BibTeX citation
_CITATION = """\
@inproceedings{SciQ,
title={Crowdsourcing Multiple Choice Science Questions},
author={Johannes Welbl, Nelson F. Liu, Matt Gardner},
year={2017},
journal={arXiv:1707.06209v1}
}
"""
# TODO(sciQ):
_DESCRIPTION = """\
The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.
"""
_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/SciQ.zip"
class Sciq(nlp.GeneratorBasedBuilder):
"""TODO(sciQ): Short description of my dataset."""
# TODO(sciQ): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(sciQ): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"question": nlp.Value("string"),
"distractor3": nlp.Value("string"),
"distractor1": nlp.Value("string"),
"distractor2": nlp.Value("string"),
"correct_answer": nlp.Value("string"),
"support": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/sciq",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(sciQ): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "SciQ dataset-2 3")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "valid.json")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(sciQ): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id_, row in enumerate(data):
yield id_, row
|
nlp-master
|
datasets/sciq/sciq.py
|
"""TODO(cmrc2018): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(cmrc2018): BibTeX citation
_CITATION = """\
@inproceedings{cui-emnlp2019-cmrc2018,
title = {A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension},
author = {Cui, Yiming and
Liu, Ting and
Che, Wanxiang and
Xiao, Li and
Chen, Zhipeng and
Ma, Wentao and
Wang, Shijin and
Hu, Guoping},
booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
month = {nov},
year = {2019},
address = {Hong Kong, China},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/D19-1600},
doi = {10.18653/v1/D19-1600},
pages = {5886--5891}}
"""
# TODO(cmrc2018):
_DESCRIPTION = """\
A Span-Extraction dataset for Chinese machine reading comprehension, intended to add
linguistic diversity to this area. The dataset is composed of nearly 20,000 real questions
annotated on Wikipedia paragraphs by human experts. We also annotated a challenge set
containing questions that require comprehensive understanding and multi-sentence
inference throughout the context.
"""
_URL = "https://github.com/ymcui/cmrc2018"
_TRAIN_FILE = "https://worksheets.codalab.org/rest/bundles/0x15022f0c4d3944a599ab27256686b9ac/contents/blob/"
_DEV_FILE = "https://worksheets.codalab.org/rest/bundles/0x72252619f67b4346a85e122049c3eabd/contents/blob/"
_TEST_FILE = "https://worksheets.codalab.org/rest/bundles/0x182c2e71fac94fc2a45cc1a3376879f7/contents/blob/"
class Cmrc2018(nlp.GeneratorBasedBuilder):
"""TODO(cmrc2018): Short description of my dataset."""
# TODO(cmrc2018): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(cmrc2018): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(cmrc2018): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {"train": _TRAIN_FILE, "dev": _DEV_FILE, "test": _TEST_FILE}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(cmrc2018): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for example in data["data"]:
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
yield id_, {
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
|
nlp-master
|
datasets/cmrc2018/cmrc2018.py
|
"""TODO(break_data): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import textwrap
import six
import nlp
# TODO(break): BibTeX citation
_CITATION = """\
@article{Wolfson2020Break,
title={Break It Down: A Question Understanding Benchmark},
author={Wolfson, Tomer and Geva, Mor and Gupta, Ankit and Gardner, Matt and Goldberg, Yoav and Deutch, Daniel and Berant, Jonathan},
journal={Transactions of the Association for Computational Linguistics},
year={2020},
}
"""
# TODO(break):
_DESCRIPTION = """\
Break is a human annotated dataset of natural language questions and their Question Decomposition Meaning Representations
(QDMRs). Break consists of 83,978 examples sampled from 10 question answering datasets over text, images and databases.
This repository contains the Break dataset along with information on the exact data format.
"""
_URL = "https://github.com/allenai/Break/raw/master/break_dataset/Break-dataset.zip"
class BreakDataConfig(nlp.BuilderConfig):
"""BuilderConfig for Break"""
def __init__(self, text_features, lexicon_tokens, **kwargs):
"""
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
lexicon_tokens: to define if we want to load the lexicon_tokens files or not
**kwargs: keyword arguments forwarded to super.
"""
super(BreakDataConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.text_features = text_features
self.lexicon_tokens = lexicon_tokens
class BreakData(nlp.GeneratorBasedBuilder):
"""TODO(break_data): Short description of my dataset."""
# TODO(break_data): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
BreakDataConfig(
name="QDMR-high-level",
description=textwrap.dedent(
"""
            Contains questions annotated with the high-level variant of QDMR. These decompositions are exclusive to Reading
Comprehension tasks (Section 2). lexicon_tokens files are also provided."""
),
text_features={
"question_id": "question_id",
"question_text": "question_text",
"decomposition": "decomposition",
"operators": "operators",
"split": "split",
},
lexicon_tokens=False,
),
BreakDataConfig(
name="QDMR-high-level-lexicon",
description=textwrap.dedent(
"""
                Contains questions annotated with the high-level variant of QDMR. These decompositions are exclusive to Reading
                Comprehension tasks (Section 2). lexicon_tokens files are also provided."""
),
text_features={"source": "source", "allowed_tokens": "allowed_tokens",},
lexicon_tokens=True,
),
BreakDataConfig(
name="QDMR",
description=textwrap.dedent(
"""
Contains questions over text, images and databases annotated with their Question Decomposition Meaning
Representation. In addition to the train, dev and (hidden) test sets we provide lexicon_tokens files. For
each question, the lexicon file contains the set of valid tokens that could potentially appear in its
decomposition """
),
text_features={
"question_id": "question_id",
"question_text": "question_text",
"decomposition": "decomposition",
"operators": "operators",
"split": "split",
},
lexicon_tokens=False,
),
BreakDataConfig(
name="QDMR-lexicon",
description=textwrap.dedent(
"""
Contains questions over text, images and databases annotated with their Question Decomposition Meaning
Representation. In addition to the train, dev and (hidden) test sets we provide lexicon_tokens files. For
each question, the lexicon file contains the set of valid tokens that could potentially appear in its
decomposition """
),
text_features={"source": "source", "allowed_tokens": "allowed_tokens",},
lexicon_tokens=True,
),
BreakDataConfig(
name="logical-forms",
description=textwrap.dedent(
"""
Contains questions and QDMRs annotated with full logical-forms of QDMR operators + arguments. Full logical-forms
were inferred by the annotation-consistency algorithm described in """
),
lexicon_tokens=False,
text_features={
"question_id": "question_id",
"question_text": "question_text",
"decomposition": "decomposition",
"operators": "operators",
"split": "split",
"program": "program",
},
),
]
def _info(self):
# TODO(break_data): Specifies the nlp.DatasetInfo object
features = {text_feature: nlp.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
features
# These are the features of your dataset like images, labels ...
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/allenai/Break",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(break_data): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "Break-dataset")
qdmr_high_level = os.path.join(data_dir, "QDMR-high-level")
qdmr = os.path.join(data_dir, "QDMR")
logical = os.path.join(data_dir, "logical-forms")
if self.config.name == "QDMR" or self.config.name == "QDMR-lexicon":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(qdmr, "train.csv")
if not self.config.lexicon_tokens
else os.path.join(qdmr, "train_lexicon_tokens.json")
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(qdmr, "dev.csv")
if not self.config.lexicon_tokens
else os.path.join(qdmr, "dev_lexicon_tokens.json")
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(qdmr, "test.csv")
if not self.config.lexicon_tokens
else os.path.join(qdmr, "test_lexicon_tokens.json")
},
),
]
elif self.config.name == "QDMR-high-level" or self.config.name == "QDMR-high-level-lexicon":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(qdmr_high_level, "train.csv")
if not self.config.lexicon_tokens
else os.path.join(qdmr_high_level, "train_lexicon_tokens.json")
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(qdmr_high_level, "dev.csv")
if not self.config.lexicon_tokens
else os.path.join(qdmr_high_level, "dev_lexicon_tokens.json")
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(qdmr_high_level, "test.csv")
if not self.config.lexicon_tokens
else os.path.join(qdmr_high_level, "test_lexicon_tokens.json")
},
),
]
elif self.config.name == "logical-forms":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(logical, "train.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(logical, "dev.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(logical, "test.csv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(break_data): Yields (key, example) tuples from the dataset
with open(filepath) as f:
if (
self.config.name == "QDMR-high-level"
or self.config.name == "QDMR"
or self.config.name == "logical-forms"
):
data = csv.DictReader(f)
for id_, row in enumerate(data):
yield id_, row
elif self.config.name == "QDMR-high-level-lexicon" or self.config.name == "QDMR-lexicon":
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, data
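# Hedged usage sketch (not part of the original script): the config name picks
# both the QDMR variant and whether the lexicon files are read (see
# `BUILDER_CONFIGS` above). Assumes the script is registered as "break_data".
if __name__ == "__main__":
    qdmr_train = nlp.load_dataset("break_data", "QDMR", split="train")
    lexicon_train = nlp.load_dataset("break_data", "QDMR-lexicon", split="train")
    # CSV-backed configs expose question/decomposition columns ...
    print(qdmr_train[0]["question_text"], "->", qdmr_train[0]["decomposition"])
    # ... while the lexicon configs expose source/allowed_tokens.
    print(lexicon_train[0]["source"])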
|
nlp-master
|
datasets/break_data/break_data.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exist for the different years, using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
            specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
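# Hedged worked example (not part of the original module): illustrates how the
# template placeholders documented in `SubDataset.__init__` are resolved by
# `_inject_language`. The SubDataset instance below is purely illustrative; the
# URL and paths do not point at real resources.
if __name__ == "__main__":
    _demo = SubDataset(
        name="demo",
        target="en",
        sources={"de"},
        url="http://example.com/corpus.{0}-{1}.tsv.gz",
        path=("corpus.{src}-en.{src}", "corpus.{src}-en.en"),
    )
    # '{0}'/'{1}' are filled with the language codes in alphabetical order,
    # '{src}' with the source language code.
    print(_demo.get_url("de"))   # ['http://example.com/corpus.de-en.tsv.gz']
    print(_demo.get_path("de"))  # ['corpus.de-en.de', 'corpus.de-en.en']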
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
          language_pair: pair of languages that will be used for translation. Should
            contain 2-letter coded strings. For example: ("en", "de").
subsets: Dict[split, list[str]]. List of the subset to use for each of the
split. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
            raise NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
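# Hedged sketch (not part of the original module): builds the kind of custom
# config described in `_DESCRIPTION` above, using the `WmtConfig` class defined
# here. The chosen subsets must be keys of `DATASET_MAP`; the version string is
# arbitrary.
if __name__ == "__main__":
    _custom_config = WmtConfig(
        version=nlp.Version("0.0.1"),
        language_pair=("fr", "de"),
        subsets={
            nlp.Split.TRAIN: ["commoncrawl_frde"],
            nlp.Split.VALIDATION: ["euelections_dev2019"],
        },
    )
    print(_custom_config.name)     # "fr-de"
    print(_custom_config.subsets)  # split -> list of sub-dataset names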
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is a abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
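        # Note: `_vocab_text_gen` returns a lazy generator; since the results of
        # the calls below are never iterated, they have no side effects here.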
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt19/wmt_utils.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT19: Translate dataset."""
import nlp
from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig
_URL = "http://www.statmt.org/wmt19/translation-task.html"
# TODO(adarob): Update with citation of overview paper once it is published.
_CITATION = """
@ONLINE {wmt19translate,
author = {Wikimedia Foundation},
title = {ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News},
url = {http://www.statmt.org/wmt19/translation-task.html}
}
"""
_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]] + [("fr", "de")]
class Wmt19(Wmt):
"""WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""
# Version history:
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=nlp.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def manual_download_instructions(self):
if self.config.language_pair[1] in ["cs", "hi", "ru"]:
return "Please download the data manually as explained. TODO(PVP)"
@property
def _subsets(self):
return {
nlp.Split.TRAIN: [
"europarl_v9",
"europarl_v7_frde",
"paracrawl_v3",
"paracrawl_v1_ru",
"paracrawl_v3_frde",
"commoncrawl",
"commoncrawl_frde",
"newscommentary_v14",
"newscommentary_v14_frde",
"czeng_17",
"yandexcorpus",
"wikititles_v1",
"uncorpus_v1",
"rapid_2016_ltfi",
"rapid_2019",
]
+ CWMT_SUBSET_NAMES,
nlp.Split.VALIDATION: ["euelections_dev2019", "newsdev2019", "newstest2018"],
}
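# Hedged usage sketch (not part of the original script): a WMT19 pair is
# selected via the config name "<source>-<target>" built by `WmtConfig`
# (e.g. "de-en"); note that the full training data is very large.
if __name__ == "__main__":
    wmt19_de_en = nlp.load_dataset("wmt19", "de-en", split="validation")
    print(wmt19_de_en[0]["translation"])  # {"de": "...", "en": "..."}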
|
nlp-master
|
datasets/wmt19/wmt19.py
|
"""TODO(wiki_qa): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
# TODO(wiki_qa): BibTeX citation
_CITATION = """\
@InProceedings{YangYihMeek:EMNLP2015:WikiQA,
       author = {Yang, Yi and Yih, Wen-tau and Meek, Christopher},
title = "{WikiQA: A Challenge Dataset for Open-Domain Question Answering}",
journal = {Association for Computational Linguistics},
year = 2015,
doi = {10.18653/v1/D15-1237},
pages = {2013–2018},
}
"""
# TODO(wiki_qa):
_DESCRIPTION = """\
Wiki Question Answering corpus from Microsoft
"""
_DATA_URL = "https://download.microsoft.com/download/E/5/f/E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip" #'https://www.microsoft.com/en-us/download/confirmation.aspx?id=52419'
class WikiQa(nlp.GeneratorBasedBuilder):
"""TODO(wiki_qa): Short description of my dataset."""
# TODO(wiki_qa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(wiki_qa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"question_id": nlp.Value("string"),
"question": nlp.Value("string"),
"document_title": nlp.Value("string"),
"answer": nlp.Value("string"),
"label": nlp.features.ClassLabel(num_classes=2),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://www.microsoft.com/en-us/download/details.aspx?id=52419",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(wiki_qa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_DATA_URL)
dl_dir = os.path.join(dl_dir, "WikiQACorpus")
# dl_dir = os.path.join(dl_dir, '')
return [
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-test.tsv")}),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-dev.tsv")}
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "WikiQA-train.tsv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(wiki_qa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for idx, row in enumerate(reader):
yield idx, {
"question_id": row["QuestionID"],
"question": row["Question"],
"document_title": row["DocumentTitle"],
"answer": row["Sentence"],
"label": row["Label"],
}
|
nlp-master
|
datasets/wiki_qa/wiki_qa.py
|
"""TODO(sciTail): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import textwrap
import nlp
# TODO(sciTail): BibTeX citation
_CITATION = """\
@inproceedings{scitail,
Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
Booktitle = {AAAI},
Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
Year = {2018}
}
"""
# TODO(sciTail):
_DESCRIPTION = """\
The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
with neutral label
"""
_URL = "http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip"
class ScitailConfig(nlp.BuilderConfig):
""" BuilderConfig for Xquad"""
def __init__(self, **kwargs):
"""
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(ScitailConfig, self).__init__(
version=nlp.Version("1.1.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Scitail(nlp.GeneratorBasedBuilder):
"""TODO(sciTail): Short description of my dataset."""
# TODO(sciTail): Set up version.
VERSION = nlp.Version("1.1.0")
BUILDER_CONFIGS = [
ScitailConfig(
name="snli_format",
description="JSONL format used by SNLI with a JSON object corresponding to each entailment example in each line.",
),
ScitailConfig(
name="tsv_format", description="Tab-separated format with three columns: premise hypothesis label"
),
ScitailConfig(
name="dgem_format",
description="Tab-separated format used by the DGEM model: premise hypothesis label hypothesis graph structure",
),
ScitailConfig(
name="predictor_format",
description=textwrap.dedent(
"""\
AllenNLP predictors work only with JSONL format. This folder contains the SciTail train/dev/test in JSONL format
so that it can be loaded into the predictors. Each line is a JSON object with the following keys:
gold_label : the example label from {entails, neutral}
sentence1: the premise
sentence2: the hypothesis
sentence2_structure: structure from the hypothesis """
),
),
]
def _info(self):
# TODO(sciTail): Specifies the nlp.DatasetInfo object
if self.config.name == "snli_format":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"sentence1_binary_parse": nlp.Value("string"),
"sentence1_parse": nlp.Value("string"),
"sentence1": nlp.Value("string"),
"sentence2_parse": nlp.Value("string"),
"sentence2": nlp.Value("string"),
"annotator_labels": nlp.features.Sequence({"annotator_label": nlp.Value("string")}),
"gold_label": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/scitail",
citation=_CITATION,
)
elif self.config.name == "tsv_format":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"premise": nlp.Value("string"),
"hypothesis": nlp.Value("string"),
"label": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/scitail",
citation=_CITATION,
)
elif self.config.name == "predictor_format":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"answer": nlp.Value("string"),
"sentence2_structure": nlp.Value("string"),
"sentence1": nlp.Value("string"),
"sentence2": nlp.Value("string"),
"gold_label": nlp.Value("string"),
"question": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/scitail",
citation=_CITATION,
)
elif self.config.name == "dgem_format":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"premise": nlp.Value("string"),
"hypothesis": nlp.Value("string"),
"label": nlp.Value("string"),
"hypothesis_graph_structure": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/scitail",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(sciTail): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "SciTailV1.1")
snli = os.path.join(data_dir, "snli_format")
dgem = os.path.join(data_dir, "dgem_format")
tsv = os.path.join(data_dir, "tsv_format")
predictor = os.path.join(data_dir, "predictor_format")
if self.config.name == "snli_format":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_train.txt")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_test.txt")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_dev.txt")},
),
]
elif self.config.name == "tsv_format":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_train.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_test.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_dev.tsv")},
),
]
elif self.config.name == "predictor_format":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_dev.jsonl")},
),
]
elif self.config.name == "dgem_format":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_train.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_test.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_dev.tsv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(sciTail): Yields (key, example) tuples from the dataset
with open(filepath) as f:
if self.config.name == "snli_format":
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"sentence1_binary_parse": data["sentence1_binary_parse"],
"sentence1_parse": data["sentence1_parse"],
"sentence1": data["sentence1"],
"sentence2_parse": data["sentence2_parse"],
"sentence2": data["sentence2"],
"annotator_labels": {"annotator_label": data["annotator_labels"]},
"gold_label": data["gold_label"],
}
elif self.config.name == "tsv_format":
data = csv.reader(f, delimiter="\t")
for id_, row in enumerate(data):
yield id_, {"premise": row[0], "hypothesis": row[1], "label": row[2]}
elif self.config.name == "dgem_format":
data = csv.reader(f, delimiter="\t")
for id_, row in enumerate(data):
yield id_, {
"premise": row[0],
"hypothesis": row[1],
"label": row[2],
"hypothesis_graph_structure": row[3],
}
elif self.config.name == "predictor_format":
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"answer": data["answer"],
"sentence2_structure": data["sentence2_structure"],
"sentence1": data["sentence1"],
"sentence2": data["sentence2"],
"gold_label": data["gold_label"],
"question": data["question"],
}
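# Hedged usage sketch (not part of the original script): each config name
# corresponds to one of the SciTail release formats handled above; the TSV
# variant exposes plain premise/hypothesis/label fields.
if __name__ == "__main__":
    scitail_tsv = nlp.load_dataset("scitail", "tsv_format", split="train")
    print(scitail_tsv[0]["premise"], "->", scitail_tsv[0]["label"])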
|
nlp-master
|
datasets/scitail/scitail.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Librispeech language modeling dataset."""
from __future__ import absolute_import, division, print_function
import nlp
_CITATION = """\
@inproceedings{panayotov2015librispeech,
title={Librispeech: an ASR corpus based on public domain audio books},
author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
pages={5206--5210},
year={2015},
organization={IEEE}
}
"""
_DESCRIPTION = """\
Language modeling resources to be used in conjunction with the LibriSpeech ASR corpus.
"""
_URL = "http://www.openslr.org/11"
_DL_URL = "http://www.openslr.org/resources/11/librispeech-lm-norm.txt.gz"
class LibrispeechLm(nlp.GeneratorBasedBuilder):
"""Librispeech language modeling dataset."""
VERSION = nlp.Version("0.1.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"text": nlp.Value("string"),}),
supervised_keys=("text", "text"),
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
archive_path = dl_manager.download_and_extract(_DL_URL)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"archive_path": archive_path}),
]
def _generate_examples(self, archive_path):
"""Yields examples."""
with open(archive_path, "r") as f:
for key, line in enumerate(f):
text = line.strip()
if text: # Skip empty lines.
yield key, {"text": text}
|
nlp-master
|
datasets/librispeech_lm/librispeech_lm.py
|
"""TODO(xcopa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(xcopa): BibTeX citation
_CITATION = """\
@article{ponti2020xcopa,
title={{XCOPA: A} Multilingual Dataset for Causal Commonsense Reasoning},
  author={Edoardo M. Ponti and Goran Glava\v{s} and Olga Majewska and Qianchu Liu and Ivan Vuli\'{c} and Anna Korhonen},
journal={arXiv preprint},
year={2020},
url={https://ducdauge.github.io/files/xcopa.pdf}
}
@inproceedings{roemmele2011choice,
title={Choice of plausible alternatives: An evaluation of commonsense causal reasoning},
author={Roemmele, Melissa and Bejan, Cosmin Adrian and Gordon, Andrew S},
booktitle={2011 AAAI Spring Symposium Series},
year={2011},
url={https://people.ict.usc.edu/~gordon/publications/AAAI-SPRING11A.PDF},
}
"""
# TODO(xcopa):
_DESCRIPTION = """\
XCOPA: A Multilingual Dataset for Causal Commonsense Reasoning
The Cross-lingual Choice of Plausible Alternatives dataset is a benchmark to evaluate the ability of machine learning models to transfer commonsense reasoning across
languages. The dataset is the translation and reannotation of the English COPA (Roemmele et al. 2011) and covers 11 languages from 11 families and several areas around
the globe. The dataset is challenging as it requires both the command of world knowledge and the ability to generalise to new languages. All the details about the
creation of XCOPA and the implementation of the baselines are available in the paper.\n
"""
_LANG = ["et", "ht", "it", "id", "qu", "sw", "zh", "ta", "th", "tr", "vi"]
_URL = "https://github.com/cambridgeltl/xcopa/archive/master.zip"
class XcopaConfig(nlp.BuilderConfig):
"""BuilderConfig for Break"""
def __init__(self, **kwargs):
"""
Args:
data_dir: directory for the given language dataset
**kwargs: keyword arguments forwarded to super.
"""
super(XcopaConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Xcopa(nlp.GeneratorBasedBuilder):
"""TODO(xcopa): Short description of my dataset."""
# TODO(xcopa): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [XcopaConfig(name=lang, description="Xcopa language {}".format(lang),) for lang in _LANG]
def _info(self):
# TODO(xcopa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION + self.config.description,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"premise": nlp.Value("string"),
"choice1": nlp.Value("string"),
"choice2": nlp.Value("string"),
"question": nlp.Value("string"),
"label": nlp.Value("int32"),
"idx": nlp.Value("int32"),
"changed": nlp.Value("bool"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/cambridgeltl/xcopa",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(xcopa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "xcopa-master", "data", self.config.name)
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test." + self.config.name + ".jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "val." + self.config.name + ".jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(xcopa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for row in f:
data = json.loads(row)
idx = data["idx"]
yield idx, data
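# --- Illustrative usage sketch (added; not part of the original script). ---
# Every language in _LANG is its own config, so a config name such as "et" is
# required, and only test/validation splits exist (see _split_generators).
# Assumes the generic `nlp.load_dataset` entry point.
if __name__ == "__main__":
    xcopa_et = nlp.load_dataset("xcopa", "et", split="validation")
    sample = xcopa_et[0]
    # In COPA-style data the label (0 or 1) picks the plausible alternative.
    chosen = sample["choice1"] if sample["label"] == 0 else sample["choice2"]
    print(sample["question"], "|", sample["premise"], "->", chosen)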
|
nlp-master
|
datasets/xcopa/xcopa.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Wikipedia dataset containing cleaned articles of all languages."""
from __future__ import absolute_import, division, print_function
import codecs
import json
import logging
import re
import xml.etree.cElementTree as etree
import six
import nlp
if six.PY3:
import bz2 # pylint:disable=g-import-not-at-top
else:
# py2's built-in bz2 package does not support reading from file objects.
import bz2file as bz2 # pylint:disable=g-import-not-at-top
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Foundation},
title = {Wikimedia Downloads},
url = {https://dumps.wikimedia.org}
}
"""
_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles of all languages.
The datasets are built from the Wikipedia dump
(https://dumps.wikimedia.org/) with one split per language. Each example
contains the content of one full Wikipedia article with cleaning to strip
markdown and unwanted sections (references, etc.).
"""
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
# Source: https://en.wikipedia.org/wiki/List_of_Wikipedias (accessed 3/1/2019)
# Removed because no articles: hz.
WIKIPEDIA_LANGUAGES = [
"aa",
"ab",
"ace",
"ady",
"af",
"ak",
"als",
"am",
"an",
"ang",
"ar",
"arc",
"arz",
"as",
"ast",
"atj",
"av",
"ay",
"az",
"azb",
"ba",
"bar",
"bat-smg",
"bcl",
"be",
"be-x-old",
"bg",
"bh",
"bi",
"bjn",
"bm",
"bn",
"bo",
"bpy",
"br",
"bs",
"bug",
"bxr",
"ca",
"cbk-zam",
"cdo",
"ce",
"ceb",
"ch",
"cho",
"chr",
"chy",
"ckb",
"co",
"cr",
"crh",
"cs",
"csb",
"cu",
"cv",
"cy",
"da",
"de",
"din",
"diq",
"dsb",
"dty",
"dv",
"dz",
"ee",
"el",
"eml",
"en",
"eo",
"es",
"et",
"eu",
"ext",
"fa",
"ff",
"fi",
"fiu-vro",
"fj",
"fo",
"fr",
"frp",
"frr",
"fur",
"fy",
"ga",
"gag",
"gan",
"gd",
"gl",
"glk",
"gn",
"gom",
"gor",
"got",
"gu",
"gv",
"ha",
"hak",
"haw",
"he",
"hi",
"hif",
"ho",
"hr",
"hsb",
"ht",
"hu",
"hy",
"ia",
"id",
"ie",
"ig",
"ii",
"ik",
"ilo",
"inh",
"io",
"is",
"it",
"iu",
"ja",
"jam",
"jbo",
"jv",
"ka",
"kaa",
"kab",
"kbd",
"kbp",
"kg",
"ki",
"kj",
"kk",
"kl",
"km",
"kn",
"ko",
"koi",
"krc",
"ks",
"ksh",
"ku",
"kv",
"kw",
"ky",
"la",
"lad",
"lb",
"lbe",
"lez",
"lfn",
"lg",
"li",
"lij",
"lmo",
"ln",
"lo",
"lrc",
"lt",
"ltg",
"lv",
"mai",
"map-bms",
"mdf",
"mg",
"mh",
"mhr",
"mi",
"min",
"mk",
"ml",
"mn",
"mr",
"mrj",
"ms",
"mt",
"mus",
"mwl",
"my",
"myv",
"mzn",
"na",
"nah",
"nap",
"nds",
"nds-nl",
"ne",
"new",
"ng",
"nl",
"nn",
"no",
"nov",
"nrm",
"nso",
"nv",
"ny",
"oc",
"olo",
"om",
"or",
"os",
"pa",
"pag",
"pam",
"pap",
"pcd",
"pdc",
"pfl",
"pi",
"pih",
"pl",
"pms",
"pnb",
"pnt",
"ps",
"pt",
"qu",
"rm",
"rmy",
"rn",
"ro",
"roa-rup",
"roa-tara",
"ru",
"rue",
"rw",
"sa",
"sah",
"sat",
"sc",
"scn",
"sco",
"sd",
"se",
"sg",
"sh",
"si",
"simple",
"sk",
"sl",
"sm",
"sn",
"so",
"sq",
"sr",
"srn",
"ss",
"st",
"stq",
"su",
"sv",
"sw",
"szl",
"ta",
"tcy",
"te",
"tet",
"tg",
"th",
"ti",
"tk",
"tl",
"tn",
"to",
"tpi",
"tr",
"ts",
"tt",
"tum",
"tw",
"ty",
"tyv",
"udm",
"ug",
"uk",
"ur",
"uz",
"ve",
"vec",
"vep",
"vi",
"vls",
"vo",
"wa",
"war",
"wo",
"wuu",
"xal",
"xh",
"xmf",
"yi",
"yo",
"za",
"zea",
"zh",
"zh-classical",
"zh-min-nan",
"zh-yue",
"zu",
]
_BASE_URL_TMPL = "https://dumps.wikimedia.org/{lang}wiki/{date}/"
_INFO_FILE = "dumpstatus.json"
class WikipediaConfig(nlp.BuilderConfig):
"""BuilderConfig for Wikipedia."""
def __init__(self, language=None, date=None, **kwargs):
"""BuilderConfig for Wikipedia.
Args:
language: string, the language code for the Wikipedia dump to use.
date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
available dates can be found at https://dumps.wikimedia.org/enwiki/.
**kwargs: keyword arguments forwarded to super.
"""
super(WikipediaConfig, self).__init__(
name="{0}.{1}".format(date, language),
description="Wikipedia dataset for {0}, parsed from {1} dump.".format(language, date),
**kwargs,
)
self.date = date
self.language = language
_VERSION = nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)")
class Wikipedia(nlp.BeamBasedBuilder):
"""Wikipedia dataset."""
# Use mirror (your.org) to avoid download caps.
BUILDER_CONFIGS = [
WikipediaConfig(version=_VERSION, language=lang, date="20200501",) # pylint:disable=g-complex-comprehension
for lang in WIKIPEDIA_LANGUAGES
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"title": nlp.Value("string"), "text": nlp.Value("string")}),
# No default supervised_keys.
supervised_keys=None,
homepage="https://dumps.wikimedia.org",
citation=_CITATION,
)
def _split_generators(self, dl_manager, pipeline):
def _base_url(lang):
return _BASE_URL_TMPL.format(lang=lang.replace("-", "_"), date=self.config.date)
lang = self.config.language
info_url = _base_url(lang) + _INFO_FILE
# Use dictionary since testing mock always returns the same result.
downloaded_files = dl_manager.download_and_extract({"info": info_url})
xml_urls = []
total_bytes = 0
with open(downloaded_files["info"]) as f:
dump_info = json.load(f)
multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
assert multistream_dump_info["status"] == "done", (
"Specified dump (%s) multistream status is not 'done': %s"
% (_base_url(lang), multistream_dump_info["status"])
)
for fname, info in multistream_dump_info["files"].items():
if ".xml" not in fname:
continue
total_bytes += info["size"]
xml_urls.append(_base_url(lang) + fname)
# Use dictionary since testing mock always returns the same result.
downloaded_files = dl_manager.download({"xml": xml_urls})
if not pipeline.is_local():
downloaded_files = dl_manager.ship_files_with_pipeline(downloaded_files, pipeline)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=nlp.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["xml"], "language": lang}
)
]
def _build_pcollection(self, pipeline, filepaths, language):
"""Build PCollection of examples in the raw (text) form."""
import apache_beam as beam
import mwparserfromhell
def _extract_content(filepath):
"""Extracts article content from a single WikiMedia XML file."""
logging.info("generating examples from = %s", filepath)
with beam.io.filesystems.FileSystems.open(filepath) as f:
f = bz2.BZ2File(filename=f)
if six.PY3:
# Workaround due to:
# https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
# To clear root, to free-up more memory than just `elem.clear()`.
context = etree.iterparse(utf_f, events=("end",))
context = iter(context)
unused_event, root = next(context)
for unused_event, elem in context:
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
id_ = elem.find("./{0}id".format(namespace)).text
# Filter pages that are not in the "main" namespace.
if ns != "0":
root.clear()
continue
raw_content = elem.find("./{0}revision/{0}text".format(namespace)).text
root.clear()
# Filter redirects.
if raw_content is None or raw_content.lower().startswith("#redirect"):
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (id_, title, raw_content)
def _clean_content(inputs):
"""Cleans raw wikicode to extract text."""
id_, title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell)
except (mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
if not text:
beam.metrics.Metrics.counter(language, "empty-clean-examples").inc()
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield id_, {"title": title, "text": text}
return (
pipeline
| "Initialize" >> beam.Create(filepaths)
| "Extract content" >> beam.FlatMap(_extract_content)
| "Distribute" >> beam.transforms.Reshuffle()
| "Clean content" >> beam.FlatMap(_clean_content)
)
def _parse_and_clean_wikicode(raw_content, parser):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = parser.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile("^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title)))
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur", "notelist-lg"}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text)
|
nlp-master
|
datasets/wikipedia/wikipedia.py
|
"""TODO(lc_quad): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(lc_quad): BibTeX citation
_CITATION = """
@inproceedings{dubey2017lc2,
title={LC-QuAD 2.0: A Large Dataset for Complex Question Answering over Wikidata and DBpedia},
author={Dubey, Mohnish and Banerjee, Debayan and Abdelkawi, Abdelrahman and Lehmann, Jens},
booktitle={Proceedings of the 18th International Semantic Web Conference (ISWC)},
year={2019},
organization={Springer}
}
"""
# TODO(lc_quad):
_DESCRIPTION = """\
LC-QuAD 2.0 is a large question answering dataset with 30,000 pairs of questions and their corresponding SPARQL queries. The target knowledge bases are Wikidata and DBpedia, specifically the 2018 version. Please see our paper for details about the dataset creation process and framework.
"""
_URL = "https://github.com/AskNowQA/LC-QuAD2.0/archive/master.zip"
class LcQuad(nlp.GeneratorBasedBuilder):
"""TODO(lc_quad): Short description of my dataset."""
# TODO(lc_quad): Set up version.
VERSION = nlp.Version("2.0.0")
def _info(self):
# TODO(lc_quad): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"NNQT_question": nlp.Value("string"),
"uid": nlp.Value("int32"),
"subgraph": nlp.Value("string"),
"template_index": nlp.Value("int32"),
"question": nlp.Value("string"),
"sparql_wikidata": nlp.Value("string"),
"sparql_dbpedia18": nlp.Value("string"),
"template": nlp.Value("string"),
# "template_id": nlp.Value('string'),
"paraphrased_question": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://lc-quad.sda.tech/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(lc_quad): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
dl_dir = os.path.join(dl_dir, "LC-QuAD2.0-master", "dataset")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "test.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(lc_quad): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id_, row in enumerate(data):
is_list = False
for key in row:
if key != "answer" and isinstance(row[key], list):
is_list = True
if is_list:
continue
yield id_, {
"NNQT_question": row["NNQT_question"],
"uid": row["uid"],
"subgraph": row["subgraph"],
"template_index": row["template_index"],
"question": row["question"],
"sparql_wikidata": row["sparql_wikidata"],
"sparql_dbpedia18": row["sparql_dbpedia18"],
"template": row["template"],
# "template_id": str(row['template_id']),
"paraphrased_question": row["paraphrased_question"],
}
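# --- Illustrative usage sketch (added; not part of the original script). ---
# Rows with list-valued fields (other than "answer") are skipped by
# _generate_examples, so loaded examples contain only flat string/int values.
# Assumes the generic `nlp.load_dataset` entry point.
if __name__ == "__main__":
    lc_quad = nlp.load_dataset("lc_quad", split="train")
    row = lc_quad[0]
    print(row["question"])
    print(row["sparql_wikidata"])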
|
nlp-master
|
datasets/lc_quad/lc_quad.py
|
"""ARCD: Arabic Reading Comprehension Dataset."""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import nlp
_CITATION = """\
@inproceedings{mozannar-etal-2019-neural,
title = {Neural {A}rabic Question Answering},
author = {Mozannar, Hussein and Maamary, Elie and El Hajal, Karl and Hajj, Hazem},
booktitle = {Proceedings of the Fourth Arabic Natural Language Processing Workshop},
month = {aug},
year = {2019},
address = {Florence, Italy},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W19-4612},
doi = {10.18653/v1/W19-4612},
pages = {108--118},
abstract = {This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.}
}
"""
_DESCRIPTION = """\
Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions\
posed by crowdworkers on Wikipedia articles.
"""
class ArcdConfig(nlp.BuilderConfig):
"""BuilderConfig for ARCD."""
def __init__(self, **kwargs):
"""BuilderConfig for ARCD.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(ArcdConfig, self).__init__(**kwargs)
class Arcd(nlp.GeneratorBasedBuilder):
"""ARCD: Arabic Reading Comprehension Dataset."""
_URL = "https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/"
_DEV_FILE = "arcd-test.json"
_TRAINING_FILE = "arcd-train.json"
BUILDER_CONFIGS = [
ArcdConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text",
)
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32")}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://github.com/husseinmozannar/SOQAL/tree/master/data",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls_to_download = {
"train": os.path.join(self._URL, self._TRAINING_FILE),
"dev": os.path.join(self._URL, self._DEV_FILE),
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath) as f:
arcd = json.load(f)
for article in arcd["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers},
}
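# --- Illustrative usage sketch (added; not part of the original script). ---
# The answers feature mirrors the SQuAD layout (parallel "text" and
# "answer_start" lists), so SQuAD-style extractive-QA code can be reused.
# Assumes the generic `nlp.load_dataset` entry point; "plain_text" is the only config.
if __name__ == "__main__":
    arcd = nlp.load_dataset("arcd", "plain_text", split="validation")
    qa = arcd[0]
    print(qa["question"])
    print(qa["answers"]["text"], qa["answers"]["answer_start"])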
|
nlp-master
|
datasets/arcd/arcd.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""XNLI: The Cross-Lingual NLI Corpus."""
from __future__ import absolute_import, division, print_function
import collections
import csv
import os
import six
import nlp
_CITATION = """\
@InProceedings{conneau2018xnli,
author = {Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin},
title = {XNLI: Evaluating Cross-lingual Sentence Representations},
booktitle = {Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing},
year = {2018},
publisher = {Association for Computational Linguistics},
location = {Brussels, Belgium},
}"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_DATA_URL = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip"
_LANGUAGES = ("ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh")
class Xnli(nlp.GeneratorBasedBuilder):
"""XNLI: The Cross-Lingual NLI Corpus. Version 1.0."""
BUILDER_CONFIGS = [
nlp.BuilderConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text import of XNLI",
)
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"premise": nlp.features.Translation(languages=_LANGUAGES,),
"hypothesis": nlp.features.TranslationVariableLanguages(languages=_LANGUAGES,),
"label": nlp.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
}
),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage="https://www.nyu.edu/projects/bowman/xnli/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
data_dir = os.path.join(dl_dir, "XNLI-1.0")
return [
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")}),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")}
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with open(filepath) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
rows_per_pair_id[row["pairID"]].append(row)
for rows in six.itervalues(rows_per_pair_id):
premise = {row["language"]: row["sentence1"] for row in rows}
hypothesis = {row["language"]: row["sentence2"] for row in rows}
yield rows[0]["pairID"], {
"premise": premise,
"hypothesis": hypothesis,
"label": rows[0]["gold_label"],
}
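# --- Illustrative usage sketch (added; not part of the original script). ---
# A single example bundles all 15 languages of one premise/hypothesis pair:
# "premise" is a Translation feature (assumed to decode to a language->string
# dict) and "label" a ClassLabel. Assumes the generic `nlp.load_dataset` entry point.
if __name__ == "__main__":
    xnli = nlp.load_dataset("xnli", "plain_text", split="validation")
    ex = xnli[0]
    print(ex["premise"]["en"])
    print(xnli.features["label"].int2str(ex["label"]))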
|
nlp-master
|
datasets/xnli/xnli.py
|
"""TODO(squad_v1_pt): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(squad_v1_pt): BibTeX citation
_CITATION = """\
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}
"""
# TODO(squad_v1_pt):
_DESCRIPTION = """\
Portuguese translation of the SQuAD dataset. The translation was performed automatically using the Google Cloud API.
"""
_URL = "https://github.com/nunorc/squad-v1.1-pt/raw/master"
_TRAIN_FILE = "train-v1.1-pt.json"
_DEV_FILE = "dev-v1.1-pt.json"
class SquadV1Pt(nlp.GeneratorBasedBuilder):
"""TODO(squad_v1_pt): Short description of my dataset."""
# TODO(squad_v1_pt): Set up version.
VERSION = nlp.Version("1.1.0")
def _info(self):
# TODO(squad_v1_pt): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/nunorc/squad-v1.1-pt",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(squad_v1_pt): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {"train": os.path.join(_URL, _TRAIN_FILE), "dev": os.path.join(_URL, _DEV_FILE)}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(squad_v1_pt): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for example in data["data"]:
title = example.get("title", "").strip()
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
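# --- Illustrative usage sketch (added; not part of the original script). ---
# The schema is identical to SQuAD v1.1, so the usual (question, context,
# answers) access pattern applies unchanged. Assumes the generic
# `nlp.load_dataset` entry point.
if __name__ == "__main__":
    squad_pt = nlp.load_dataset("squad_v1_pt", split="train")
    ex = squad_pt[0]
    print(ex["question"])
    print(ex["context"][:200])
    print(ex["answers"]["text"])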
|
nlp-master
|
datasets/squad_v1_pt/squad_v1_pt.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Scientific Papers Dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@article{Cohan_2018,
title={A Discourse-Aware Attention Model for Abstractive Summarization of
Long Documents},
url={http://dx.doi.org/10.18653/v1/n18-2097},
DOI={10.18653/v1/n18-2097},
journal={Proceedings of the 2018 Conference of the North American Chapter of
the Association for Computational Linguistics: Human Language
Technologies, Volume 2 (Short Papers)},
publisher={Association for Computational Linguistics},
author={Cohan, Arman and Dernoncourt, Franck and Kim, Doo Soon and Bui, Trung and Kim, Seokhwan and Chang, Walter and Goharian, Nazli},
year={2018}
}
"""
_DESCRIPTION = """
Scientific papers datasets contain two sets of long and structured documents.
The datasets are obtained from ArXiv and PubMed OpenAccess repositories.
Both "arxiv" and "pubmed" have the following features:
- article: the body of the document, paragraphs separated by "\n".
- abstract: the abstract of the document, paragraphs separated by "\n".
- section_names: titles of sections, separated by "\n".
"""
_DOCUMENT = "article"
_SUMMARY = "abstract"
_URLS = {
"arxiv": "https://drive.google.com/uc?id=1b3rmCSIoh6VhD4HKWjI4HOW-cSwcwbeC&export=download",
"pubmed": "https://drive.google.com/uc?id=1lvsqvsFi3W-pE1SqNZI0s8NR9rC1tsja&export=download",
}
class ScientificPapersConfig(nlp.BuilderConfig):
"""BuilderConfig for Scientific Papers."""
def __init__(self, filename=None, **kwargs):
"""BuilderConfig for Wikihow.
Args:
filename: filename of different configs for the dataset.
**kwargs: keyword arguments forwarded to super.
"""
# 1.1.0 remove sentence breaker <S> and </S> in summary.
super(ScientificPapersConfig, self).__init__(version=nlp.Version("1.1.1"), **kwargs)
self.filename = filename
class ScientificPapers(nlp.GeneratorBasedBuilder):
"""Scientific Papers."""
BUILDER_CONFIGS = [
ScientificPapersConfig(name="pubmed", description="Documents from PubMed repository."),
ScientificPapersConfig(name="arxiv", description="Documents from ArXiv repository."),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{_DOCUMENT: nlp.Value("string"), _SUMMARY: nlp.Value("string"), "section_names": nlp.Value("string"),}
),
supervised_keys=None,
homepage="https://github.com/armancohan/long-summarization",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract(_URLS)
path = os.path.join(dl_paths[self.config.name], self.config.name + "-dataset")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": os.path.join(path, "train.txt")},),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"path": os.path.join(path, "val.txt")},),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"path": os.path.join(path, "test.txt")},),
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path) as f:
for line in f:
# Possible keys are:
# "article_id": str
# "article_text": list[str] article (list of paragraphs).
# "abstract_text": list[str], abstract (list of paragraphs).
# "section_names": list[str], list of section names.
# "sections": list[list[str]], list of sections (list of paragraphs)
d = json.loads(line)
summary = "\n".join(d["abstract_text"])
# In original paper, <S> and </S> are not used in vocab during training
# or during decoding.
# https://github.com/armancohan/long-summarization/blob/master/data.py#L27
summary = summary.replace("<S>", "").replace("</S>", "")
yield d["article_id"], {
_DOCUMENT: "\n".join(d["article_text"]),
_SUMMARY: summary,
"section_names": "\n".join(d["section_names"]),
}
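# --- Illustrative usage sketch (added; not part of the original script). ---
# Two configs are available, "arxiv" and "pubmed"; article, abstract and
# section_names are single strings with paragraphs/sections joined by "\n"
# (see _generate_examples). Assumes the generic `nlp.load_dataset` entry point.
if __name__ == "__main__":
    pubmed = nlp.load_dataset("scientific_papers", "pubmed", split="validation")
    doc = pubmed[0]
    print(doc["section_names"].split("\n"))
    print(doc["abstract"][:300])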
|
nlp-master
|
datasets/scientific_papers/scientific_papers.py
|
"""TODO(art): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(art): BibTeX citation
_CITATION = """\
@InProceedings{anli,
author = {Chandra, Bhagavatula and Ronan, Le Bras and Chaitanya, Malaviya and Keisuke, Sakaguchi and Ari, Holtzman
and Hannah, Rashkin and Doug, Downey and Scott, Wen-tau Yih and Yejin, Choi},
title = {Abductive Commonsense Reasoning},
year = {2020}
}"""
# TODO(art):
_DESCRIPTION = """\
The Abductive Natural Language Inference Dataset from AI2.
"""
_DATA_URL = "https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip"
class ArtConfig(nlp.BuilderConfig):
"""BuilderConfig for Art."""
def __init__(self, **kwargs):
"""BuilderConfig for Art.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(ArtConfig, self).__init__(
version=nlp.Version("0.1.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Art(nlp.GeneratorBasedBuilder):
"""TODO(art): Short description of my dataset."""
# TODO(art): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
ArtConfig(
name="anli",
description="""\
the Abductive Natural Language Inference Dataset from AI2.
""",
),
]
def _info(self):
# TODO(art): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"observation_1": nlp.Value("string"),
"observation_2": nlp.Value("string"),
"hypothesis_1": nlp.Value("string"),
"hypothesis_2": nlp.Value("string"),
"label": nlp.features.ClassLabel(num_classes=3)
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://leaderboard.allenai.org/anli/submissions/get-started",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(art): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_DATA_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(dl_dir, "dev.jsonl"),
"labelpath": os.path.join(dl_dir, "dev-labels.lst"),
},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(dl_dir, "train.jsonl"),
"labelpath": os.path.join(dl_dir, "train-labels.lst"),
},
),
]
def _generate_examples(self, filepath, labelpath):
"""Yields examples."""
# TODO(art): Yields (key, example) tuples from the dataset
data = []
for line in open(filepath):
data.append(json.loads(line))
labels = []
with open(labelpath) as f:
for word in f:
labels.append(word.strip())  # strip the trailing newline before the label is encoded
for idx, row in enumerate(data):
yield idx, {
"observation_1": row["obs1"],
"observation_2": row["obs2"],
"hypothesis_1": row["hyp1"],
"hypothesis_2": row["hyp2"],
"label": labels[idx],
}
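# --- Illustrative usage sketch (added; not part of the original script). ---
# The single "anli" config pairs two observations with two candidate
# hypotheses; the ClassLabel indicates which hypothesis is the more plausible
# explanation. Assumes the generic `nlp.load_dataset` entry point.
if __name__ == "__main__":
    anli = nlp.load_dataset("art", "anli", split="validation")
    ex = anli[0]
    print(ex["observation_1"], "...", ex["observation_2"])
    print("hyp1:", ex["hypothesis_1"])
    print("hyp2:", ex["hypothesis_2"], "| label:", ex["label"])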
|
nlp-master
|
datasets/art/art.py
|
"""TODO(tydiqa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import textwrap
import nlp
# TODO(tydiqa): BibTeX citation
_CITATION = """\
@article{tydiqa,
title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki},
year = {2020},
journal = {Transactions of the Association for Computational Linguistics}
}
"""
# TODO(tydiqa):
_DESCRIPTION = """\
TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs.
The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language
expresses -- such that we expect models performing well on this set to generalize across a large number of the languages
in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic
information-seeking task and avoid priming effects, questions are written by people who want to know the answer, but
don’t know the answer yet (unlike SQuAD and its descendants), and the data is collected directly in each language without
the use of translation (unlike MLQA and XQuAD).
"""
_URL = "https://storage.googleapis.com/tydiqa/"
_PRIMARY_TASK_TRAIN = "v1.0/tydiqa-v1.0-train.jsonl.gz"
_PRIMARY_TASK_DEV = "v1.0/tydiqa-v1.0-dev.jsonl.gz"
_SECONDARY_TASK_TRAIN = "v1.1/tydiqa-goldp-v1.1-train.json"
_SECONDARY_TASK_DEV = "v1.1/tydiqa-goldp-v1.1-dev.json"
class TydiqaConfig(nlp.BuilderConfig):
""" BuilderConfig for Tydiqa"""
def __init__(self, **kwargs):
"""
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(TydiqaConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Tydiqa(nlp.GeneratorBasedBuilder):
"""TODO(tydiqa): Short description of my dataset."""
# TODO(tydiqa): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
TydiqaConfig(
name="primary_task",
description=textwrap.dedent(
"""\
Passage selection task (SelectP): Given a list of the passages in the article, return either (a) the index of
the passage that answers the question or (b) NULL if no such passage exists.
Minimal answer span task (MinSpan): Given the full text of an article, return one of (a) the start and end
byte indices of the minimal span that completely answers the question; (b) YES or NO if the question requires
a yes/no answer and we can draw a conclusion from the passage; (c) NULL if it is not possible to produce a
minimal answer for this question."""
),
),
TydiqaConfig(
name="secondary_task",
description=textwrap.dedent(
"""\Gold passage task (GoldP): Given a passage that is guaranteed to contain the
answer, predict the single contiguous span of characters that answers the question. This is more similar to
existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
only the gold answer passage is provided rather than the entire Wikipedia article;
unanswerable questions have been discarded, similar to MLQA and XQuAD;
we evaluate with the SQuAD 1.1 metrics like XQuAD; and
Thai and Japanese are removed since the lack of whitespace breaks some tools.
"""
),
),
]
def _info(self):
# TODO(tydiqa): Specifies the nlp.DatasetInfo object
if self.config.name == "primary_task":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"passage_answer_candidates": nlp.features.Sequence(
{"plaintext_start_byte": nlp.Value("int32"), "plaintext_end_byte": nlp.Value("int32")}
),
"question_text": nlp.Value("string"),
"document_title": nlp.Value("string"),
"language": nlp.Value("string"),
"annotations": nlp.features.Sequence(
{
#'annotation_id': nlp.Value('variant'),
"passage_answer_candidate_index": nlp.Value("int32"),
"minimal_answers_start_byte": nlp.Value("int32"),
"minimal_answers_end_byte": nlp.Value("int32"),
"yes_no_answer": nlp.Value("string"),
}
),
"document_plaintext": nlp.Value("string"),
# 'example_id': nlp.Value('variant'),
"document_url": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/google-research-datasets/tydiqa",
citation=_CITATION,
)
elif self.config.name == "secondary_task":
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://github.com/google-research-datasets/tydiqa",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(tydiqa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
primary_urls_to_download = {
"train": os.path.join(_URL, _PRIMARY_TASK_TRAIN),
"dev": os.path.join(_URL, _PRIMARY_TASK_DEV),
}
secondary_urls_to_download = {
"train": os.path.join(_URL, _SECONDARY_TASK_TRAIN),
"dev": os.path.join(_URL, _SECONDARY_TASK_DEV),
}
primary_downloaded = dl_manager.download_and_extract(primary_urls_to_download)
secondary_downloaded = dl_manager.download_and_extract(secondary_urls_to_download)
if self.config.name == "primary_task":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": primary_downloaded["train"]},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": primary_downloaded["dev"]},
),
]
elif self.config.name == "secondary_task":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": secondary_downloaded["train"]},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": secondary_downloaded["dev"]},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(tydiqa): Yields (key, example) tuples from the dataset
if self.config.name == "primary_task":
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
passages = data["passage_answer_candidates"]
end_byte = [passage["plaintext_end_byte"] for passage in passages]
start_byte = [passage["plaintext_start_byte"] for passage in passages]
title = data["document_title"]
lang = data["language"]
question = data["question_text"]
annotations = data["annotations"]
annot_ids = [annotation["annotation_id"] for annotation in annotations]
yes_no_answers = [annotation["yes_no_answer"] for annotation in annotations]
min_answers_end_byte = [
annotation["minimal_answer"]["plaintext_end_byte"] for annotation in annotations
]
min_answers_start_byte = [
annotation["minimal_answer"]["plaintext_start_byte"] for annotation in annotations
]
passage_cand_answers = [
annotation["passage_answer"]["candidate_index"] for annotation in annotations
]
doc = data["document_plaintext"]
example_id = data["example_id"]
url = data["document_url"]
yield id_, {
"passage_answer_candidates": {
"plaintext_start_byte": start_byte,
"plaintext_end_byte": end_byte,
},
"question_text": question,
"document_title": title,
"language": lang,
"annotations": {
#'annotation_id': annot_ids,
"passage_answer_candidate_index": passage_cand_answers,
"minimal_answers_start_byte": min_answers_start_byte,
"minimal_answers_end_byte": min_answers_end_byte,
"yes_no_answer": yes_no_answers,
},
"document_plaintext": doc,
#'example_id': example_id,
"document_url": url,
}
elif self.config.name == "secondary_task":
with open(filepath) as f:
data = json.load(f)
for article in data["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
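# --- Illustrative usage sketch (added; not part of the original script). ---
# The two configs expose different schemas: "primary_task" keeps the byte-offset
# passage/minimal-answer annotations, while "secondary_task" (GoldP) follows the
# SQuAD layout. Assumes the generic `nlp.load_dataset` entry point.
if __name__ == "__main__":
    goldp = nlp.load_dataset("tydiqa", "secondary_task", split="validation")
    print(goldp[0]["question"], goldp[0]["answers"]["text"])
    primary = nlp.load_dataset("tydiqa", "primary_task", split="validation")
    print(primary[0]["language"], primary[0]["question_text"])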
|
nlp-master
|
datasets/tydiqa/tydiqa.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wiki40B: A clean Wikipedia dataset for 40+ languages."""
from __future__ import absolute_import, division, print_function
import logging
import os
import nlp
_CITATION = """
"""
_DESCRIPTION = """
Cleaned-up text for 40+ Wikipedia language editions of pages
corresponding to entities. The datasets have train/dev/test splits per language.
The dataset is cleaned up by page filtering to remove disambiguation pages,
redirect pages, deleted pages, and non-entity pages. Each example contains the
wikidata id of the entity, and the full Wikipedia article after page processing
that removes non-content sections and structured objects.
"""
_LICENSE = """
This work is licensed under the Creative Commons Attribution-ShareAlike
3.0 Unported License. To view a copy of this license, visit
http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""
_URL = "https://research.google/pubs/pub49029/"
_DATA_DIRECTORY = "gs://tfds-data/downloads/wiki40b/tfrecord_prod"
WIKIPEDIA_LANGUAGES = [
"en",
"ar",
"zh-cn",
"zh-tw",
"nl",
"fr",
"de",
"it",
"ja",
"ko",
"pl",
"pt",
"ru",
"es",
"th",
"tr",
"bg",
"ca",
"cs",
"da",
"el",
"et",
"fa",
"fi",
"he",
"hi",
"hr",
"hu",
"id",
"lt",
"lv",
"ms",
"no",
"ro",
"sk",
"sl",
"sr",
"sv",
"tl",
"uk",
"vi",
]
class Wiki40bConfig(nlp.BuilderConfig):
"""BuilderConfig for Wiki40B."""
def __init__(self, language=None, **kwargs):
"""BuilderConfig for Wiki40B.
Args:
language: string, the language code for the Wiki40B dataset to use.
**kwargs: keyword arguments forwarded to super.
"""
super(Wiki40bConfig, self).__init__(
name=str(language), description="Wiki40B dataset for {0}.".format(language), **kwargs
)
self.language = language
_VERSION = nlp.Version("1.1.0")
class Wiki40b(nlp.BeamBasedBuilder):
"""Wiki40B: A Clean Wikipedia Dataset for Mutlilingual Language Modeling."""
BUILDER_CONFIGS = [
Wiki40bConfig(version=_VERSION, language=lang,) # pylint:disable=g-complex-comprehension
for lang in WIKIPEDIA_LANGUAGES
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{"wikidata_id": nlp.Value("string"), "text": nlp.Value("string"), "version_id": nlp.Value("string")}
),
supervised_keys=None,
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
lang = self.config.language
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"filepaths": os.path.join(_DATA_DIRECTORY, "train", "{}_examples-*".format(lang))},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={"filepaths": os.path.join(_DATA_DIRECTORY, "dev", "{}_examples-*".format(lang))},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"filepaths": os.path.join(_DATA_DIRECTORY, "test", "{}_examples-*".format(lang))},
),
]
def _build_pcollection(self, pipeline, filepaths):
"""Build PCollection of examples."""
import apache_beam as beam
import tensorflow as tf
logging.info("generating examples from = %s", filepaths)
def _extract_content(example):
"""Extracts content from a TFExample."""
wikidata_id = example.features.feature["wikidata_id"].bytes_list.value[0].decode("utf-8")
text = example.features.feature["text"].bytes_list.value[0].decode("utf-8")
version_id = example.features.feature["version_id"].bytes_list.value[0].decode("utf-8")
# wikidata_id could be duplicated with different texts.
yield wikidata_id + text, {
"wikidata_id": wikidata_id,
"text": text,
"version_id": version_id,
}
return (
pipeline
| beam.io.ReadFromTFRecord(filepaths, coder=beam.coders.ProtoCoder(tf.train.Example))
| beam.FlatMap(_extract_content)
)
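# --- Illustrative usage sketch (added; not part of the original script). ---
# Like the Wikipedia builder this one is Beam-based and reads pre-built
# TFRecords from _DATA_DIRECTORY, so it needs Apache Beam, TensorFlow and
# access to that bucket; passing beam_runner="DirectRunner" is assumed to be
# the way to run it locally. Config names are bare language codes such as "en".
if __name__ == "__main__":
    wiki40b_en = nlp.load_dataset("wiki40b", "en", split="test", beam_runner="DirectRunner")
    entry = wiki40b_en[0]
    print(entry["wikidata_id"], entry["text"][:200])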
|
nlp-master
|
datasets/wiki40b/wiki40b.py
|
"""TODO(jeopardy): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(jeopardy): BibTeX citation
_CITATION = """
"""
# TODO(jeopardy):
_DESCRIPTION = """
Dataset containing 216,930 Jeopardy questions, answers and other data.
The json file is an unordered list of questions where each question has
'category' : the question category, e.g. "HISTORY"
'value' : integer $ value of the question as string, e.g. "200"
Note: This is "None" for Final Jeopardy! and Tiebreaker questions
'question' : text of question
Note: This sometimes contains hyperlinks and other messy text, such as when there's a picture or video question
'answer' : text of answer
'round' : one of "Jeopardy!","Double Jeopardy!","Final Jeopardy!" or "Tiebreaker"
Note: Tiebreaker questions do happen but they're very rare (like once every 20 years)
'show_number' : int of show number, e.g '4680'
'air_date' : string of the show air date in format YYYY-MM-DD
"""
_URL = "https://www.reddit.com/r/datasets/comments/1uyd0t/200000_jeopardy_questions_in_a_json_file/"
_DATA_URL = "http://skeeto.s3.amazonaws.com/share/JEOPARDY_QUESTIONS1.json.gz"
_DATA_FILE = "JEOPARDY_QUESTIONS1.json"
class Jeopardy(nlp.GeneratorBasedBuilder):
"""TODO(jeopardy): Short description of my dataset."""
# TODO(jeopardy): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(jeopardy): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"category": nlp.Value("string"),
"air_date": nlp.Value("string"),
"question": nlp.Value("string"),
"value": nlp.Value("int32"),
"answer": nlp.Value("string"),
"round": nlp.Value("string"),
"category": nlp.Value("string"),
"show_number": nlp.Value("int32"),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(jeopardy): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
filepath = dl_manager.download_and_extract(_DATA_URL)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": filepath}),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(jeopardy): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for i, example in enumerate(data):
category = example["category"]
air_date = example["air_date"]
question = example["question"]
if example["value"] is None:
value = -1 # for Final Jeopardy! and Tiebreaker questions
else:
value = int(example["value"][1:].replace(",", ""))
answer = example["answer"]
round = example["round"]
show_number = int(example["show_number"])
yield i, {
"category": category,
"air_date": air_date,
"question": question,
"value": value,
"answer": answer,
"round": round,
"category": category,
"show_number": show_number,
}
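# --- Illustrative usage sketch (added; not part of the original script). ---
# Everything lands in a single train split, and "value" is -1 for Final
# Jeopardy!/Tiebreaker questions (see _generate_examples). Assumes the generic
# `nlp.load_dataset` entry point.
if __name__ == "__main__":
    jeopardy = nlp.load_dataset("jeopardy", split="train")
    q = jeopardy[0]
    print(q["category"], q["value"], q["round"])
    print(q["question"], "->", q["answer"])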
|
nlp-master
|
datasets/jeopardy/jeopardy.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Stanford Natural Language Inference (SNLI) Corpus."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
_CITATION = """\
@inproceedings{snli:emnlp2015,
Author = {Bowman, Samuel R. and Angeli, Gabor and Potts, Christopher, and Manning, Christopher D.},
Booktitle = {Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
Publisher = {Association for Computational Linguistics},
Title = {A large annotated corpus for learning natural language inference},
Year = {2015}
}
"""
_DESCRIPTION = """\
The SNLI corpus (version 1.0) is a collection of 570k human-written English
sentence pairs manually labeled for balanced classification with the labels
entailment, contradiction, and neutral, supporting the task of natural language
inference (NLI), also known as recognizing textual entailment (RTE).
"""
_DATA_URL = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
class Snli(nlp.GeneratorBasedBuilder):
"""The Stanford Natural Language Inference (SNLI) Corpus."""
BUILDER_CONFIGS = [
nlp.BuilderConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text import of SNLI",
)
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"premise": nlp.Value("string"),
"hypothesis": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
}
),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage="https://nlp.stanford.edu/projects/snli/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
data_dir = os.path.join(dl_dir, "snli_1.0")
return [
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_test.txt")}
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_dev.txt")}
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_train.txt")}
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
with open(filepath) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for idx, row in enumerate(reader):
label = -1 if row["gold_label"] == "-" else row["gold_label"]
yield idx, {
"premise": row["sentence1"],
"hypothesis": row["sentence2"],
"label": label,
}
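# --- Illustrative usage sketch (added; not part of the original script). ---
# Pairs without a gold label are kept with label == -1, so they usually need
# to be filtered out before training. Assumes the generic `nlp.load_dataset`
# entry point.
if __name__ == "__main__":
    snli = nlp.load_dataset("snli", "plain_text", split="validation")
    n_unlabeled = sum(1 for ex in snli if ex["label"] == -1)
    print(len(snli), "examples,", n_unlabeled, "without a gold label")
    print(snli.features["label"].names)   # ["entailment", "neutral", "contradiction"]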
|
nlp-master
|
datasets/snli/snli.py
|
"""TODO(coarse_discourse): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(coarse_discourse): BibTeX citation
_CITATION = """\
@inproceedings{coarsediscourse, title={Characterizing Online Discussion Using Coarse Discourse Sequences}, author={Zhang, Amy X. and Culbertson, Bryan and Paritosh, Praveen}, booktitle={Proceedings of the 11th International AAAI Conference on Weblogs and Social Media}, series={ICWSM '17}, year={2017}, location = {Montreal, Canada} }
"""
# TODO(coarse_discourse):
_DESCRIPTION = """\
This dataset contains discourse annotations and relations on Reddit threads from 2016.
"""
_URL = "https://github.com/google-research-datasets/coarse-discourse/archive/master.zip"
class CoarseDiscourse(nlp.GeneratorBasedBuilder):
"""TODO(coarse_discourse): Short description of my dataset."""
# TODO(coarse_discourse): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(coarse_discourse): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"title": nlp.Value("string"),
"is_self_post": nlp.Value("bool"),
"subreddit": nlp.Value("string"),
"url": nlp.Value("string"),
"majority_link": nlp.Value("string"),
"is_first_post": nlp.Value("bool"),
"majority_type": nlp.Value("string"),
"id_post": nlp.Value("string"),
"post_depth": nlp.Value("int32"),
"in_reply_to": nlp.Value("string"),
"annotations": nlp.features.Sequence(
{
"annotator": nlp.Value("string"),
"link_to_post": nlp.Value("string"),
"main_type": nlp.Value("string"),
}
),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/google-research-datasets/coarse-discourse",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(coarse_discourse): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(dl_dir, "coarse-discourse-master", "coarse_discourse_dataset.json")
},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(coarse_discourse): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
url = data.get("url", "")
is_self_post = data.get("is_self_post", "")
subreddit = data.get("subreddit", "")
title = data.get("title", "")
                posts = data.get("posts", [])
for id1, post in enumerate(posts):
maj_link = post.get("majority_link", "")
maj_type = post.get("majority_type", "")
id_post = post.get("id", "")
                    is_first_post = post.get("is_first_post", "")
post_depth = post.get("post_depth", -1)
in_reply_to = post.get("in_reply_to", "")
annotations = post["annotations"]
annotators = [annotation.get("annotator", "") for annotation in annotations]
main_types = [annotation.get("main_type", "") for annotation in annotations]
                    link_posts = [annotation.get("link_to_post", "") for annotation in annotations]
yield str(id_) + "_" + str(id1), {
"title": title,
"is_self_post": is_self_post,
"subreddit": subreddit,
"url": url,
"majority_link": maj_link,
"is_first_post": is_first_post,
"majority_type": maj_type,
"id_post": id_post,
"post_depth": post_depth,
"in_reply_to": in_reply_to,
"annotations": {"annotator": annotators, "link_to_post": link_posts, "main_type": main_types},
}
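# Usage sketch (illustrative only): the "annotations" Sequence feature declared in _info()
# is exposed column-wise, i.e. one list per sub-field.
if __name__ == "__main__":
    import nlp

    threads = nlp.load_dataset("coarse_discourse", split="train")
    post = threads[0]
    for annotator, main_type in zip(post["annotations"]["annotator"], post["annotations"]["main_type"]):
        print(post["id_post"], annotator, main_type)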
|
nlp-master
|
datasets/coarse_discourse/coarse_discourse.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Natural Questions: A Benchmark for Question Answering Research."""
from __future__ import absolute_import, division, print_function
import json
import re
import apache_beam as beam
import six
import nlp
if six.PY2:
import HTMLParser as html_parser # pylint:disable=g-import-not-at-top
html_unescape = html_parser.HTMLParser().unescape
else:
import html # pylint:disable=g-import-not-at-top
html_unescape = html.unescape
_CITATION = """
@article{47761,
title = {Natural Questions: a Benchmark for Question Answering Research},
author = {Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year = {2019},
journal = {Transactions of the Association of Computational Linguistics}
}
"""
_DESCRIPTION = """
The NQ corpus contains questions from real users, and it requires QA systems to
read and comprehend an entire Wikipedia article that may or may not contain the
answer to the question. The inclusion of real user questions, and the
requirement that solutions should read an entire page to find the answer, cause
NQ to be a more realistic and challenging task than prior QA datasets.
"""
_URL = "https://ai.google.com/research/NaturalQuestions/dataset"
_BASE_DOWNLOAD_URL = "https://storage.googleapis.com/natural_questions/v1.0"
_DOWNLOAD_URLS = {
"train": ["%s/train/nq-train-%02d.jsonl.gz" % (_BASE_DOWNLOAD_URL, i) for i in range(50)],
"validation": ["%s/dev/nq-dev-%02d.jsonl.gz" % (_BASE_DOWNLOAD_URL, i) for i in range(5)],
}
class NaturalQuestions(nlp.BeamBasedBuilder):
"""Natural Questions: A Benchmark for Question Answering Research."""
VERSION = nlp.Version("0.0.2")
SUPPORTED_VERSIONS = [nlp.Version("0.0.1")]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"document": {
"title": nlp.Value("string"),
"url": nlp.Value("string"),
"html": nlp.Value("string"),
"tokens": nlp.features.Sequence({"token": nlp.Value("string"), "is_html": nlp.Value("bool")}),
},
"question": {"text": nlp.Value("string"), "tokens": nlp.features.Sequence(nlp.Value("string"))},
"annotations": nlp.features.Sequence(
{
"id": nlp.Value("string"),
"long_answer": {
"start_token": nlp.Value("int64"),
"end_token": nlp.Value("int64"),
"start_byte": nlp.Value("int64"),
"end_byte": nlp.Value("int64"),
},
"short_answers": nlp.features.Sequence(
{
"start_token": nlp.Value("int64"),
"end_token": nlp.Value("int64"),
"start_byte": nlp.Value("int64"),
"end_byte": nlp.Value("int64"),
"text": nlp.Value("string"),
}
),
"yes_no_answer": nlp.features.ClassLabel(names=["NO", "YES"]), # Can also be -1 for NONE.
}
),
}
),
supervised_keys=None,
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager, pipeline):
"""Returns SplitGenerators."""
files = dl_manager.download(_DOWNLOAD_URLS)
if not pipeline.is_local():
files = dl_manager.ship_files_with_pipeline(files, pipeline)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepaths": files["train"]},),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepaths": files["validation"]},),
]
def _build_pcollection(self, pipeline, filepaths):
"""Build PCollection of examples."""
def _parse_example(line):
"""Parse a single json line and emit an example dict."""
ex_json = json.loads(line)
html_bytes = ex_json["document_html"].encode("utf-8")
def _parse_short_answer(short_ans):
""""Extract text of short answer."""
ans_bytes = html_bytes[short_ans["start_byte"] : short_ans["end_byte"]]
# Remove non-breaking spaces.
ans_bytes = ans_bytes.replace(b"\xc2\xa0", b" ")
text = ans_bytes.decode("utf-8")
# Remove HTML markup.
text = re.sub("<([^>]*)>", "", html_unescape(text))
return {
"start_token": short_ans["start_token"],
"end_token": short_ans["end_token"],
"start_byte": short_ans["start_byte"],
"end_byte": short_ans["end_byte"],
"text": text,
}
def _parse_annotation(an_json):
return {
# Convert to str since some IDs cannot be represented by nlp.Value('int64').
"id": str(an_json["annotation_id"]),
"long_answer": {
"start_token": an_json["long_answer"]["start_token"],
"end_token": an_json["long_answer"]["end_token"],
"start_byte": an_json["long_answer"]["start_byte"],
"end_byte": an_json["long_answer"]["end_byte"],
},
"short_answers": [_parse_short_answer(ans) for ans in an_json["short_answers"]],
"yes_no_answer": (-1 if an_json["yes_no_answer"] == "NONE" else an_json["yes_no_answer"]),
}
beam.metrics.Metrics.counter("nq", "examples").inc()
# Convert to str since some IDs cannot be represented by nlp.Value('int64').
id_ = str(ex_json["example_id"])
return (
id_,
{
"id": id_,
"document": {
"title": ex_json["document_title"],
"url": ex_json["document_url"],
"html": html_bytes,
"tokens": [
{"token": t["token"], "is_html": t["html_token"]} for t in ex_json["document_tokens"]
],
},
"question": {"text": ex_json["question_text"], "tokens": ex_json["question_tokens"]},
"annotations": [_parse_annotation(an_json) for an_json in ex_json["annotations"]],
},
)
return (
pipeline
| beam.Create(filepaths)
| beam.io.ReadAllFromText(compression_type=beam.io.textio.CompressionTypes.GZIP)
| beam.Map(_parse_example)
)
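# Illustrative sketch of the short-answer extraction performed in _parse_short_answer above:
# answers are byte offsets into the page HTML, so the HTML is sliced as bytes before markup
# is stripped. The toy document and offsets below are made up for demonstration.
if __name__ == "__main__":
    toy_html = "<p>The capital of France is Paris.</p>"
    toy_bytes = toy_html.encode("utf-8")
    start_byte, end_byte = 28, 33  # byte span covering "Paris" in the toy document
    span = toy_bytes[start_byte:end_byte].replace(b"\xc2\xa0", b" ").decode("utf-8")
    print(re.sub("<([^>]*)>", "", html_unescape(span)))  # -> Paris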
|
nlp-master
|
datasets/natural_questions/natural_questions.py
|
"""TODO(empathetic_dialogues): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
_CITATION = """\
@inproceedings{rashkin2019towards,
title = {Towards Empathetic Open-domain Conversation Models: a New Benchmark and Dataset},
author = {Hannah Rashkin and Eric Michael Smith and Margaret Li and Y-Lan Boureau},
booktitle = {ACL},
year = {2019},
}
"""
_DESCRIPTION = """\
Dialogue dataset released with the paper 'Towards Empathetic Open-domain Conversation Models: a New Benchmark and Dataset' (the linked repository also hosts the original PyTorch implementation).
"""
_URL = "https://dl.fbaipublicfiles.com/parlai/empatheticdialogues/empatheticdialogues.tar.gz"
class EmpatheticDialogues(nlp.GeneratorBasedBuilder):
"""TODO(empathetic_dialogues): Short description of my dataset."""
# TODO(empathetic_dialogues): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(empathetic_dialogues): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"conv_id": nlp.Value("string"),
"utterance_idx": nlp.Value("int32"),
"context": nlp.Value("string"),
"prompt": nlp.Value("string"),
"speaker_idx": nlp.Value("int32"),
"utterance": nlp.Value("string"),
"selfeval": nlp.Value("string"),
"tags": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/facebookresearch/EmpatheticDialogues",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(empathetic_dialogues): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "empatheticdialogues")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "valid.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.csv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(empathetic_dialogues): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = csv.DictReader(f)
for id_, row in enumerate(data):
utterance = row["utterance"]
speaker_id = int(row["speaker_idx"])
context = row["context"]
conv_id = row["conv_id"]
tags = row["tags"] if row["tags"] else ""
selfeval = row["selfeval"] if row["selfeval"] else ""
utterance_id = int(row["utterance_idx"])
prompt = row["prompt"]
yield id_, {
"utterance": utterance,
"utterance_idx": utterance_id,
"context": context,
"speaker_idx": speaker_id,
"conv_id": conv_id,
"selfeval": selfeval,
"prompt": prompt,
"tags": tags,
}
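# Usage sketch (illustrative only): each row is a single utterance, so a conversation is
# reconstructed by grouping on conv_id and ordering by utterance_idx.
if __name__ == "__main__":
    import collections

    import nlp

    dialogues = nlp.load_dataset("empathetic_dialogues", split="validation")
    conversations = collections.defaultdict(list)
    for row in dialogues:
        conversations[row["conv_id"]].append((row["utterance_idx"], row["utterance"]))
    conv_id, turns = next(iter(conversations.items()))
    print(conv_id, [utterance for _, utterance in sorted(turns)])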
|
nlp-master
|
datasets/empathetic_dialogues/empathetic_dialogues.py
|
"""TODO(reclor): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(reclor): BibTeX citation
_CITATION = """\
@inproceedings{yu2020reclor,
author = {Yu, Weihao and Jiang, Zihang and Dong, Yanfei and Feng, Jiashi},
title = {ReClor: A Reading Comprehension Dataset Requiring Logical Reasoning},
booktitle = {International Conference on Learning Representations (ICLR)},
month = {April},
year = {2020}
}
"""
# TODO(reclor):
_DESCRIPTION = """\
Logical reasoning is an important ability to examine, analyze, and critically evaluate arguments as they occur in ordinary
language, as defined by LSAC. ReClor is a dataset extracted from logical reasoning questions of standardized graduate
admission examinations. Empirical results show that state-of-the-art models struggle on ReClor with poor performance,
indicating that more research is needed to genuinely enhance the logical reasoning ability of current models. We hope this
dataset can help push Machine Reading Comprehension (MRC) towards more complicated reasoning.
"""
class Reclor(nlp.GeneratorBasedBuilder):
"""TODO(reclor): Short description of my dataset."""
# TODO(reclor): Set up version.
VERSION = nlp.Version("0.1.0")
@property
def manual_download_instructions(self):
return """\
to use ReClor you need to download it manually. Please go to its homepage (http://whyu.me/reclor/), fill in the Google
form, and you will receive a download link and a password to extract it. Please extract all files into one folder and pass that folder's path via nlp.load_dataset('reclor', data_dir='path/to/folder/folder_name')
"""
def _info(self):
# TODO(reclor): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence({"answer": nlp.Value("string")}),
"label": nlp.Value("string"),
"id_string": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://whyu.me/reclor/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(reclor): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
if not os.path.exists(data_dir):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('wikihow', data_dir=...)` that includes files unzipped from the reclor zip. Manual download instructions: {}".format(
data_dir, self.manual_download_instructions
)
)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "val.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(reclor): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id_, row in enumerate(data):
yield id_, {
"context": row["context"],
"question": row["question"],
"answers": {"answer": row["answers"]},
"label": str(row.get("label", "")),
"id_string": row["id_string"],
}
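# Usage sketch (illustrative only): ReClor must be downloaded manually (see
# manual_download_instructions above); the data_dir below is a placeholder path.
if __name__ == "__main__":
    import nlp

    reclor = nlp.load_dataset("reclor", data_dir="path/to/folder/folder_name", split="train")
    sample = reclor[0]
    print(sample["question"])
    print(sample["answers"]["answer"], "->", sample["label"])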
|
nlp-master
|
datasets/reclor/reclor.py
|
"""PG-19 language modeling benchmark - a set of books extracted from the Project Gutenberg books library"""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
from operator import itemgetter
import requests
import nlp
# TODO(pg19): BibTeX citation
_CITATION = """\
@article{raecompressive2019,
author = {Rae, Jack W and Potapenko, Anna and Jayakumar, Siddhant M and
Hillier, Chloe and Lillicrap, Timothy P},
title = {Compressive Transformers for Long-Range Sequence Modelling},
journal = {arXiv preprint},
url = {https://arxiv.org/abs/1911.05507},
year = {2019},
}
"""
# TODO(pg19):
_DESCRIPTION = """\
This repository contains the PG-19 language modeling benchmark.
It includes a set of books extracted from the Project Gutenberg books library, that were published before 1919.
It also contains metadata of book titles and publication dates.
PG-19 is over double the size of the Billion Word benchmark and contains documents that are 20X longer, on average, than the WikiText long-range language modelling benchmark.
Books are partitioned into a train, validation, and test set. Book metadata is stored in metadata.csv which contains (book_id, short_book_title, publication_date).
Unlike prior benchmarks, we do not constrain the vocabulary size --- i.e. mapping rare words to an UNK token --- but instead release the data as an open-vocabulary benchmark. The only processing of the text that has been applied is the removal of boilerplate license text, and the mapping of offensive discriminatory words as specified by Ofcom to placeholder tokens. Users are free to model the data at the character-level, subword-level, or via any mechanism that can model an arbitrary string of text.
To compare models we propose to continue measuring the word-level perplexity, by calculating the total likelihood of the dataset (via any chosen subword vocabulary or character-based scheme) divided by the number of tokens --- specified below in the dataset statistics table.
One could use this dataset for benchmarking long-range language models, or use it to pre-train for other natural language processing tasks which require long-range reasoning, such as LAMBADA or NarrativeQA. We would not recommend using this dataset to train a general-purpose language model, e.g. for applications to a production-system dialogue agent, due to the dated linguistic style of old texts and the inherent biases present in historical writing.
"""
_ASSET_ROOT_URL = "https://storage.googleapis.com/deepmind-gutenberg/"
_STORAGE_API_ROOT_URL = "https://storage.googleapis.com/storage/v1/b/deepmind-gutenberg/o"
_METADATA_URL = os.path.join(_ASSET_ROOT_URL, "metadata.csv")
flat_map = lambda fn, arr: [el for sub_arr in map(fn, arr) for el in sub_arr]
class Pg19(nlp.GeneratorBasedBuilder):
"""PG-19 dataset - books as plain text extracted from the Project Gutenberg library"""
# TODO(pg19): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(pg19): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"short_book_title": nlp.Value("string"),
"publication_date": nlp.Value("int32"),
"url": nlp.Value("string"),
"text": nlp.Value("string"),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/deepmind/pg19",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(pg19): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
def fetch_all_pages(url, prefix):
pageToken = None
payload = {"prefix": prefix}
while True:
resp = requests.get(url, params={"pageToken": pageToken, **payload})
                resp_json = resp.json()
                yield resp_json
                pageToken = resp_json.pop("nextPageToken", None)
if pageToken is None:
break
def get_filename(path):
return os.path.splitext(os.path.basename(path))[0]
def download_listdir(url, local_filepath):
root_url, prefix = url.rsplit("/", 1)
pages = fetch_all_pages(root_url, prefix + "/")
items = flat_map(itemgetter("items"), pages)
names = sorted(map(itemgetter("name"), items))
with open(local_filepath, "w") as f:
f.write(json.dumps(names))
return local_filepath
def filepath_to_json(path):
with open(path, "r") as f:
return json.load(f)
splits = ["train", "validation", "test"]
split_paths = map(lambda path: os.path.join(_STORAGE_API_ROOT_URL, path), splits)
split_paths = dl_manager.download_custom(dict(zip(splits, split_paths)), download_listdir)
file_urls = list(map(filepath_to_json, split_paths.values()))
complete_file_urls = [
list(map(lambda url: os.path.join(_ASSET_ROOT_URL, url), urls))
for (split_path, urls) in zip(split_paths, file_urls)
]
urls_to_download = {(get_filename(url)): url for urls in complete_file_urls for url in urls}
metadata = dl_manager.download({"metadata": _METADATA_URL})
downloaded_files = dl_manager.download(urls_to_download)
ids_in_split = list(map(lambda urls: list(map(get_filename, urls)), file_urls))
split_ids_index = dict(zip(split_paths, ids_in_split))
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"ids": split_ids_index["train"],
"metadata_filepath": metadata["metadata"],
"filepaths": downloaded_files,
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"ids": split_ids_index["validation"],
"metadata_filepath": metadata["metadata"],
"filepaths": downloaded_files,
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"ids": split_ids_index["test"],
"metadata_filepath": metadata["metadata"],
"filepaths": downloaded_files,
},
),
]
def _generate_examples(self, ids, metadata_filepath, filepaths):
"""Yields examples."""
# TODO(pg19): Yields (key, example) tuples from the dataset
with open(metadata_filepath) as f:
metadata_dict = csv.DictReader(f, fieldnames=["_id", "short_book_title", "publication_date", "url"])
indexed_metadata = {row["_id"]: row for row in metadata_dict}
for _id in ids:
data = indexed_metadata[_id]
filepath = filepaths[_id]
with open(filepath) as f:
text = f.read()
_id = data["_id"]
short_book_title = data["short_book_title"]
publication_date = int(data["publication_date"])
url = data["url"]
yield _id, {
"short_book_title": short_book_title,
"publication_date": publication_date,
"url": url,
"text": text,
}
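# Usage sketch (illustrative only): note that this downloads the full PG-19 corpus from
# Google Cloud Storage, which is large (tens of thousands of books).
if __name__ == "__main__":
    import nlp

    pg19 = nlp.load_dataset("pg19", split="validation")
    book = pg19[0]
    print(book["short_book_title"], book["publication_date"], len(book["text"]), "characters")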
|
nlp-master
|
datasets/pg19/pg19.py
|
"""TODO(com_qa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(com_qa): BibTeX citation
_CITATION = """\
@inproceedings{abujabal-etal-2019-comqa,
title = "{ComQA: A Community-sourced Dataset for Complex Factoid Question Answering with Paraphrase Clusters",
author = {Abujabal, Abdalghani and
Saha Roy, Rishiraj and
Yahya, Mohamed and
Weikum, Gerhard},
booktitle = {Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
month = {jun},
year = {2019},
address = {Minneapolis, Minnesota},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/N19-1027},
doi = {10.18653/v1/N19-1027},
pages = {307--317},
}
"""
# TODO(com_qa):
_DESCRIPTION = """\
ComQA is a dataset of 11,214 questions, which were collected from WikiAnswers, a community question answering website.
By collecting questions from such a site we ensure that the information needs are ones of interest to actual users.
Moreover, questions posed there often cannot be answered by commercial search engines or QA technology, making them
more interesting for driving future research compared to those collected from an engine's query log. The dataset contains
questions with various challenging phenomena such as the need for temporal reasoning, comparison (e.g., comparatives,
superlatives, ordinals), compositionality (multiple, possibly nested, subquestions with multiple entities), and
unanswerable questions (e.g., Who was the first human being on Mars?). Through a large crowdsourcing effort, questions
in ComQA are grouped into 4,834 paraphrase clusters that express the same information need. Each cluster is annotated
with its answer(s). ComQA answers come in the form of Wikipedia entities wherever possible. Wherever the answers are
temporal or measurable quantities, TIMEX3 and the International System of Units (SI) are used for normalization.
"""
_URL = "https://qa.mpi-inf.mpg.de/comqa"
_TRAIN_FILE = "comqa_train.json"
_DEV_FILE = "comqa_dev.json"
_TEST_FILE = "comqa_test.json"
class ComQa(nlp.GeneratorBasedBuilder):
"""TODO(com_qa): Short description of my dataset."""
# TODO(com_qa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(com_qa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"cluster_id": nlp.Value("string"),
"questions": nlp.features.Sequence({"question": nlp.Value("string")}),
"answers": nlp.features.Sequence({"answer": nlp.Value("string")}),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://qa.mpi-inf.mpg.de/comqa/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(com_qa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {
"train": os.path.join(_URL, _TRAIN_FILE),
"dev": os.path.join(_URL, _DEV_FILE),
"test": os.path.join(_URL, _TEST_FILE),
}
dl_dir = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["dev"], "split": "dev"},
),
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
# TODO(com_qa): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for id_, example in enumerate(data):
questions = []
if split == "test":
cluster_id = str(example["id"])
questions.append(example["question"])
else:
cluster_id = example["cluster_id"]
questions = example["questions"]
answers = example["answers"]
yield id_, {
"cluster_id": cluster_id,
"questions": {"question": questions},
"answers": {"answer": answers},
}
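# Usage sketch (illustrative only): each train/dev example is a paraphrase cluster with
# several question wordings and a shared answer list, as built in _generate_examples above.
if __name__ == "__main__":
    import nlp

    comqa = nlp.load_dataset("com_qa", split="train")
    cluster = comqa[0]
    print(cluster["cluster_id"])
    print(cluster["questions"]["question"])
    print(cluster["answers"]["answer"])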
|
nlp-master
|
datasets/com_qa/com_qa.py
|
"""TODO(wiki_split): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
# TODO(wiki_split): BibTeX citation
_CITATION = """\
@InProceedings{BothaEtAl2018,
title = {{Learning To Split and Rephrase From Wikipedia Edit History}},
author = {Botha, Jan A and Faruqui, Manaal and Alex, John and Baldridge, Jason and Das, Dipanjan},
booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing},
pages = {to appear},
note = {arXiv preprint arXiv:1808.09468},
year = {2018}
}
"""
# TODO(wiki_split):
_DESCRIPTION = """\
One million English sentences, each split into two sentences that together preserve the original meaning, extracted from Wikipedia
Google's WikiSplit dataset was constructed automatically from the publicly available Wikipedia revision history. Although
the dataset contains some inherent noise, it can serve as valuable training data for models that split or merge sentences.
"""
_URL = "https://github.com/google-research-datasets/wiki-split/raw/master"
_TRAIN_FILE = "train.tsv.zip"
_TEST_FILE = "test.tsv"
_DEV_FILE = "validation.tsv"
class WikiSplit(nlp.GeneratorBasedBuilder):
"""TODO(wiki_split): Short description of my dataset."""
# TODO(wiki_split): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(wiki_split): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"complex_sentence": nlp.Value("string"),
"simple_sentence_1": nlp.Value("string"),
"simple_sentence_2": nlp.Value("string"),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://dataset-homepage/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(wiki_split): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {
"train": os.path.join(_URL, _TRAIN_FILE),
"test": os.path.join(_URL, _TEST_FILE),
"dev": os.path.join(_URL, _DEV_FILE),
}
dl_dir = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir["train"], "train.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["test"]},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["dev"]},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(wiki_split): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = csv.reader(f, delimiter="\t")
for id_, row in enumerate(data):
yield id_, {
"complex_sentence": row[0],
"simple_sentence_1": row[1].split("<::::>")[0],
"simple_sentence_2": row[1].split("<::::>")[1],
}
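# Illustrative sketch of the row format handled above: the second TSV column contains both
# simple sentences joined by the "<::::>" marker. Toy strings are used for demonstration.
if __name__ == "__main__":
    toy_row = ["A complex sentence that says two things .", "It says one thing . <::::> It says another thing ."]
    simple_1, simple_2 = toy_row[1].split("<::::>")
    print(toy_row[0])
    print(simple_1.strip())
    print(simple_2.strip())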
|
nlp-master
|
datasets/wiki_split/wiki_split.py
|
from __future__ import absolute_import, division, print_function
import gzip
import json
import os
import nlp
class CompguesswhatConfig(nlp.BuilderConfig):
""" BuilderConfig for CompGuessWhat?!"""
def __init__(self, data_url, splits, gameplay_scenario, **kwargs):
"""
Args:
gameplay_scenario: to specify if we want to load original CompGuessWhat?! split ('original') or
the zero-shot reference games based on NOCAPS images ('zero_shot')
**kwargs: keyword arguments forwarded to super.
"""
super(CompguesswhatConfig, self).__init__(
version=nlp.Version("0.1.0", "First CompGuessWhat?! release"), **kwargs
)
assert gameplay_scenario in (
"original",
"zero_shot",
), "Invalid choice for parameter 'gameplay_scenario': {gameplay_scenario}. Valid values are ('original', 'zero_shot')."
self.gameplay_scenario = gameplay_scenario
self.splits = splits
self.data_url = data_url
class Compguesswhat(nlp.GeneratorBasedBuilder):
_CITATION = """\
@inproceedings{suglia2020compguesswhat,
title={CompGuessWhat?!: a Multi-task Evaluation Framework for Grounded Language Learning},
author={Suglia, Alessandro and Konstas, Ioannis and Vanzo, Andrea and Bastianelli, Emanuele and Elliott, Desmond and Frank, Stella and Lemon, Oliver},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
"""
_DESCRIPTION = """
CompGuessWhat?! is an instance of a multi-task framework for evaluating the quality of learned neural representations,
in particular concerning attribute grounding. Use this dataset if you want to use the set of games whose reference
scene is an image in VisualGenome. Visit the website for more details: https://compguesswhat.github.io
"""
BUILDER_CONFIGS = [
CompguesswhatConfig(
name="compguesswhat-original",
gameplay_scenario="original",
description="CompGuessWhat?! subset of games from the original GuessWhat?! dataset",
data_url="https://www.dropbox.com/s/l0nc13udml6vs0w/compguesswhat-original.zip?dl=1",
splits={
"train": "compguesswhat.train.jsonl.gz",
"valid": "compguesswhat.valid.jsonl.gz",
"test": "compguesswhat.test.jsonl.gz",
},
),
CompguesswhatConfig(
name="compguesswhat-zero_shot",
gameplay_scenario="zero_shot",
description="CompGuessWhat?! reference set of games for zero-shot evaluation using NOCAPS images",
data_url="https://www.dropbox.com/s/gd46azul7o7iip4/compguesswhat-zero_shot.zip?dl=1",
splits={
"nd_valid": "compguesswhat.nd_valid.jsonl.gz",
"nd_test": "compguesswhat.nd_test.jsonl.gz",
"od_valid": "compguesswhat.od_valid.jsonl.gz",
"od_test": "compguesswhat.od_test.jsonl.gz",
},
),
]
VERSION = nlp.Version("0.1.0")
def _info(self):
if self.config.gameplay_scenario == "original":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=self._DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("int32"),
"target_id": nlp.Value("int32"),
"timestamp": nlp.Value("string"),
"status": nlp.Value("string"),
"image": {
"id": nlp.Value("int32"),
"file_name": nlp.Value("string"),
"flickr_url": nlp.Value("string"),
"coco_url": nlp.Value("string"),
"height": nlp.Value("int32"),
"width": nlp.Value("int32"),
"vg_id": nlp.Value("int32"),
"vg_url": nlp.Value("string"),
},
"qas": nlp.features.Sequence(
{"question": nlp.Value("string"), "answer": nlp.Value("string"), "id": nlp.Value("int32")}
),
"objects": nlp.features.Sequence(
{
"id": nlp.Value("int32"),
"bbox": nlp.Sequence(nlp.Value("float32"), length=4),
"category": nlp.Value("string"),
"area": nlp.Value("float32"),
"category_id": nlp.Value("int32"),
"segment": nlp.features.Sequence(nlp.features.Sequence(nlp.Value("float32"))),
}
),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://compguesswhat.github.io/",
citation=self._CITATION,
)
elif self.config.gameplay_scenario == "zero_shot":
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=self._DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("int32"),
"target_id": nlp.Value("string"),
"status": nlp.Value("string"),
"image": {
"id": nlp.Value("int32"),
"file_name": nlp.Value("string"),
"coco_url": nlp.Value("string"),
"height": nlp.Value("int32"),
"width": nlp.Value("int32"),
"license": nlp.Value("int32"),
"open_images_id": nlp.Value("string"),
"date_captured": nlp.Value("string"),
},
"objects": nlp.features.Sequence(
{
"id": nlp.Value("string"),
"bbox": nlp.Sequence(nlp.Value("float32"), length=4),
"category": nlp.Value("string"),
"area": nlp.Value("float32"),
"category_id": nlp.Value("int32"),
"IsOccluded": nlp.Value("int32"),
"IsTruncated": nlp.Value("int32"),
"segment": nlp.features.Sequence(
{
"MaskPath": nlp.Value("string"),
"LabelName": nlp.Value("string"),
"BoxID": nlp.Value("string"),
"BoxXMin": nlp.Value("string"),
"BoxXMax": nlp.Value("string"),
"BoxYMin": nlp.Value("string"),
"BoxYMax": nlp.Value("string"),
"PredictedIoU": nlp.Value("string"),
"Clicks": nlp.Value("string"),
}
),
}
),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://compguesswhat.github.io/",
citation=self._CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(self.config.data_url)
splits_gen = []
for split_id, split_filename in self.config.splits.items():
if self.config.gameplay_scenario == "original":
if "train" in split_id:
split_name = nlp.Split.TRAIN
elif "valid" in split_id:
split_name = nlp.Split.VALIDATION
elif "test" in split_id:
split_name = nlp.Split.TEST
else:
split_name = nlp.Split(split_id)
full_split_name = "-".join(["compguesswhat", self.config.gameplay_scenario])
splits_gen.append(
nlp.SplitGenerator(
name=split_name,
gen_kwargs={
"filepath": os.path.join(dl_dir, full_split_name, self.VERSION.version_str, split_filename)
},
)
)
return splits_gen
def _generate_examples(self, filepath):
def _extract_game_tuple(data):
data = data.decode("utf-8")
game = json.loads(data.strip("\n"))
# we refactor the data structure a bit to fit with the new version
game["target_id"] = game["object_id"]
if "object_id" in game:
del game["object_id"]
if "questioner_id" in game:
del game["questioner_id"]
###
return game["id"], game
"""Yields examples."""
with gzip.open(filepath) as in_file:
for data in in_file:
yield _extract_game_tuple(data)
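# Usage sketch (illustrative only): the two configs defined in BUILDER_CONFIGS are selected
# by name; for the zero-shot config the split names follow its `splits` mapping above
# ("nd_valid", "nd_test", "od_valid", "od_test").
if __name__ == "__main__":
    import nlp

    original = nlp.load_dataset("compguesswhat", "compguesswhat-original", split="train")
    zero_shot = nlp.load_dataset("compguesswhat", "compguesswhat-zero_shot", split="nd_valid")
    print(len(original), len(zero_shot))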
|
nlp-master
|
datasets/compguesswhat/compguesswhat.py
|
import gzip
import json
import os
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
"-d",
"--data_path",
type=str,
required=True,
help="Data path containing the CompGuessWhat?! datasets (files with 'jsonl.gz' extension)",
)
parser.add_argument("--examples", type=int, default=5, help="Number of games to consider in the dummy dataset")
original_data_files = {
"train": "compguesswhat.train.jsonl.gz",
"valid": "compguesswhat.valid.jsonl.gz",
"test": "compguesswhat.test.jsonl.gz",
}
zs_data_files = {
"nd_valid": "compguesswhat.nd_valid.jsonl.gz",
"od_valid": "compguesswhat.od_valid.jsonl.gz",
"nd_test": "compguesswhat.nd_test.jsonl.gz",
"od_test": "compguesswhat.od_test.jsonl.gz",
}
COMPGUESSWHAT_ROOT = "datasets/compguesswhat/"
def create_dummy_data_for_split(data_path, dataset_name, dataset_version, data_files):
full_dataset_name = "-".join(["compguesswhat", dataset_name])
dummy_data_path = os.path.join(
COMPGUESSWHAT_ROOT,
"dummy",
full_dataset_name,
dataset_version,
"dummy_data",
full_dataset_name,
dataset_version,
)
if not os.path.exists(dummy_data_path):
os.makedirs(dummy_data_path)
for split_name, split_file in data_files.items():
print(f"Generating dummy data for split {split_name} (num. examples = {args.examples})")
split_filepath = os.path.join(data_path, full_dataset_name, dataset_version, split_file)
print(f"Reading split file {split_filepath}")
with gzip.open(split_filepath) as in_file:
dummy_filepath = os.path.join(dummy_data_path, split_file)
with gzip.open(dummy_filepath, mode="w") as out_file:
for i, line in enumerate(in_file):
                    if i >= args.examples:
break
data = json.loads(line.strip())
out_file.write(json.dumps(data).encode("utf-8"))
out_file.write(b"\n")
def main(args):
# args.data_path is the directory containing the already downloaded dataset files
# we assume that the dataset test was successful and we have the file dataset_info.json
dataset_info_path = os.path.join(COMPGUESSWHAT_ROOT, "dataset_infos.json")
if not os.path.exists(dataset_info_path):
raise ValueError(
"The file 'dataset_info.json' doesn't exists. Make sure that you run the dataset tests via nlp-cli."
)
with open(dataset_info_path) as in_file:
dataset_info = json.load(in_file)
dataset_version = dataset_info["default"]["version"]["version_str"]
print(f"Creating dummy data for CompGuessWhat?! {dataset_version}")
print("Original dataset...")
create_dummy_data_for_split(args.data_path, "original", dataset_version, original_data_files)
print("Zero-shot dataset...")
create_dummy_data_for_split(args.data_path, "zero_shot", dataset_version, zs_data_files)
if __name__ == "__main__":
args = parser.parse_args()
main(args)
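# Illustrative invocation, based on the argparse options defined above (the data path is a
# placeholder for the directory holding the downloaded *.jsonl.gz files):
#
#   python datasets/compguesswhat/create_dummy_data.py -d /path/to/compguesswhat_data --examples 5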
|
nlp-master
|
datasets/compguesswhat/create_dummy_data.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Commonsense Explanations (CoS-E) Dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@inproceedings{rajani2019explain,
title = {Explain Yourself! Leveraging Language models for Commonsense Reasoning},
author = {Rajani, Nazneen Fatema and
McCann, Bryan and
Xiong, Caiming and
Socher, Richard},
year = {2019},
booktitle = {Proceedings of the 2019 Conference of the Association for Computational Linguistics (ACL2019)},
url = {https://arxiv.org/abs/1906.02361}
}
"""
_DESCRIPTION = """
Common Sense Explanations (CoS-E) allows for training language models to
automatically generate explanations that can be used during training and
inference in a novel Commonsense Auto-Generated Explanation (CAGE) framework.
"""
_COS_E_URL = "https://raw.githubusercontent.com/salesforce/cos-e/master/data/"
# COS E has explanations for the CQA dataset, which is joined by ID.
_CQA_V1_11_URL_TRAIN = "https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl"
_CQA_V1_11_URL_DEV = "https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl"
_CQA_V1_11_URL_TEST = "https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl"
_CQA_V1_0_URL_TRAIN = os.path.join(_COS_E_URL, "v1.0/train_rand_split.jsonl")
_CQA_V1_0_URL_DEV = os.path.join(_COS_E_URL, "v1.0/dev_rand_split.jsonl")
_CQA_V1_0_URL_TEST = os.path.join(_COS_E_URL, "v1.0/test_rand_split_no_answers.jsonl")
def _download_and_index_cqa(dl_manager, name):
"""Downloads CQA and returns it, indexed by id, for joining with Cos-E."""
downloaded_files = dl_manager.download_and_extract(
{
"cqa_train": _CQA_V1_11_URL_TRAIN if name == "v1.11" else _CQA_V1_0_URL_TRAIN,
"cqa_dev": _CQA_V1_11_URL_DEV if name == "v1.11" else _CQA_V1_0_URL_DEV,
"cqa_test": _CQA_V1_11_URL_TEST if name == "v1.11" else _CQA_V1_0_URL_TEST,
}
)
# NB: "cqa_test" is included in the files, but not in any of the CoS-E splits.
cqa_splits = ["cqa_train", "cqa_dev"]
cqa_complete = []
for split in cqa_splits:
with open(downloaded_files[split]) as f:
for _, line in enumerate(f):
d = json.loads(line)
cqa_complete.append(d)
# Index the CQA dataset by id for joining with Cos-E.
cqa_indexed = {}
for d in cqa_complete:
cqa_indexed[d["id"]] = d
return cqa_indexed
def _get_choices_and_answer(cqa):
"""Returns choices and the answer from a cqa example."""
choices = []
answer_key = cqa["answerKey"]
answer = None
for choice in cqa["question"]["choices"]:
choices.append(choice["text"])
if answer_key == choice["label"]:
answer = choice["text"]
return choices, answer
class CosEConfig(nlp.BuilderConfig):
""" BuilderConfig for CosE"""
def __init__(self, **kwargs):
"""
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(CosEConfig, self).__init__(**kwargs)
class CosE(nlp.GeneratorBasedBuilder):
"""CoS-E: Common Sense Explanations corpus."""
BUILDER_CONFIGS = [
CosEConfig(
name="v1.0",
description="cos-e version 1.0",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
),
CosEConfig(
name="v1.11",
description="cos-e version 1.11",
version=nlp.Version("1.11.0", "New split API (https://tensorflow.org/datasets/splits)"),
),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"question": nlp.Value("string"),
"choices": nlp.features.Sequence(nlp.Value("string")),
"answer": nlp.Value("string"),
"abstractive_explanation": nlp.Value("string"),
"extractive_explanation": nlp.Value("string"),
}
),
supervised_keys=None,
homepage="https://github.com/salesforce/cos-e",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# NB: The CQA Dataset should be read only once, and only by callers who
# want to _create_ the Cos-E dataset from scratch.
cqa_indexed = _download_and_index_cqa(dl_manager, self.config.name)
if self.config.name == "v1.11":
files = dl_manager.download_and_extract(
{
"dev": [os.path.join(_COS_E_URL, "v1.11/cose_dev_v1.11_processed.jsonl")],
"train": [os.path.join(_COS_E_URL, "v1.11/cose_train_v1.11_processed.jsonl")],
}
)
elif self.config.name == "v1.0":
files = dl_manager.download_and_extract(
{
"dev": [os.path.join(_COS_E_URL, "v1.0/cose_dev_v1.0_processed.jsonl")],
"train": [os.path.join(_COS_E_URL, "v1.0/cose_train_v1.0_processed.jsonl")],
}
)
else:
raise ValueError("Unknown config name")
# We use the CoS-E/CQA dev set as our validation set.
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"files": files["train"], "cqa_indexed": cqa_indexed},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"files": files["dev"], "cqa_indexed": cqa_indexed},
),
]
def _generate_examples(self, files, **kwargs):
"""Yields examples."""
cqa_indexed = kwargs["cqa_indexed"]
for filepath in files:
with open(filepath) as f:
for line in f:
cos = json.loads(line)
cqa = cqa_indexed[cos["id"]]
choices, answer = _get_choices_and_answer(cqa)
yield cos["id"], {
"id": cos["id"],
"question": cqa["question"]["stem"],
"choices": choices,
"answer": answer,
"abstractive_explanation": cos["explanation"]["open-ended"],
"extractive_explanation": cos["explanation"]["selected"],
}
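# Usage sketch (illustrative only): the config names "v1.0" and "v1.11" come from
# BUILDER_CONFIGS above; each example joins a CQA question with its CoS-E explanations.
if __name__ == "__main__":
    import nlp

    cose = nlp.load_dataset("cos_e", "v1.11", split="train")
    ex = cose[0]
    print(ex["question"], ex["choices"], "->", ex["answer"])
    print(ex["abstractive_explanation"])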
|
nlp-master
|
datasets/cos_e/cos_e.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TriviaQA: A Reading Comprehension Dataset."""
from __future__ import absolute_import, division, print_function
import glob
import json
import logging
import os
import six
import nlp
_CITATION = """
@article{2017arXivtriviaqa,
author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
Daniel and {Zettlemoyer}, Luke},
title = "{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
journal = {arXiv e-prints},
year = 2017,
eid = {arXiv:1705.03551},
pages = {arXiv:1705.03551},
archivePrefix = {arXiv},
eprint = {1705.03551},
}
"""
_DOWNLOAD_URL_TMPL = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-{}.tar.gz"
_TRAIN_FILE_FORMAT = "*-train.json"
_VALIDATION_FILE_FORMAT = "*-dev.json"
_TEST_FILE_FORMAT = "*test-without-answers.json"
_WEB_EVIDENCE_DIR = "evidence/web"
_WIKI_EVIDENCE_DIR = "evidence/wikipedia"
_DESCRIPTION = """\
TriviaQA is a reading comprehension dataset containing over 650K
question-answer-evidence triples. TriviaQA includes 95K question-answer
pairs authored by trivia enthusiasts and independently gathered evidence
documents, six per question on average, that provide high quality distant
supervision for answering the questions.
"""
_RC_DESCRIPTION = """\
Question-answer pairs where all documents for a given question contain the
answer string(s).
"""
_UNFILTERED_DESCRIPTION = """\
110k question-answer pairs for open domain QA where not all documents for a
given question contain the answer string(s). This makes the unfiltered dataset
more appropriate for IR-style QA.
"""
_CONTEXT_ADDENDUM = "Includes context from Wikipedia and search results."
def _web_evidence_dir(tmp_dir):
return sorted(glob.glob(os.path.join(tmp_dir, _WEB_EVIDENCE_DIR)))
def _wiki_evidence_dir(tmp_dir):
return sorted(glob.glob(os.path.join(tmp_dir, _WIKI_EVIDENCE_DIR)))
class TriviaQaConfig(nlp.BuilderConfig):
"""BuilderConfig for TriviaQA."""
def __init__(self, unfiltered=False, exclude_context=False, **kwargs):
"""BuilderConfig for TriviaQA.
Args:
unfiltered: bool, whether to use the unfiltered version of the dataset,
intended for open-domain QA.
exclude_context: bool, whether to exclude Wikipedia and search context for
reduced size.
**kwargs: keyword arguments forwarded to super.
"""
name = "unfiltered" if unfiltered else "rc"
if exclude_context:
name += ".nocontext"
description = _UNFILTERED_DESCRIPTION if unfiltered else _RC_DESCRIPTION
if not exclude_context:
description += _CONTEXT_ADDENDUM
super(TriviaQaConfig, self).__init__(
name=name, description=description, version=nlp.Version("1.1.0"), **kwargs
)
self.unfiltered = unfiltered
self.exclude_context = exclude_context
class TriviaQa(nlp.GeneratorBasedBuilder):
"""TriviaQA is a reading comprehension dataset.
    It contains over 650K question-answer-evidence triples.
"""
BUILDER_CONFIGS = [
TriviaQaConfig(unfiltered=False, exclude_context=False), # rc
TriviaQaConfig(unfiltered=False, exclude_context=True), # rc.nocontext
TriviaQaConfig(unfiltered=True, exclude_context=False), # unfiltered
TriviaQaConfig(unfiltered=True, exclude_context=True),
# unfilered.nocontext
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"question": nlp.Value("string"),
"question_id": nlp.Value("string"),
"question_source": nlp.Value("string"),
"entity_pages": nlp.features.Sequence(
{
"doc_source": nlp.Value("string"),
"filename": nlp.Value("string"),
"title": nlp.Value("string"),
"wiki_context": nlp.Value("string"),
}
),
"search_results": nlp.features.Sequence(
{
"description": nlp.Value("string"),
"filename": nlp.Value("string"),
"rank": nlp.Value("int32"),
"title": nlp.Value("string"),
"url": nlp.Value("string"),
"search_context": nlp.Value("string"),
}
),
"answer": dict(
{
"aliases": nlp.features.Sequence(nlp.Value("string")),
"normalized_aliases": nlp.features.Sequence(nlp.Value("string")),
"matched_wiki_entity_name": nlp.Value("string"),
"normalized_matched_wiki_entity_name": nlp.Value("string"),
"normalized_value": nlp.Value("string"),
"type": nlp.Value("string"),
"value": nlp.Value("string"),
}
),
}
),
supervised_keys=None,
homepage="http://nlp.cs.washington.edu/triviaqa/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cfg = self.config
download_urls = dict()
if not (cfg.unfiltered and cfg.exclude_context):
download_urls["rc"] = _DOWNLOAD_URL_TMPL.format("rc")
if cfg.unfiltered:
download_urls["unfiltered"] = _DOWNLOAD_URL_TMPL.format("unfiltered")
file_paths = dl_manager.download_and_extract(download_urls)
qa_dir = (
os.path.join(file_paths["unfiltered"], "triviaqa-unfiltered")
if cfg.unfiltered
else os.path.join(file_paths["rc"], "qa")
)
train_files = sorted(glob.glob(os.path.join(qa_dir, _TRAIN_FILE_FORMAT)))
valid_files = sorted(glob.glob(os.path.join(qa_dir, _VALIDATION_FILE_FORMAT)))
test_files = sorted(glob.glob(os.path.join(qa_dir, _TEST_FILE_FORMAT)))
if cfg.exclude_context:
web_evidence_dir = None
wiki_evidence_dir = None
else:
web_evidence_dir = os.path.join(file_paths["rc"], _WEB_EVIDENCE_DIR)
wiki_evidence_dir = os.path.join(file_paths["rc"], _WIKI_EVIDENCE_DIR)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"files": train_files, "web_dir": web_evidence_dir, "wiki_dir": wiki_evidence_dir},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={"files": valid_files, "web_dir": web_evidence_dir, "wiki_dir": wiki_evidence_dir},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"files": test_files, "web_dir": web_evidence_dir, "wiki_dir": wiki_evidence_dir},
),
]
def _generate_examples(self, files, web_dir, wiki_dir):
"""This function returns the examples."""
def parse_example(article):
"""Return a single example from an article JSON record."""
def _strip(collection):
return [item.strip() for item in collection]
if "Answer" in article:
answer = article["Answer"]
answer_dict = {
"aliases": _strip(answer["Aliases"]),
"normalized_aliases": _strip(answer["NormalizedAliases"]),
"matched_wiki_entity_name": answer.get("MatchedWikiEntryName", "").strip(),
"normalized_matched_wiki_entity_name": answer.get("NormalizedMatchedWikiEntryName", "").strip(),
"normalized_value": answer["NormalizedValue"].strip(),
"type": answer["Type"].strip(),
"value": answer["Value"].strip(),
}
else:
answer_dict = {
"aliases": [],
"normalized_aliases": [],
"matched_wiki_entity_name": "<unk>",
"normalized_matched_wiki_entity_name": "<unk>",
"normalized_value": "<unk>",
"type": "",
"value": "<unk>",
}
if self.config.exclude_context:
article["SearchResults"] = []
article["EntityPages"] = []
def _add_context(collection, context_field, file_dir):
"""Adds context from file, or skips if file does not exist."""
new_items = []
for item in collection:
if "Filename" not in item:
logging.info("Missing context 'Filename', skipping.")
continue
new_item = item.copy()
fname = item["Filename"]
try:
with open(os.path.join(file_dir, fname)) as f:
new_item[context_field] = f.read()
                    except IOError:  # skip evidence files that are missing on disk
logging.info("File does not exist, skipping: %s", fname)
continue
new_items.append(new_item)
return new_items
def _strip_if_str(v):
return v.strip() if isinstance(v, six.string_types) else v
def _transpose_and_strip_dicts(dicts, field_names):
return {
nlp.naming.camelcase_to_snakecase(k): [_strip_if_str(d[k]) for d in dicts] for k in field_names
}
search_results = _transpose_and_strip_dicts(
_add_context(article.get("SearchResults", []), "SearchContext", web_dir),
["Description", "Filename", "Rank", "Title", "Url", "SearchContext"],
)
entity_pages = _transpose_and_strip_dicts(
_add_context(article.get("EntityPages", []), "WikiContext", wiki_dir),
["DocSource", "Filename", "Title", "WikiContext"],
)
question = article["Question"].strip()
question_id = article["QuestionId"]
question_source = article["QuestionSource"].strip()
return {
"entity_pages": entity_pages,
"search_results": search_results,
"question": question,
"question_id": question_id,
"question_source": question_source,
"answer": answer_dict,
}
for filepath in files:
logging.info("generating examples from = %s", filepath)
fname = os.path.basename(filepath)
with open(filepath) as f:
current_record = ""
for line in f:
if line == " {\n":
current_record = line
elif line.startswith(" }"): # Handles final record as well.
article = json.loads(current_record + "}")
current_record = ""
example = parse_example(article)
yield "%s_%s" % (fname, example["question_id"]), example
else:
current_record += line
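# Usage sketch (illustrative only): the four configs built above are "rc", "rc.nocontext",
# "unfiltered" and "unfiltered.nocontext"; the .nocontext variants drop the Wikipedia and
# search evidence text, yielding much smaller examples.
if __name__ == "__main__":
    import nlp

    trivia = nlp.load_dataset("trivia_qa", "rc.nocontext", split="validation")
    ex = trivia[0]
    print(ex["question"])
    print(ex["answer"]["value"], ex["answer"]["aliases"][:3])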
|
nlp-master
|
datasets/trivia_qa/trivia_qa.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TODO(scicite): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@InProceedings{Cohan2019Structural,
author={Arman Cohan and Waleed Ammar and Madeleine Van Zuylen and Field Cady},
title={Structural Scaffolds for Citation Intent Classification in Scientific Publications},
booktitle={NAACL},
year={2019}
}
"""
_DESCRIPTION = """
This is a dataset for classifying citation intents in academic papers.
The main citation intent label for each JSON object is specified with the label
key, while the citation context is specified with a context key. Example:
{
'string': 'In chacma baboons, male-infant relationships can be linked to both
formation of friendships and paternity success [30,31].'
'sectionName': 'Introduction',
'label': 'background',
'citingPaperId': '7a6b2d4b405439',
'citedPaperId': '9d1abadc55b5e0',
...
}
You may obtain the full information about the paper using the provided paper ids
with the Semantic Scholar API (https://api.semanticscholar.org/).
The labels are:
Method, Background, Result
"""
_SOURCE_NAMES = ["properNoun", "andPhrase", "acronym", "etAlPhrase", "explicit", "acronymParen", "nan"]
class Scicite(nlp.GeneratorBasedBuilder):
"""This is a dataset for classifying citation intents in academic papers."""
VERSION = nlp.Version("1.0.0")
def _info(self):
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"string": nlp.Value("string"),
"sectionName": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["method", "background", "result"]),
"citingPaperId": nlp.Value("string"),
"citedPaperId": nlp.Value("string"),
"excerpt_index": nlp.Value("int32"),
"isKeyCitation": nlp.Value("bool"),
"label2": nlp.features.ClassLabel(
names=["supportive", "not_supportive", "cant_determine", "none"]
),
"citeEnd": nlp.Value("int64"),
"citeStart": nlp.Value("int64"),
"source": nlp.features.ClassLabel(names=_SOURCE_NAMES),
"label_confidence": nlp.Value("float32"),
"label2_confidence": nlp.Value("float32"),
"id": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/allenai/scicite",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract(
{"scicite": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scicite/scicite.tar.gz",}
)
path = os.path.join(dl_paths["scicite"], "scicite")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"path": os.path.join(path, "train.jsonl")},),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"path": os.path.join(path, "dev.jsonl")},),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"path": os.path.join(path, "test.jsonl")},),
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path) as f:
unique_ids = {}
for line in f:
d = json.loads(line)
unique_id = str(d["unique_id"])
if unique_id in unique_ids:
continue
unique_ids[unique_id] = True
yield unique_id, {
"string": d["string"],
"label": str(d["label"]),
"sectionName": str(d["sectionName"]),
"citingPaperId": str(d["citingPaperId"]),
"citedPaperId": str(d["citedPaperId"]),
"excerpt_index": int(d["excerpt_index"]),
"isKeyCitation": bool(d["isKeyCitation"]),
"label2": str(d.get("label2", "none")),
"citeEnd": _safe_int(d["citeEnd"]),
"citeStart": _safe_int(d["citeStart"]),
"source": str(d["source"]),
"label_confidence": float(d.get("label_confidence", 0.0)),
"label2_confidence": float(d.get("label2_confidence", 0.0)),
"id": str(d["id"]),
}
def _safe_int(a):
try:
# skip NaNs
return int(a)
except ValueError:
return -1
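# Illustrative usage sketch (not part of the original script). Assuming the `nlp`
# library's `load_dataset` entry point resolves this dataset by name and that
# `ClassLabel.int2str` is available, this maps the stored integer label back to one of
# the three intent names declared in `_info` above.
if __name__ == "__main__":
    scicite = nlp.load_dataset("scicite", split="train")
    example = scicite[0]
    label_feature = scicite.features["label"]
    print(example["string"][:80], "->", label_feature.int2str(example["label"]))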
|
nlp-master
|
datasets/scicite/scicite.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""QA4MRE (CLEF 2011/2012/2013): a reading comprehension dataset."""
from __future__ import division, print_function
import logging
import os
import xml.etree.ElementTree as ET
import nlp
# pylint: disable=anomalous-backslash-in-string
_CITATION = r"""
@InProceedings{10.1007/978-3-642-40802-1_29,
author={Pe{\~{n}}as, Anselmo and Hovy, Eduard and Forner, Pamela and Rodrigo, {\'A}lvaro and Sutcliffe, Richard
and Morante, Roser},
editor={Forner, Pamela and M{\"u}ller, Henning and Paredes, Roberto and Rosso, Paolo
and Stein, Benno},
title={QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation},
booktitle={Information Access Evaluation. Multilinguality, Multimodality, and Visualization},
year={2013},
publisher={Springer Berlin Heidelberg},
address={Berlin, Heidelberg},
pages={303--320},
abstract={This paper describes the methodology for testing the performance of Machine Reading systems through Question Answering and Reading Comprehension Tests. This was the attempt of the QA4MRE challenge which was run as a Lab at CLEF 2011--2013. The traditional QA task was replaced by a new Machine Reading task, whose intention was to ask questions that required a deep knowledge of individual short texts and in which systems were required to choose one answer, by analysing the corresponding test document in conjunction with background text collections provided by the organization. Four different tasks have been organized during these years: Main Task, Processing Modality and Negation for Machine Reading, Machine Reading of Biomedical Texts about Alzheimer's disease, and Entrance Exams. This paper describes their motivation, their goals, their methodology for preparing the data sets, their background collections, their metrics used for the evaluation, and the lessons learned along these three years.},
isbn={978-3-642-40802-1}
}
"""
_DESCRIPTION = """
QA4MRE dataset was created for the CLEF 2011/2012/2013 shared tasks to promote research in
question answering and reading comprehension. The dataset contains a supporting
passage and a set of questions corresponding to the passage. Multiple options
for answers are provided for each question, of which only one is correct. The
training and test datasets are available for the main track.
Additional gold standard documents are available for two pilot studies: one on
Alzheimer's data, and the other on entrance exams data.
"""
_BASE_URL = "http://nlp.uned.es/clef-qa/repository/js/scripts/downloadFile.php?file=/var/www/html/nlp/clef-qa/repository/resources/QA4MRE/"
PATHS = {
"2011": {
"_TRACKS": ("main"),
"_PATH_TMPL_MAIN_GS": "2011/Training_Data/Goldstandard/QA4MRE-2011-{}_GS.xml",
"_LANGUAGES_MAIN": ("DE", "EN", "ES", "IT", "RO"),
},
"2012": {
"_TRACKS": ("main", "alzheimers"),
"_PATH_TMPL_MAIN_GS": "2012/Main_Task/Training_Data/Goldstandard/Used_in_Evaluation/QA4MRE-2012-{}_GS.xml",
"_LANGUAGES_MAIN": ("AR", "BG", "DE", "EN", "ES", "IT", "RO"),
"_PATH_ALZHEIMER": "2012/Pilot_Tasks/Biomedical_About_Alzheimer/Training_Data/Goldstandard/QA4MRE-2012_BIOMEDICAL_GS.xml",
},
"2013": {
"_TRACKS": ("main", "alzheimers", "entrance_exam"),
"_PATH_TMPL_MAIN_GS": "2013/Main_Task/Training_Data/Goldstandard/QA4MRE-2013-{}_GS.xml",
"_LANGUAGES_MAIN": ("AR", "BG", "EN", "ES", "RO"),
"_PATH_ALZHEIMER": "2013/Biomedical_About_Alzheimer/Training_Data/Goldstandard/QA4MRE-2013_BIO_GS-RUN.xml",
"_PATH_ENTRANCE_EXAM": "2013/Entrance_Exams/Training_Data/Goldstandard/qa4mre-exam-test-withanswer.xml",
},
}
def _get_question(topic_id, topic_name, test_id, document_id, document_str, question):
"""Gets instance ID and features for every question.
Args:
topic_id: string
topic_name: string
test_id: string
document_id: string
document_str: string
question: XML element for question
Returns:
id_: string. Unique ID for instance.
feats: dict of instance features
"""
question_id = question.attrib["q_id"]
for q_text in question.iter("q_str"):
question_str = q_text.text
possible_answers = list()
for answer in question.iter("answer"):
answer_id = answer.attrib["a_id"]
answer_str = answer.text
possible_answers.append({"answer_id": answer_id, "answer_str": answer_str})
if "correct" in answer.attrib:
correct_answer_id = answer_id
correct_answer_str = answer_str
id_ = "_".join([topic_id, topic_name, test_id, question_id])
logging.info("ID: %s", id_)
feats = {
"topic_id": topic_id,
"topic_name": topic_name,
"test_id": test_id,
"document_id": document_id,
"document_str": document_str,
"question_id": question_id,
"question_str": question_str,
"answer_options": possible_answers,
"correct_answer_id": correct_answer_id,
"correct_answer_str": correct_answer_str,
}
return id_, feats
class Qa4mreConfig(nlp.BuilderConfig):
"""BuilderConfig for Qa4mre."""
def __init__(self, year, track="main", language="EN", **kwargs):
"""BuilderConfig for Qa4Mre.
Args:
year: string, year of dataset
track: string, the task track from PATHS[year]['_TRACKS'].
language: string, Acronym for language in the main task.
**kwargs: keyword arguments forwarded to super.
"""
if track.lower() not in PATHS[year]["_TRACKS"]:
raise ValueError("Incorrect track. Track should be one of the following: ", PATHS[year]["_TRACKS"])
if track.lower() != "main" and language.upper() != "EN":
logging.warn("Only English documents available for pilot " "tracks. Setting English by default.")
language = "EN"
if track.lower() == "main" and language.upper() not in PATHS[year]["_LANGUAGES_MAIN"]:
raise ValueError(
"Incorrect language for the main track. Correct options: ", PATHS[year]["_LANGUAGES_MAIN"]
)
self.year = year
self.track = track.lower()
self.lang = language.upper()
name = self.year + "." + self.track + "." + self.lang
description = _DESCRIPTION
description += ("This configuration includes the {} track for {} language " "in {} year.").format(
self.track, self.lang, self.year
)
super(Qa4mreConfig, self).__init__(name=name, description=description, version=nlp.Version("0.1.0"), **kwargs)
class Qa4mre(nlp.GeneratorBasedBuilder):
"""QA4MRE dataset from CLEF shared tasks 2011, 2012, 2013."""
BUILDER_CONFIGS = [
Qa4mreConfig(year="2011", track="main", language="DE"), # 2011 Main track German (2011.main.DE)
Qa4mreConfig(year="2011", track="main", language="EN"), # 2011 Main track English (2011.main.EN)
Qa4mreConfig(year="2011", track="main", language="ES"), # 2011 Main track Spanish (2011.main.ES)
Qa4mreConfig(year="2011", track="main", language="IT"), # 2011 Main track Italian (2011.main.IT)
Qa4mreConfig(year="2011", track="main", language="RO"), # 2011 Main track Romanian (2011.main.RO)
Qa4mreConfig(year="2012", track="main", language="AR"), # 2012 Main track Arabic (2012.main.AR)
Qa4mreConfig(year="2012", track="main", language="BG"), # 2012 Main track Bulgarian (2012.main.BG)
Qa4mreConfig(year="2012", track="main", language="DE"), # 2012 Main track German (2012.main.DE)
Qa4mreConfig(year="2012", track="main", language="EN"), # 2012 Main track English (2012.main.EN)
Qa4mreConfig(year="2012", track="main", language="ES"), # 2012 Main track Spanish (2012.main.ES)
Qa4mreConfig(year="2012", track="main", language="IT"), # 2012 Main track Italian (2012.main.IT)
Qa4mreConfig(year="2012", track="main", language="RO"), # 2012 Main track Romanian (2012.main.RO)
Qa4mreConfig(year="2012", track="alzheimers", language="EN"), # (2012.alzheimers.EN)
Qa4mreConfig(year="2013", track="main", language="AR"), # 2013 Main track Arabic (2013.main.AR)
Qa4mreConfig(year="2013", track="main", language="BG"), # 2013 Main track Bulgarian (2013.main.BG)
Qa4mreConfig(year="2013", track="main", language="EN"), # 2013 Main track English (2013.main.EN)
Qa4mreConfig(year="2013", track="main", language="ES"), # 2013 Main track Spanish (2013.main.ES)
Qa4mreConfig(year="2013", track="main", language="RO"), # 2013 Main track Romanian (2013.main.RO)
Qa4mreConfig(year="2013", track="alzheimers", language="EN"), # (2013.alzheimers.EN)
Qa4mreConfig(year="2013", track="entrance_exam", language="EN"), # (2013.entrance_exam.EN)
]
def _info(self):
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"topic_id": nlp.Value("string"),
"topic_name": nlp.Value("string"),
"test_id": nlp.Value("string"),
"document_id": nlp.Value("string"),
"document_str": nlp.Value("string"),
"question_id": nlp.Value("string"),
"question_str": nlp.Value("string"),
"answer_options": nlp.features.Sequence(
{"answer_id": nlp.Value("string"), "answer_str": nlp.Value("string")}
),
"correct_answer_id": nlp.Value("string"),
"correct_answer_str": nlp.Value("string"),
}
),
# No default supervised keys because both passage and question are used
# to determine the correct answer.
supervised_keys=None,
homepage="http://nlp.uned.es/clef-qa/repository/pastCampaigns.php",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cfg = self.config
download_urls = dict()
if cfg.track == "main":
download_urls["{}.main.{}".format(cfg.year, cfg.lang)] = os.path.join(
_BASE_URL, PATHS[cfg.year]["_PATH_TMPL_MAIN_GS"].format(cfg.lang)
)
if cfg.year in ["2012", "2013"] and cfg.track == "alzheimers":
download_urls["{}.alzheimers.EN".format(cfg.year)] = os.path.join(
_BASE_URL, PATHS[cfg.year]["_PATH_ALZHEIMER"]
)
if cfg.year == "2013" and cfg.track == "entrance_exam":
download_urls["2013.entrance_exam.EN"] = os.path.join(_BASE_URL, PATHS[cfg.year]["_PATH_ENTRANCE_EXAM"])
downloaded_files = dl_manager.download_and_extract(download_urls)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"filepath": downloaded_files["{}.{}.{}".format(cfg.year, cfg.track, cfg.lang)]},
)
]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath, "rb") as f:
tree = ET.parse(f)
root = tree.getroot() # test-set
for topic in root:
topic_id = topic.attrib["t_id"]
topic_name = topic.attrib["t_name"]
for test in topic:
test_id = test.attrib["r_id"]
for document in test.iter("doc"):
document_id = document.attrib["d_id"]
document_str = document.text
for question in test.iter("q"):
yield _get_question(topic_id, topic_name, test_id, document_id, document_str, question)
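# Illustrative usage sketch (not part of the original script). Config names follow the
# "year.track.language" pattern built in Qa4mreConfig above, and only a TRAIN split is
# generated. This sketch assumes that Sequence features surface as a dict of lists.
if __name__ == "__main__":
    qa4mre_en = nlp.load_dataset("qa4mre", "2013.main.EN", split="train")
    example = qa4mre_en[0]
    print(example["question_str"])
    print(example["answer_options"]["answer_str"])
    print("gold:", example["correct_answer_str"])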
|
nlp-master
|
datasets/qa4mre/qa4mre.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""SCAN tasks with various different splits."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """
@inproceedings{Lake2018GeneralizationWS,
title={Generalization without Systematicity: On the Compositional Skills of
Sequence-to-Sequence Recurrent Networks},
author={Brenden M. Lake and Marco Baroni},
booktitle={ICML},
year={2018},
url={https://arxiv.org/pdf/1711.00350.pdf},
}
"""
_DESCRIPTION = """SCAN tasks with various splits.
SCAN is a set of simple language-driven navigation tasks for studying
compositional learning and zero-shot generalization.
See https://github.com/brendenlake/SCAN for a description of the splits.
Example usage:
data = nlp.load_dataset('scan/length')
"""
_DATA_URL = "https://github.com/brendenlake/SCAN/archive/master.zip"
class ScanConfig(nlp.BuilderConfig):
"""BuilderConfig for SCAN."""
def __init__(self, name, directory=None, **kwargs):
"""BuilderConfig for SCAN.
Args:
name: Unique name of the split.
directory: Which subdirectory to read the split from.
**kwargs: keyword arguments forwarded to super.
"""
# Version history:
super(ScanConfig, self).__init__(name=name, version=nlp.Version("1.0.0"), description=_DESCRIPTION, **kwargs)
if directory is None:
self.directory = name + "_split"
else:
self.directory = directory
_COMMANDS = "commands"
_ACTIONS = "actions"
class Scan(nlp.GeneratorBasedBuilder):
"""SCAN task / splits as proposed by Brenden M. Lake and Marco Baroni."""
BUILDER_CONFIGS = [
ScanConfig(name="simple"),
ScanConfig(name="addprim_jump", directory="add_prim_split"),
ScanConfig(name="addprim_turn_left", directory="add_prim_split"),
ScanConfig(name="filler_num0", directory="filler_split"),
ScanConfig(name="filler_num1", directory="filler_split"),
ScanConfig(name="filler_num2", directory="filler_split"),
ScanConfig(name="filler_num3", directory="filler_split"),
ScanConfig(name="length"),
ScanConfig(name="template_around_right", directory="template_split"),
ScanConfig(name="template_jump_around_right", directory="template_split"),
ScanConfig(name="template_opposite_right", directory="template_split"),
ScanConfig(name="template_right", directory="template_split"),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_COMMANDS: nlp.Value("string"), _ACTIONS: nlp.Value("string"),}),
supervised_keys=None,
homepage="https://github.com/brendenlake/SCAN",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_DATA_URL)
data_dir = os.path.join(data_dir, "SCAN-master", self.config.directory)
split = self.config.name
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "tasks_train_" + split + ".txt")}
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "tasks_test_" + split + ".txt")}
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath) as infile:
for i, line in enumerate(infile):
if not line.startswith("IN: "):
continue
# Chop the prefix and split string between input and output
commands, actions = line[len("IN: ") :].strip().split(" OUT: ", 1)
yield i, {_COMMANDS: commands, _ACTIONS: actions}
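# Illustrative sketch (not part of the original script) of the "IN: ... OUT: ..." line
# format handled by _generate_examples above, applied to a single hard-coded line.
if __name__ == "__main__":
    sample_line = "IN: jump twice OUT: I_JUMP I_JUMP\n"
    commands, actions = sample_line[len("IN: "):].strip().split(" OUT: ", 1)
    print({_COMMANDS: commands, _ACTIONS: actions})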
|
nlp-master
|
datasets/scan/scan.py
|
"""TODO(xquad): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import nlp
_CITATION = """\
@article{Artetxe:etal:2019,
author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},
title = {On the cross-lingual transferability of monolingual representations},
journal = {CoRR},
volume = {abs/1910.11856},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.11856}
}
"""
_DESCRIPTION = """\
XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question answering
performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from the development set
of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into ten languages: Spanish, German,
Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently, the dataset is entirely parallel
across 11 languages.
"""
_URL = "https://github.com/deepmind/xquad/raw/master/"
_LANG = ["ar", "de", "zh", "vi", "en", "es", "hi", "el", "th", "tr", "ru"]
class XquadConfig(nlp.BuilderConfig):
""" BuilderConfig for Xquad"""
def __init__(self, lang, **kwargs):
"""
Args:
lang: string, language for the input text
**kwargs: keyword arguments forwarded to super.
"""
super(XquadConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.lang = lang
class Xquad(nlp.GeneratorBasedBuilder):
"""TODO(xquad): Short description of my dataset."""
# TODO(xquad): Set up version.
VERSION = nlp.Version("1.0.0")
BUILDER_CONFIGS = [
XquadConfig(name="xquad.{}".format(lang), description=_DESCRIPTION, lang=lang) for lang in _LANG
]
def _info(self):
# TODO(xquad): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/deepmind/xquad",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(xquad): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {lang: _URL + "xquad.{}.json".format(lang) for lang in _LANG}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": downloaded_files[self.config.lang]},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(xquad): Yields (key, example) tuples from the dataset
with open(filepath) as f:
xquad = json.load(f)
for article in xquad["data"]:
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
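# Illustrative usage sketch (not part of the original script). The description above says
# the dataset is fully parallel across languages; assuming the `nlp` load_dataset API and
# column access by feature name, the same question ids should appear in every language
# config (only a VALIDATION split is generated).
if __name__ == "__main__":
    xquad_en = nlp.load_dataset("xquad", "xquad.en", split="validation")
    xquad_de = nlp.load_dataset("xquad", "xquad.de", split="validation")
    assert set(xquad_en["id"]) == set(xquad_de["id"])
    print(len(xquad_en), "parallel question-answer pairs")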
|
nlp-master
|
datasets/xquad/xquad.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""NEWSROOM Dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@inproceedings{N18-1065,
author = {Grusky, Max and Naaman, Mor and Artzi, Yoav},
title = {NEWSROOM: A Dataset of 1.3 Million Summaries
with Diverse Extractive Strategies},
booktitle = {Proceedings of the 2018 Conference of the
North American Chapter of the Association for
Computational Linguistics: Human Language Technologies},
year = {2018},
}
"""
_DESCRIPTION = """
NEWSROOM is a large dataset for training and evaluating summarization systems.
It contains 1.3 million articles and summaries written by authors and
editors in the newsrooms of 38 major publications.
Dataset features includes:
- text: Input news text.
- summary: Summary for the news.
And additional features:
- title: news title.
- url: url of the news.
- date: date of the article.
- density: extractive density.
- coverage: extractive coverage.
- compression: compression ratio.
- density_bin: low, medium, high.
- coverage_bin: extractive, abstractive.
- compression_bin: low, medium, high.
This dataset can be downloaded upon request. Unzip all the contents
"train.jsonl, dev.jsonl, test.jsonl" into the manual data directory.
"""
_DOCUMENT = "text"
_SUMMARY = "summary"
_ADDITIONAL_TEXT_FEATURES = [
"title",
"url",
"date",
"density_bin",
"coverage_bin",
"compression_bin",
]
_ADDITIONAL_FLOAT_FEATURES = [
"density",
"coverage",
"compression",
]
class Newsroom(nlp.GeneratorBasedBuilder):
"""NEWSROOM Dataset."""
VERSION = nlp.Version("1.0.0")
@property
def manual_download_instructions(self):
return """\
You should download the dataset from http://lil.nlp.cornell.edu/newsroom/
The webpage requires registration.
To unzip the .tar file run `tar -zxvf complete.tar`. To unzip the .gz files
run `gunzip train.json.gz`, ...
After downloading, please place the files dev.jsonl, test.jsonl and
train.jsonl in a directory of your choice, which will be used as the
manual_dir, e.g. `~/.manual_dirs/newsroom`
Newsroom can then be loaded via:
`nlp.load_dataset("newsroom", data_dir="~/.manual_dirs/newsroom")`.
"""
def _info(self):
features = {k: nlp.Value("string") for k in [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES}
features.update({k: nlp.Value("float32") for k in _ADDITIONAL_FLOAT_FEATURES})
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(features),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="http://lil.nlp.cornell.edu/newsroom/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
if not os.path.exists(data_dir):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('newsroom', data_dir=...)` that includes files unzipped from the reclor zip. Manual download instructions: {}".format(
data_dir, self.manual_download_instructions
)
)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"input_file": os.path.join(data_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"input_file": os.path.join(data_dir, "dev.jsonl")},
),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"input_file": os.path.join(data_dir, "test.jsonl")},),
]
def _generate_examples(self, input_file=None):
"""Yields examples."""
with open(input_file) as f:
for i, line in enumerate(f):
d = json.loads(line)
# fields are "url", "archive", "title", "date", "text",
# "compression_bin", "density_bin", "summary", "density",
# "compression', "coverage", "coverage_bin",
yield i, {
k: d[k] for k in [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES + _ADDITIONAL_FLOAT_FEATURES
}
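# Illustrative usage sketch (not part of the original script). Newsroom needs a manual
# download (see manual_download_instructions above); the directory containing
# train.jsonl / dev.jsonl / test.jsonl is passed as data_dir. The path below is just the
# placeholder used in the instructions, not a real location.
if __name__ == "__main__":
    newsroom = nlp.load_dataset("newsroom", data_dir="~/.manual_dirs/newsroom", split="train")
    first = newsroom[0]
    print(first["title"], "| density_bin:", first["density_bin"])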
|
nlp-master
|
datasets/newsroom/newsroom.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CNN/DailyMail Summarization dataset, non-anonymized version."""
from __future__ import absolute_import, division, print_function
import hashlib
import logging
import os
import nlp
_DESCRIPTION = """\
CNN/DailyMail non-anonymized summarization dataset.
There are two features:
- article: text of news article, used as the document to be summarized
- highlights: joined text of highlights with <s> and </s> around each
highlight, which is the target summary
"""
# The second citation introduces the source data, while the first
# introduces the specific form (non-anonymized) we use here.
_CITATION = """\
@article{DBLP:journals/corr/SeeLM17,
author = {Abigail See and
Peter J. Liu and
Christopher D. Manning},
title = {Get To The Point: Summarization with Pointer-Generator Networks},
journal = {CoRR},
volume = {abs/1704.04368},
year = {2017},
url = {http://arxiv.org/abs/1704.04368},
archivePrefix = {arXiv},
eprint = {1704.04368},
timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{hermann2015teaching,
title={Teaching machines to read and comprehend},
author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
booktitle={Advances in neural information processing systems},
pages={1693--1701},
year={2015}
}
"""
_DL_URLS = {
# pylint: disable=line-too-long
"cnn_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ",
"dm_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs",
"test_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
"train_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
"val_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
# pylint: enable=line-too-long
}
_HIGHLIGHTS = "highlights"
_ARTICLE = "article"
_SUPPORTED_VERSIONS = [
# Using cased version.
nlp.Version("3.0.0", "Using cased version."),
# Same data as 0.0.2
nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
# Having the model predict newline separators makes it easier to evaluate
# using summary-level ROUGE.
nlp.Version("2.0.0", "Separate target sentences with newline."),
]
_DEFAULT_VERSION = nlp.Version("3.0.0", "Using cased version.")
class CnnDailymailConfig(nlp.BuilderConfig):
"""BuilderConfig for CnnDailymail."""
def __init__(self, **kwargs):
"""BuilderConfig for CnnDailymail.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(CnnDailymailConfig, self).__init__(**kwargs)
def _get_url_hashes(path):
"""Get hashes of urls in file."""
urls = _read_text_file(path)
def url_hash(u):
h = hashlib.sha1()
try:
u = u.encode("utf-8")
except UnicodeDecodeError:
logging.error("Cannot hash url: %s", u)
h.update(u)
return h.hexdigest()
return {url_hash(u): True for u in urls}
def _find_files(dl_paths, publisher, url_dict):
"""Find files corresponding to urls."""
if publisher == "cnn":
top_dir = os.path.join(dl_paths["cnn_stories"], "cnn", "stories")
elif publisher == "dm":
top_dir = os.path.join(dl_paths["dm_stories"], "dailymail", "stories")
else:
logging.fatal("Unsupported publisher: %s", publisher)
files = sorted(os.listdir(top_dir))
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0 : basename.find(".story")] in url_dict:
ret_files.append(os.path.join(top_dir, p))
return ret_files
def _subset_filenames(dl_paths, split):
"""Get filenames for a particular split."""
assert isinstance(dl_paths, dict), dl_paths
# Get filenames for a split.
if split == nlp.Split.TRAIN:
urls = _get_url_hashes(dl_paths["train_urls"])
elif split == nlp.Split.VALIDATION:
urls = _get_url_hashes(dl_paths["val_urls"])
elif split == nlp.Split.TEST:
urls = _get_url_hashes(dl_paths["test_urls"])
else:
logging.fatal("Unsupported split: %s", split)
cnn = _find_files(dl_paths, "cnn", urls)
dm = _find_files(dl_paths, "dm", urls)
return cnn + dm
DM_SINGLE_CLOSE_QUOTE = "\u2019" # unicode
DM_DOUBLE_CLOSE_QUOTE = "\u201d"
# acceptable ways to end a sentence
END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]
def _read_text_file(text_file):
lines = []
with open(text_file, "r") as f:
for line in f:
lines.append(line.strip())
return lines
def _get_art_abs(story_file, tfds_version):
"""Get abstract (highlights) and article from a story file path."""
# Based on https://github.com/abisee/cnn-dailymail/blob/master/
# make_datafiles.py
lines = _read_text_file(story_file)
# The GitHub code lowercases the text; that lowercasing was removed in 3.0.0.
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
def fix_missing_period(line):
"""Adds a period to a line that is missing a period."""
if "@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + " ."
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = " ".join(article_lines)
if tfds_version >= "2.0.0":
abstract = "\n".join(highlights)
else:
abstract = " ".join(highlights)
return article, abstract
class CnnDailymail(nlp.GeneratorBasedBuilder):
"""CNN/DailyMail non-anonymized summarization dataset."""
BUILDER_CONFIGS = [
CnnDailymailConfig(name=str(version), description="Plain text", version=version)
for version in _SUPPORTED_VERSIONS
]
def _info(self):
# Should return a nlp.DatasetInfo object
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_ARTICLE: nlp.Value("string"), _HIGHLIGHTS: nlp.Value("string")}),
supervised_keys=None,
homepage="https://github.com/abisee/cnn-dailymail",
citation=_CITATION,
)
def _vocab_text_gen(self, paths):
for _, ex in self._generate_examples(paths):
yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])
def _split_generators(self, dl_manager):
dl_paths = dl_manager.download_and_extract(_DL_URLS)
train_files = _subset_filenames(dl_paths, nlp.Split.TRAIN)
# Generate shared vocabulary
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"files": train_files}),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"files": _subset_filenames(dl_paths, nlp.Split.VALIDATION)}
),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"files": _subset_filenames(dl_paths, nlp.Split.TEST)}),
]
def _generate_examples(self, files):
for p in files:
article, highlights = _get_art_abs(p, self.config.version)
if not article or not highlights:
continue
fname = os.path.basename(p)
yield fname, {_ARTICLE: article, _HIGHLIGHTS: highlights}
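# Illustrative usage sketch (not part of the original script). Each supported version is
# exposed as a config named after its version string; from 2.0.0 onwards the highlights
# are newline-separated (see _get_art_abs above), which this sketch assumes when splitting.
if __name__ == "__main__":
    cnn_dm = nlp.load_dataset("cnn_dailymail", "3.0.0", split="validation")
    sample = cnn_dm[0]
    print(sample[_ARTICLE][:120], "...")
    print(sample[_HIGHLIGHTS].split("\n"))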
|
nlp-master
|
datasets/cnn_dailymail/cnn_dailymail.py
|
"""TODO(squad_it): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(squad_it): BibTeX citation
_CITATION = """\
@InProceedings{10.1007/978-3-030-03840-3_29,
author={Croce, Danilo and Zelenanska, Alexandra and Basili, Roberto},
editor={Ghidini, Chiara and Magnini, Bernardo and Passerini, Andrea and Traverso, Paolo},
title={Neural Learning for Question Answering in Italian},
booktitle={AI*IA 2018 -- Advances in Artificial Intelligence},
year={2018},
publisher={Springer International Publishing},
address={Cham},
pages={389--402},
isbn={978-3-030-03840-3}
}
"""
# TODO(squad_it):
_DESCRIPTION = """\
SQuAD-it is derived from the SQuAD dataset and it is obtained through semi-automatic translation of the SQuAD dataset
into Italian. It represents a large-scale dataset for open question answering processes on factoid questions in Italian.
The dataset contains more than 60,000 question/answer pairs derived from the original English dataset. The dataset is
split into training and test sets to support the replicability of the benchmarking of QA systems.
"""
_URL = "https://github.com/crux82/squad-it/raw/master"
_TRAIN_FILE = "SQuAD_it-train.json.gz"
_TEST_FILE = "SQuAD_it-test.json.gz"
class SquadIt(nlp.GeneratorBasedBuilder):
"""TODO(squad_it): Short description of my dataset."""
# TODO(squad_it): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(squad_it): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/crux82/squad-it",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(squad_it): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {"train": os.path.join(_URL, _TRAIN_FILE), "test": os.path.join(_URL, _TEST_FILE)}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(squad_it): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for example in data["data"]:
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
yield id_, {
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
|
nlp-master
|
datasets/squad_it/squad_it.py
|
"""TODO(arc): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(ai2_arc): BibTeX citation
_CITATION = """\
@article{allenai:arc,
author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
journal = {arXiv:1803.05457v1},
year = {2018},
}
"""
# TODO(ai2_arc):
_DESCRIPTION = """\
A new dataset of 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in
advanced question-answering. The dataset is partitioned into a Challenge Set and an Easy Set, where the former contains
only questions answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. We are also
including a corpus of over 14 million science sentences relevant to the task, and an implementation of three neural baseline models for this dataset. We pose ARC as a challenge to the community.
"""
_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/ARC-V1-Feb2018.zip"
class Ai2ArcConfig(nlp.BuilderConfig):
"""BuilderConfig for Ai2ARC."""
def __init__(self, **kwargs):
"""BuilderConfig for Ai2Arc.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Ai2ArcConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Ai2Arc(nlp.GeneratorBasedBuilder):
"""TODO(arc): Short description of my dataset."""
# TODO(arc): Set up version.
VERSION = nlp.Version("1.0.0")
BUILDER_CONFIGS = [
Ai2ArcConfig(
name="ARC-Challenge",
description="""\
Challenge Set of 2590 “hard” questions (those that both a retrieval and a co-occurrence method fail to answer correctly)
""",
),
Ai2ArcConfig(
name="ARC-Easy",
description="""\
Easy Set of 5197 questions
""",
),
]
def _info(self):
# TODO(ai2_arc): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"question": nlp.Value("string"),
"choices": nlp.features.Sequence({"text": nlp.Value("string"), "label": nlp.Value("string")}),
"answerKey": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/arc",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(ai2_arc): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "ARC-V1-Feb2018-2")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, self.config.name + "-Train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, self.config.name + "-Test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, self.config.name + "-Dev.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(ai2_arc): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for row in f:
data = json.loads(row)
answerkey = data["answerKey"]
id_ = data["id"]
question = data["question"]["stem"]
choices = data["question"]["choices"]
text_choices = [choice["text"] for choice in choices]
label_choices = [choice["label"] for choice in choices]
yield id_, {
"id": id_,
"answerKey": answerkey,
"question": question,
"choices": {"text": text_choices, "label": label_choices},
}
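# Illustrative usage sketch (not part of the original script). Given the parallel "label"
# and "text" lists yielded for the choices Sequence above, the correct choice text can be
# recovered by matching answerKey against the labels. Assumes the `nlp` load_dataset API.
if __name__ == "__main__":
    arc = nlp.load_dataset("ai2_arc", "ARC-Challenge", split="validation")
    example = arc[0]
    correct_idx = example["choices"]["label"].index(example["answerKey"])
    print(example["question"], "->", example["choices"]["text"][correct_idx])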
|
nlp-master
|
datasets/ai2_arc/ai2_arc.py
|
"""TODO(quarel): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(quarel): BibTeX citation
_CITATION = """\
@inproceedings{quarel_v1,
title={QuaRel: A Dataset and Models for Answering Questions about Qualitative Relationships},
author={Oyvind Tafjord and Peter Clark and Matt Gardner and Wen-tau Yih and Ashish Sabharwal},
year={2018},
journal={arXiv:1805.05377v1}
}
"""
# TODO(quarel):
_DESCRIPTION = """
QuaRel is a crowdsourced dataset of 2771 multiple-choice story questions, including their logical forms.
"""
_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/quarel-dataset-v1-nov2018.zip"
class Quarel(nlp.GeneratorBasedBuilder):
"""TODO(quarel): Short description of my dataset."""
# TODO(quarel): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(quarel): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"id": nlp.Value("string"),
"answer_index": nlp.Value("int32"),
"logical_forms": nlp.features.Sequence({"logical_form": nlp.Value("string")}),
"logical_form_pretty": nlp.Value("string"),
"world_literals": nlp.features.Sequence(
{"world1": nlp.Value("string"), "world2": nlp.Value("string")}
),
"question": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/quarel",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(quarel): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "quarel-dataset-v1")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "quarel-v1-train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "quarel-v1-test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "quarel-v1-dev.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(quarel): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"id": data["id"],
"answer_index": data["answer_index"],
"logical_forms": {"logical_form": data["logical_forms"],},
"world_literals": {
"world1": [data["world_literals"]["world1"]],
"world2": [data["world_literals"]["world2"]],
},
"logical_form_pretty": data["logical_form_pretty"],
"question": data["question"],
}
|
nlp-master
|
datasets/quarel/quarel.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Gigaword summarization dataset."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """
@article{graff2003english,
title={English gigaword},
author={Graff, David and Kong, Junbo and Chen, Ke and Maeda, Kazuaki},
journal={Linguistic Data Consortium, Philadelphia},
volume={4},
number={1},
pages={34},
year={2003}
}
@article{Rush_2015,
title={A Neural Attention Model for Abstractive Sentence Summarization},
url={http://dx.doi.org/10.18653/v1/D15-1044},
DOI={10.18653/v1/d15-1044},
journal={Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing},
publisher={Association for Computational Linguistics},
author={Rush, Alexander M. and Chopra, Sumit and Weston, Jason},
year={2015}
}
"""
_DESCRIPTION = """
Headline-generation on a corpus of article pairs from Gigaword consisting of
around 4 million articles. Use the 'org_data' provided by
https://github.com/microsoft/unilm/, which is identical to
https://github.com/harvardnlp/sent-summary but in a better format.
There are two features:
- document: article.
- summary: headline.
"""
_URL = "https://drive.google.com/uc?export=download&id=1USoQ8lJgN8kAWnUnRrupMGrPMLlDVqlV"
_DOCUMENT = "document"
_SUMMARY = "summary"
class Gigaword(nlp.GeneratorBasedBuilder):
"""Gigaword summarization dataset."""
# 1.0.0 contains a bug that uses validation data as training data.
# 1.1.0 Update to the correct train, validation and test data.
# 1.2.0 Replace <unk> with <UNK> in train/val to be consistent with test.
VERSION = nlp.Version("1.2.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_DOCUMENT: nlp.Value("string"), _SUMMARY: nlp.Value("string")}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/harvardnlp/sent-summary",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
pattern = os.path.join(dl_path, "org_data", "%s.%s.txt")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"src_path": pattern % ("train", "src"),
"tgt_path": pattern % ("train", "tgt"),
"replace_unk": True,
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"src_path": pattern % ("dev", "src"),
"tgt_path": pattern % ("dev", "tgt"),
"replace_unk": True,
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"src_path": pattern % ("test", "src"),
"tgt_path": pattern % ("test", "tgt"),
"replace_unk": False,
},
),
]
def _generate_examples(self, src_path=None, tgt_path=None, replace_unk=None):
"""Yields examples."""
with open(src_path) as f_d, open(tgt_path) as f_s:
for i, (doc_text, sum_text) in enumerate(zip(f_d, f_s)):
if replace_unk:
yield i, {
_DOCUMENT: doc_text.strip().replace("<unk>", "UNK"),
_SUMMARY: sum_text.strip().replace("<unk>", "UNK"),
}
else:
yield i, {_DOCUMENT: doc_text.strip(), _SUMMARY: sum_text.strip()}
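# Illustrative sketch (not part of the original script) of the replace_unk handling in
# _generate_examples above: "<unk>" placeholders are rewritten to "UNK" for the train and
# validation splits and left untouched for the test split.
if __name__ == "__main__":
    raw_line = "the <unk> index rose sharply"
    print(raw_line.replace("<unk>", "UNK"))  # train / validation form
    print(raw_line)  # test form (no replacement)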
|
nlp-master
|
datasets/gigaword/gigaword.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""XSum dataset."""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import nlp
_CITATION = """
@article{Narayan2018DontGM,
title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},
author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},
journal={ArXiv},
year={2018},
volume={abs/1808.08745}
}
"""
_DESCRIPTION = """
Extreme Summarization (XSum) Dataset.
There are two features:
- document: Input news article.
- summary: One sentence summary of the article.
"""
_URL = "https://s3.amazonaws.com/datasets.huggingface.co/summarization/xsum.tar.gz"
_DOCUMENT = "document"
_SUMMARY = "summary"
class Xsum(nlp.GeneratorBasedBuilder):
"""Extreme Summarization (XSum) Dataset."""
# Version 1.1.0 removes web contents.
VERSION = nlp.Version("1.1.0")
SUPPORTED_VERSIONS = [nlp.Version("1.0.0", "Dataset without cleaning.")]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({_DOCUMENT: nlp.Value("string"), _SUMMARY: nlp.Value("string"),}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
dl_path = os.path.join(dl_path, "xsum")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"source": os.path.join(dl_path, "train.source"),
"target": os.path.join(dl_path, "train.target"),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"source": os.path.join(dl_path, "val.source"),
"target": os.path.join(dl_path, "val.target"),
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"source": os.path.join(dl_path, "test.source"),
"target": os.path.join(dl_path, "test.target"),
},
),
]
def _generate_examples(self, source, target):
"""Yields examples."""
with open(source) as f1:
source = f1.readlines()
with open(target) as f2:
target = f2.readlines()
assert len(source) == len(target)
for i in range(len(target)):
yield i, {_DOCUMENT: source[i], _SUMMARY: target[i]}
|
nlp-master
|
datasets/xsum/xsum.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The BookCorpus dataset."""
from __future__ import absolute_import, division, print_function
import glob
import os
import re
import nlp
_DESCRIPTION = """\
Books are a rich source of both fine-grained information, such as how a character, \
an object or a scene looks, and of high-level semantics, such as what \
someone is thinking or feeling and how these states evolve through a story.\
This work aims to align books to their movie releases in order to provide\
rich descriptive explanations for visual content that go semantically far\
beyond the captions available in current datasets. \
"""
_CITATION = """\
@InProceedings{Zhu_2015_ICCV,
title = {Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books},
author = {Zhu, Yukun and Kiros, Ryan and Zemel, Rich and Salakhutdinov, Ruslan and Urtasun, Raquel and Torralba, Antonio and Fidler, Sanja},
booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
month = {December},
year = {2015}
}
"""
URL = "https://storage.googleapis.com/huggingface-nlp/datasets/bookcorpus/bookcorpus.tar.bz2"
class BookcorpusConfig(nlp.BuilderConfig):
"""BuilderConfig for BookCorpus."""
def __init__(self, **kwargs):
"""BuilderConfig for BookCorpus.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(BookcorpusConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class Bookcorpus(nlp.GeneratorBasedBuilder):
"""BookCorpus dataset."""
BUILDER_CONFIGS = [BookcorpusConfig(name="plain_text", description="Plain text",)]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"text": nlp.Value("string"),}),
supervised_keys=None,
homepage="https://yknzhu.wixsite.com/mbweb",
citation=_CITATION,
)
def _vocab_text_gen(self, archive):
for _, ex in self._generate_examples(archive):
yield ex["text"]
def _split_generators(self, dl_manager):
arch_path = dl_manager.download_and_extract(URL)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"directory": arch_path}),
]
def _generate_examples(self, directory):
files = [
os.path.join(directory, "books_large_p1.txt"),
os.path.join(directory, "books_large_p2.txt"),
]
_id = 0
for txt_file in files:
with open(txt_file, mode="r") as f:
for line in f:
yield _id, {"text": line.strip()}
_id += 1
|
nlp-master
|
datasets/bookcorpus/bookcorpus.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CivilComments from Jigsaw Unintended Bias Kaggle Competition."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
_CITATION = """
@article{DBLP:journals/corr/abs-1903-04561,
author = {Daniel Borkan and
Lucas Dixon and
Jeffrey Sorensen and
Nithum Thain and
Lucy Vasserman},
title = {Nuanced Metrics for Measuring Unintended Bias with Real Data for Text
Classification},
journal = {CoRR},
volume = {abs/1903.04561},
year = {2019},
url = {http://arxiv.org/abs/1903.04561},
archivePrefix = {arXiv},
eprint = {1903.04561},
timestamp = {Sun, 31 Mar 2019 19:01:24 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1903-04561},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """
The comments in this dataset come from an archive of the Civil Comments
platform, a commenting plugin for independent news sites. These public comments
were created from 2015 to 2017 and appeared on approximately 50 English-language
news sites across the world. When Civil Comments shut down in 2017, they chose
to make the public comments available in a lasting open archive to enable future
research. The original data, published on figshare, includes the public comment
text, some associated metadata such as article IDs, timestamps and
commenter-generated "civility" labels, but does not include user ids. Jigsaw
extended this dataset by adding additional labels for toxicity and identity
mentions. This data set is an exact replica of the data released for the
Jigsaw Unintended Bias in Toxicity Classification Kaggle challenge. This
dataset is released under CC0, as is the underlying comment text.
"""
_DOWNLOAD_URL = "https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/civil_comments.zip"
class CivilComments(nlp.GeneratorBasedBuilder):
"""Classification and tagging of 2M comments on news sites.
This version of the CivilComments Dataset provides access to the primary
seven labels that were annotated by crowd workers; the toxicity and other
tags are values between 0 and 1 indicating the fraction of annotators that
assigned these attributes to the comment text.
The other tags, which are only available for a fraction of the input examples,
are currently ignored, as are all of the attributes that were part of the
original civil comments release. See the Kaggle documentation for more
details about the available features.
"""
VERSION = nlp.Version("0.9.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"text": nlp.Value("string"),
"toxicity": nlp.Value("float32"),
"severe_toxicity": nlp.Value("float32"),
"obscene": nlp.Value("float32"),
"threat": nlp.Value("float32"),
"insult": nlp.Value("float32"),
"identity_attack": nlp.Value("float32"),
"sexual_explicit": nlp.Value("float32"),
}
),
# The supervised_keys version is very impoverished.
supervised_keys=("text", "toxicity"),
homepage="https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"filename": os.path.join(dl_path, "train.csv"), "toxicity_label": "target"},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"filename": os.path.join(dl_path, "test_public_expanded.csv"),
"toxicity_label": "toxicity",
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"filename": os.path.join(dl_path, "test_private_expanded.csv"),
"toxicity_label": "toxicity",
},
),
]
def _generate_examples(self, filename, toxicity_label):
"""Yields examples.
Each example contains a text input and then seven annotation labels.
Args:
filename: the path of the file to be read for this split.
toxicity_label: indicates 'target' or 'toxicity' to capture the variation
in the released labels for this dataset.
Yields:
A dictionary of features, all floating point except the input text.
"""
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
example = {}
example["text"] = row["comment_text"]
example["toxicity"] = float(row[toxicity_label])
for label in ["severe_toxicity", "obscene", "threat", "insult", "identity_attack", "sexual_explicit"]:
example[label] = float(row[label])
yield row["id"], example
|
nlp-master
|
datasets/civil_comments/civil_comments.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""PIAF Question Answering Dataset"""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import nlp
_CITATION = """\
@InProceedings{keraron-EtAl:2020:LREC,
author = {Keraron, Rachel and Lancrenon, Guillaume and Bras, Mathilde and Allary, Frédéric and Moyse, Gilles and Scialom, Thomas and Soriano-Morales, Edmundo-Pavel and Staiano, Jacopo},
title = {Project PIAF: Building a Native French Question-Answering Dataset},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
month = {May},
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {5483--5492},
abstract = {Motivated by the lack of data for non-English languages, in particular for the evaluation of downstream tasks such as Question Answering, we present a participatory effort to collect a native French Question Answering Dataset. Furthermore, we describe and publicly release the annotation tool developed for our collection effort, along with the data obtained and preliminary baselines.},
url = {https://www.aclweb.org/anthology/2020.lrec-1.673}
}
"""
_DESCRIPTION = """\
Piaf is a reading comprehension \
dataset. This version, published in February 2020, contains 3835 questions on French Wikipedia.
"""
class PiafConfig(nlp.BuilderConfig):
"""BuilderConfig for PIAF."""
def __init__(self, **kwargs):
"""BuilderConfig for PIAF.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(PiafConfig, self).__init__(**kwargs)
class Piaf(nlp.GeneratorBasedBuilder):
"""The Piaf Question Answering Dataset. Version 1.0."""
_URL = "https://github.com/etalab-ia/piaf-code/raw/master/"
_TRAINING_FILE = "piaf-v1.0.json"
BUILDER_CONFIGS = [
PiafConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text",
),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://piaf.etalab.studio",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls_to_download = {"train": os.path.join(self._URL, self._TRAINING_FILE)}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath) as f:
dataset = json.load(f)
for article in dataset["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
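# Hedged usage sketch (added for illustration): assuming the standard `nlp.load_dataset`
# entry point resolves this builder by name; note that PIAF only defines a train split.
if __name__ == "__main__":
    piaf = nlp.load_dataset("piaf", "plain_text", split="train")
    example = piaf[0]
    print(example["question"], "->", example["answers"]["text"])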
|
nlp-master
|
datasets/piaf/piaf.py
|
"""TODO(event2Mind): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
# TODO(event2Mind): BibTeX citation
_CITATION = """\
@inproceedings{event2Mind,
title={Event2Mind: Commonsense Inference on Events, Intents, and Reactions},
  author={Hannah Rashkin and Maarten Sap and Emily Allaway and Noah A. Smith and Yejin Choi},
year={2018}
}
"""
# TODO(event2Mind):\
_DESCRIPTION = """\
In Event2Mind, we explore the task of understanding stereotypical intents and reactions to events. Through crowdsourcing, we create a large corpus with 25,000 events and free-form descriptions of their intents and reactions, both of the event's subject and (potentially implied) other participants.
"""
_URL = "https://uwnlp.github.io/event2mind/data/event2mind.zip"
class Event2mind(nlp.GeneratorBasedBuilder):
"""TODO(event2Mind): Short description of my dataset."""
# TODO(event2Mind): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(event2Mind): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"Source": nlp.Value("string"),
"Event": nlp.Value("string"),
"Xintent": nlp.Value("string"),
"Xemotion": nlp.Value("string"),
"Otheremotion": nlp.Value("string"),
"Xsent": nlp.Value("string"),
"Osent": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://uwnlp.github.io/event2mind/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(event2Mind): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "train.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "test.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "dev.csv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(event2Mind): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = csv.DictReader(f)
for id_, row in enumerate(data):
yield id_, {
"Source": row["Source"],
"Event": row["Event"],
"Xintent": row["Xintent"],
"Xemotion": row["Xemotion"],
"Otheremotion": row["Otheremotion"],
"Xsent": row["Xsent"],
"Osent": row["Osent"],
}
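# Hedged usage sketch (added for illustration): assuming the standard `nlp.load_dataset`
# entry point resolves this builder by its directory name.
if __name__ == "__main__":
    event2mind = nlp.load_dataset("event2Mind", split="train")
    row = event2mind[0]
    print(row["Event"], "| intent:", row["Xintent"], "| reaction:", row["Xemotion"])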
|
nlp-master
|
datasets/event2Mind/event2Mind.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Facebook Low Resource (FLoRes) machine translation benchmark dataset."""
from __future__ import absolute_import, division, print_function
import collections
import nlp
_DESCRIPTION = """\
Evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English.
"""
_CITATION = """\
@misc{guzmn2019new,
title={Two New Evaluation Datasets for Low-Resource Machine Translation: Nepali-English and Sinhala-English},
author={Francisco Guzman and Peng-Jen Chen and Myle Ott and Juan Pino and Guillaume Lample and Philipp Koehn and Vishrav Chaudhary and Marc'Aurelio Ranzato},
year={2019},
eprint={1902.01382},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DATA_URL = "https://github.com/facebookresearch/flores/raw/master/data/wikipedia_en_ne_si_test_sets.tgz"
# Tuple that describes a single pair of files with matching translations.
# language_to_file is the map from language (2 letter string: example 'en')
# to the file path in the extracted directory.
TranslateData = collections.namedtuple("TranslateData", ["url", "language_to_file"])
class FloresConfig(nlp.BuilderConfig):
"""BuilderConfig for FLoRes."""
def __init__(self, language_pair=(None, None), **kwargs):
"""BuilderConfig for FLoRes.
Args:
language_pair: pair of languages that will be used for translation. Should
            contain 2-letter coded strings. First will be used as source and second
as target in supervised mode. For example: ("se", "en").
**kwargs: keyword arguments forwarded to super.
"""
name = "%s%s" % (language_pair[0], language_pair[1])
description = ("Translation dataset from %s to %s") % (language_pair[0], language_pair[1])
super(FloresConfig, self).__init__(
name=name,
description=description,
version=nlp.Version("1.1.0", "New split API (https://tensorflow.org/datasets/splits)"),
**kwargs,
)
# Validate language pair.
assert "en" in language_pair, ("Config language pair must contain `en`, got: %s", language_pair)
source, target = language_pair
non_en = source if target == "en" else target
assert non_en in ["ne", "si"], ("Invalid non-en language in pair: %s", non_en)
self.language_pair = language_pair
class Flores(nlp.GeneratorBasedBuilder):
"""FLoRes machine translation dataset."""
BUILDER_CONFIGS = [
FloresConfig(language_pair=("ne", "en"),),
FloresConfig(language_pair=("si", "en"),),
]
def _info(self):
source, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(source, target),
homepage="https://github.com/facebookresearch/flores/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_DATA_URL)
source, target = self.config.language_pair
non_en = source if target == "en" else target
path_tmpl = "{dl_dir}/wikipedia_en_ne_si_test_sets/wikipedia.{split}.{non_en}-en." "{lang}"
files = {}
for split in ("dev", "devtest"):
files[split] = {
"source_file": path_tmpl.format(dl_dir=dl_dir, split=split, non_en=non_en, lang=source),
"target_file": path_tmpl.format(dl_dir=dl_dir, split=split, non_en=non_en, lang=target),
}
return [
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs=files["dev"]),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs=files["devtest"]),
]
def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with open(source_file) as f:
source_sentences = f.read().split("\n")
with open(target_file) as f:
target_sentences = f.read().split("\n")
assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(source_sentences),
len(target_sentences),
source_file,
target_file,
)
source, target = self.config.language_pair
for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
result = {"translation": {source: l1, target: l2}}
# Make sure that both translations are non-empty.
if all(result.values()):
yield idx, result
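# Hedged usage sketch (added for illustration): config names concatenate the language pair
# without a separator ("neen" for Nepali-English, "sien" for Sinhala-English), and only
# validation and test splits exist. Assumes the standard `nlp.load_dataset` entry point.
if __name__ == "__main__":
    flores_ne_en = nlp.load_dataset("flores", "neen", split="validation")
    print(flores_ne_en[0]["translation"])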
|
nlp-master
|
datasets/flores/flores.py
|
"""qanta dataset."""
from __future__ import absolute_import, division, print_function
import json
from typing import List, Tuple
import nlp
_CITATION = """
@article{Rodriguez2019QuizbowlTC,
title={Quizbowl: The Case for Incremental Question Answering},
author={Pedro Rodriguez and Shi Feng and Mohit Iyyer and He He and Jordan L. Boyd-Graber},
journal={ArXiv},
year={2019},
volume={abs/1904.04792}
}
"""
_DESCRIPTION = """
The Qanta dataset is a question answering dataset based on the academic trivia game Quizbowl.
"""
_QANTA_URL = "https://s3-us-west-2.amazonaws.com/pinafore-us-west-2/qanta-jmlr-datasets/qanta.mapped.2018.04.18.json"
_TRICK_URL = "https://s3-us-west-2.amazonaws.com/pinafore-us-west-2/trick-tacl-datasets/qanta.tacl-trick.json"
_VERSION = nlp.Version("2018.04.18")
_FIRST = "first"
_FULL = "full"
_SENTENCES = "sentences"
_RUNS = "runs"
# Order matters, the first one is default
_MODES = [_FULL, _FIRST, _SENTENCES, _RUNS]
_DEFAULT_CHAR_SKIP = 25
class QantaConfig(nlp.BuilderConfig):
"""BuilderConfig for Qanta."""
def __init__(self, mode: str, char_skip: int, **kwargs):
super(QantaConfig, self).__init__(version=_VERSION, **kwargs)
self.mode = mode
self.char_skip = char_skip
def create_char_runs(text: str, char_skip: int) -> List[Tuple[str, int]]:
"""
    Returns runs of the question built by revealing char_skip additional characters at a
    time, together with the character index at which each run ends.
    For example, with q = "name this first united states president." and char_skip=10
    the runs are:
    ['name this ',
     'name this first unit',
     'name this first united states ',
     'name this first united states president.']
    :param text: Full text of the question
    :param char_skip: Number of characters to skip each time
"""
char_indices = list(range(char_skip, len(text) + char_skip, char_skip))
return [(text[:idx], idx) for idx in char_indices]
def with_default(key, lookup, default):
if key in lookup:
value = lookup[key]
if value is None:
return default
else:
return value
else:
return default
def question_to_examples(question, mode: str, char_skip: int):
features = {
"qanta_id": question["qanta_id"],
"proto_id": with_default("proto_id", question, ""),
"qdb_id": with_default("qdb_id", question, -1),
# We refer to the actual answer as page, but this
# may be misleading externally, so rename here to
# be clearer
"page": question["page"],
"answer": question["page"],
"raw_answer": question["answer"],
"dataset": with_default("dataset", question, ""),
"full_question": question["text"],
"first_sentence": question["first_sentence"],
"tokenizations": question["tokenizations"],
"fold": question["fold"],
"gameplay": question["gameplay"],
"category": with_default("category", question, ""),
"subcategory": with_default("subcategory", question, ""),
"tournament": question["tournament"],
"difficulty": with_default("difficulty", question, ""),
"year": question["year"],
"char_idx": -1,
"sentence_idx": -1,
}
if mode == _FULL:
yield {
"text": question["text"],
"id": str(question["qanta_id"]) + "-full",
**features,
}
elif mode == _FIRST:
yield {
"text": question["first_sentence"],
"id": str(question["qanta_id"]) + "-first",
**features,
}
elif mode == _RUNS:
text = question["text"]
for text_run, char_idx in create_char_runs(text, char_skip):
yield {
"text": text_run,
"char_idx": char_idx,
"id": str(question["qanta_id"]) + "-char-" + str(char_idx),
**features,
}
elif mode == _SENTENCES:
for sentence_idx, (start, end) in enumerate(question["tokenizations"]):
sentence = question["text"][start:end]
yield {
"text": sentence,
"sentence_idx": sentence_idx,
"id": str(question["qanta_id"]) + "-sentence-" + str(sentence_idx),
**features,
}
else:
raise ValueError(f"Invalid mode: {mode}")
_FEATURES = {
    # Generated ID that is unique per example and depends on the mode set in the config
"id": nlp.Value("string"),
# Dataset defined IDs
"qanta_id": nlp.Value("int32"),
"proto_id": nlp.Value("string"),
"qdb_id": nlp.Value("int32"),
"dataset": nlp.Value("string"),
# Inputs
"text": nlp.Value("string"),
"full_question": nlp.Value("string"),
"first_sentence": nlp.Value("string"),
"char_idx": nlp.Value("int32"),
"sentence_idx": nlp.Value("int32"),
# Character indices of sentences: List[Tuple[int, int]]
"tokenizations": nlp.features.Sequence(nlp.features.Sequence(nlp.Value("int32"), length=2)),
# Labels: Number is equal to number of unique pages across all folds
"answer": nlp.Value("string"),
"page": nlp.Value("string"),
"raw_answer": nlp.Value("string"),
# Meta Information
"fold": nlp.Value("string"),
"gameplay": nlp.Value("bool"),
"category": nlp.Value("string"),
"subcategory": nlp.Value("string"),
"tournament": nlp.Value("string"),
"difficulty": nlp.Value("string"),
"year": nlp.Value("int32"),
}
class Qanta(nlp.GeneratorBasedBuilder):
"""The Qanta dataset is a question answering dataset based on the academic trivia game Quizbowl.
"""
VERSION = _VERSION
BUILDER_CONFIGS = [
QantaConfig(
name=f"mode={mode},char_skip={_DEFAULT_CHAR_SKIP}",
description=f"Question format: {mode}, char_skip: {_DEFAULT_CHAR_SKIP}",
mode=mode,
char_skip=_DEFAULT_CHAR_SKIP,
)
for mode in _MODES
]
def _info(self):
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(_FEATURES),
            # The number of classes is a function of the dataset and ClassLabel doesn't support
            # dynamic definition, so conversion to class labels is deferred and supervised keys
            # cannot be defined here.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://www.qanta.org/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
qanta_path = dl_manager.download_and_extract(_QANTA_URL)
trick_path = dl_manager.download_and_extract(_TRICK_URL)
return [
nlp.SplitGenerator(
name=nlp.Split("guesstrain"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "guesstrain",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
nlp.SplitGenerator(
name=nlp.Split("buzztrain"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "buzztrain",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
nlp.SplitGenerator(
name=nlp.Split("guessdev"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "guessdev",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
nlp.SplitGenerator(
name=nlp.Split("buzzdev"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "buzzdev",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
nlp.SplitGenerator(
name=nlp.Split("guesstest"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "guesstest",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
nlp.SplitGenerator(
name=nlp.Split("buzztest"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "buzztest",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
nlp.SplitGenerator(
name=nlp.Split("adversarial"),
gen_kwargs={
"qanta_filepath": qanta_path,
"trick_filepath": trick_path,
"fold": "adversarial",
"mode": self.config.mode,
"char_skip": self.config.char_skip,
},
),
]
def _generate_examples(
self, qanta_filepath: str, trick_filepath: str, fold: str, mode: str, char_skip: int,
):
"""Yields examples."""
if mode not in _MODES:
raise ValueError(f"Invalid mode: {mode}")
if fold == "adversarial":
path = trick_filepath
else:
path = qanta_filepath
with open(path) as f:
questions = json.load(f)["questions"]
for q in questions:
if q["page"] is not None and q["fold"] == fold:
for example in question_to_examples(q, mode, char_skip):
yield example["id"], example
|
nlp-master
|
datasets/qanta/qanta.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Multi-Genre NLI Corpus."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """\
@InProceedings{N18-1101,
author = {Williams, Adina
and Nangia, Nikita
and Bowman, Samuel},
title = {A Broad-Coverage Challenge Corpus for
Sentence Understanding through Inference},
booktitle = {Proceedings of the 2018 Conference of
the North American Chapter of the
Association for Computational Linguistics:
Human Language Technologies, Volume 1 (Long
Papers)},
year = {2018},
publisher = {Association for Computational Linguistics},
pages = {1112--1122},
location = {New Orleans, Louisiana},
url = {http://aclweb.org/anthology/N18-1101}
}
"""
_DESCRIPTION = """\
The Multi-Genre Natural Language Inference (MultiNLI) corpus is a
crowd-sourced collection of 433k sentence pairs annotated with textual
entailment information. The corpus is modeled on the SNLI corpus, but differs in
that it covers a range of genres of spoken and written text, and supports a
distinctive cross-genre generalization evaluation. The corpus served as the
basis for the shared task of the RepEval 2017 Workshop at EMNLP in Copenhagen.
"""
ROOT_URL = "http://storage.googleapis.com/tfds-data/downloads/multi_nli/multinli_1.0.zip"
class MultiNLIMismatchConfig(nlp.BuilderConfig):
"""BuilderConfig for MultiNLI Mismatch."""
def __init__(self, **kwargs):
"""BuilderConfig for MultiNLI Mismatch.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(MultiNLIMismatchConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class MultiNliMismatch(nlp.GeneratorBasedBuilder):
"""MultiNLI: The Stanford Question Answering Dataset. Version 1.1."""
BUILDER_CONFIGS = [
MultiNLIMismatchConfig(name="plain_text", description="Plain text",),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{"premise": nlp.Value("string"), "hypothesis": nlp.Value("string"), "label": nlp.Value("string"),}
),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage="https://www.nyu.edu/projects/bowman/multinli/",
citation=_CITATION,
)
def _vocab_text_gen(self, filepath):
for _, ex in self._generate_examples(filepath):
yield " ".join([ex["premise"], ex["hypothesis"], ex["label"]])
def _split_generators(self, dl_manager):
downloaded_dir = dl_manager.download_and_extract(ROOT_URL)
mnli_path = os.path.join(downloaded_dir, "multinli_1.0")
train_path = os.path.join(mnli_path, "multinli_1.0_train.txt")
validation_path = os.path.join(mnli_path, "multinli_1.0_dev_mismatched.txt")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": train_path}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
]
def _generate_examples(self, filepath):
"""Generate mnli mismatch examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
"""
for idx, line in enumerate(open(filepath, "rb")):
if idx == 0:
continue
line = line.strip().decode("utf-8")
split_line = line.split("\t")
yield idx, {"premise": split_line[5], "hypothesis": split_line[6], "label": split_line[0]}
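# Hedged usage sketch (added for illustration): assuming the standard `nlp.load_dataset`
# entry point resolves this builder; only train and validation (mismatched dev) splits exist.
if __name__ == "__main__":
    mnli_mm = nlp.load_dataset("multi_nli_mismatch", "plain_text", split="validation")
    pair = mnli_mm[0]
    print(pair["premise"], "|", pair["hypothesis"], "=>", pair["label"])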
|
nlp-master
|
datasets/multi_nli_mismatch/multi_nli_mismatch.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT15: Translate dataset."""
import nlp
from .wmt_utils import Wmt, WmtConfig
_URL = "http://www.statmt.org/wmt15/translation-task.html"
_CITATION = """
@InProceedings{bojar-EtAl:2015:WMT,
author = {Bojar, Ond\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Haddow, Barry and Huck, Matthias and Hokamp, Chris and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Scarton, Carolina and Specia, Lucia and Turchi, Marco},
title = {Findings of the 2015 Workshop on Statistical Machine Translation},
booktitle = {Proceedings of the Tenth Workshop on Statistical Machine Translation},
month = {September},
year = {2015},
address = {Lisbon, Portugal},
publisher = {Association for Computational Linguistics},
pages = {1--46},
url = {http://aclweb.org/anthology/W15-3001}
}
"""
_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "fr", "ru"]]
class Wmt15(Wmt):
"""WMT 15 translation datasets for all {xx, "en"} language pairs."""
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2015 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=nlp.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def manual_download_instructions(self):
        if self.config.language_pair[0] in ["cs", "hi", "ru"]:
return "Please download the data manually as explained. TODO(PVP)"
@property
def _subsets(self):
return {
nlp.Split.TRAIN: [
"europarl_v7",
"europarl_v8_16",
"commoncrawl",
"multiun",
"newscommentary_v10",
"gigafren",
"czeng_10",
"yandexcorpus",
"wikiheadlines_fi",
"wikiheadlines_ru",
],
nlp.Split.VALIDATION: ["newsdev2015", "newsdiscussdev2015", "newstest2014"],
nlp.Split.TEST: ["newstest2015", "newsdiscusstest2015"],
}
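# Hedged usage sketch (added for illustration): config names follow WmtConfig's "<src>-en"
# pattern, e.g. "de-en" or "fi-en"; the cs/ru pairs additionally rely on manually
# downloaded corpora. Assumes the standard `nlp.load_dataset` entry point.
if __name__ == "__main__":
    wmt15_de_en = nlp.load_dataset("wmt15", "de-en", split="validation")
    print(wmt15_de_en[0]["translation"])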
|
nlp-master
|
datasets/wmt15/wmt15.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exists for the different years using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
        specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
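# Illustrative note (added): given a template URL such as
#   "http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz"
# SubDataset.get_url("de") fills the "{src}" placeholder and resolves to the de-en file,
# while "{0}"/"{1}" templates are filled with the two language codes in alphabetical order,
# so a ("de", "en") pair resolves the news-commentary v14 template to
# "...news-commentary-v14.de-en.tsv.gz".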
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
language_pair: pair of languages that will be used for translation. Should
        contain 2-letter coded strings. For example: ("en", "de").
      subsets: Dict[split, list[str]]. List of the subsets to use for each of the
        splits. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
assert NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is a abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt15/wmt_utils.py
|
"""TODO(winogrande): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import nlp
# TODO(winogrande): BibTeX citation
_CITATION = """\
@InProceedings{ai2:winogrande,
title = {WinoGrande: An Adversarial Winograd Schema Challenge at Scale},
author = {Sakaguchi, Keisuke and Le Bras, Ronan and Bhagavatula, Chandra and Choi, Yejin},
year={2019}
}
"""
# TODO(winogrande):
_DESCRIPTION = """\
WinoGrande is a new collection of 44k problems, inspired by Winograd Schema Challenge (Levesque, Davis, and Morgenstern
2011), but adjusted to improve the scale and robustness against the dataset-specific bias. Formulated as a
fill-in-a-blank task with binary options, the goal is to choose the right option for a given sentence which requires
commonsense reasoning.
"""
_URL = "https://storage.googleapis.com/ai2-mosaic/public/winogrande/winogrande_1.1.zip"
_SIZES = ["xs", "s", "m", "l", "xl"]
class WinograndeConfig(nlp.BuilderConfig):
""" BuilderConfig for Discofuse"""
def __init__(self, data_size, **kwargs):
"""
Args:
            data_size: the size of the training set we want to use (xs, s, m, l, xl)
**kwargs: keyword arguments forwarded to super.
"""
super(WinograndeConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.data_size = data_size
class Winogrande(nlp.GeneratorBasedBuilder):
"""TODO(winogrande): Short description of my dataset."""
# TODO(winogrande): Set up version.
VERSION = nlp.Version("1.1.0")
BUILDER_CONFIGS = [
WinograndeConfig(name="winogrande_" + size, description="AI2 dataset", data_size=size) for size in _SIZES
]
def _info(self):
# TODO(winogrande): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"sentence": nlp.Value("string"),
"option1": nlp.Value("string"),
"option2": nlp.Value("string"),
"answer": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://leaderboard.allenai.org/winogrande/submissions/get-started",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(winogrande): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "winogrande_1.1")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "train_{}.jsonl".format(self.config.data_size)),
#'labelpath': os.path.join(data_dir, 'train_{}-labels.lst'.format(self.config.data_size)),
"split": "train",
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl"), "split": "test"},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "dev.jsonl"),
#'labelpath': os.path.join(data_dir, 'dev-labels.lst'),
"split": "dev",
},
),
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
# TODO(winogrande): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
if split == "test":
yield id_, {
"sentence": data["sentence"],
"option1": data["option1"],
"option2": data["option2"],
"answer": "",
}
else:
yield id_, {
"sentence": data["sentence"],
"option1": data["option1"],
"option2": data["option2"],
"answer": data["answer"],
}
# def _generate_test_example(filepath, split, labelpath=None):
# with open(filepath) as f:
# for id_, row in enumerate(f):
# data = json.loads(row)
# yield id_,{
# 'sentence': data['sentence'],
# 'option1': data['option1'],
# 'option2': data['option2'],
# 'answer': None
# }
|
nlp-master
|
datasets/winogrande/winogrande.py
|
"""TODO(x_stance): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(x_stance): BibTeX citation
_CITATION = """\
@inproceedings{vamvas2020xstance,
author = "Vamvas, Jannis and Sennrich, Rico",
title = "{X-Stance}: A Multilingual Multi-Target Dataset for Stance Detection",
booktitle = "Proceedings of the 5th Swiss Text Analytics Conference (SwissText) \\& 16th Conference on Natural Language Processing (KONVENS)",
address = "Zurich, Switzerland",
year = "2020",
month = "jun",
url = "http://ceur-ws.org/Vol-2624/paper9.pdf"
}
"""
# TODO(x_stance):
_DESCRIPTION = """\
The x-stance dataset contains more than 150 political questions, and 67k comments written by candidates on those questions.
It can be used to train and evaluate stance detection systems.
"""
_URL = "https://github.com/ZurichNLP/xstance/raw/v1.0.0/data/xstance-data-v1.0.zip"
class XStance(nlp.GeneratorBasedBuilder):
"""TODO(x_stance): Short description of my dataset."""
# TODO(x_stance): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(x_stance): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"question": nlp.Value("string"),
"id": nlp.Value("int32"),
"question_id": nlp.Value("int32"),
"language": nlp.Value("string"),
"comment": nlp.Value("string"),
"label": nlp.Value("string"),
"numerical_label": nlp.Value("int32"),
"author": nlp.Value("string"),
"topic": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/ZurichNLP/xstance",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(x_stance): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(dl_dir, "valid.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(x_stance): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"id": data["id"],
"question_id": data["question_id"],
"question": data["question"],
"comment": data["comment"],
"label": data["label"],
"author": data["author"],
"numerical_label": data["numerical_label"],
"topic": data["topic"],
"language": data["language"],
}
|
nlp-master
|
datasets/x_stance/x_stance.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The WMT EnDe Translate dataset used by the Tensor2Tensor library."""
import nlp
from .wmt_utils import Wmt, WmtConfig
_URL = "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_ende.py"
_CITATION = """
@InProceedings{bojar-EtAl:2014:W14-33,
author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale\v{s}},
title = {Findings of the 2014 Workshop on Statistical Machine Translation},
booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation},
month = {June},
year = {2014},
address = {Baltimore, Maryland, USA},
publisher = {Association for Computational Linguistics},
pages = {12--58},
url = {http://www.aclweb.org/anthology/W/W14/W14-3302}
}
"""
class WmtT2t(Wmt):
"""The WMT EnDe Translate dataset used by the Tensor2Tensor library."""
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT T2T EnDe translation task dataset.",
url=_URL,
citation=_CITATION,
language_pair=("de", "en"),
version=nlp.Version("1.0.0"),
)
]
@property
def manual_download_instructions(self):
if self.config.language_pair[1] in ["cs", "hi", "ru"]:
return "Please download the data manually as explained. TODO(PVP)"
@property
def _subsets(self):
return {
nlp.Split.TRAIN: ["europarl_v7", "commoncrawl", "newscommentary_v13"],
nlp.Split.VALIDATION: ["newstest2013"],
nlp.Split.TEST: ["newstest2014"],
}
|
nlp-master
|
datasets/wmt_t2t/wmt_t2t.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exists for the different years using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
        specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
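# Note (illustrative example with assumed values): `_inject_language` supports
# three placeholder styles. For a hypothetical SubDataset with sources={"de"},
# target="en" and
#     url="http://example.org/{0}-{1}/corpus.{src}.gz",
# get_url("de") would return ["http://example.org/de-en/corpus.de.gz"]:
# "{0}"/"{1}" are the two language codes in alphabetical order and "{src}" is the
# source code itself; strings without placeholders pass through unchanged.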
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
          language_pair: pair of languages that will be used for translation. Should
            contain 2-letter language codes, for example ("en", "de").
          subsets: Dict[split, list[str]]. List of the subsets to use for each
            split. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
            raise NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is a abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
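# Illustrative note (assumed file shapes, not taken from the archives above): an
# SGML side contains lines such as
#     <seg id="3">A sentence of the reference.</seg>
# which `seg_re` extracts, while plain-text/.gz sides are read line by line; the
# two sides are zipped in order, so keys look like "<file idx>/<line idx>",
# e.g. "0/42".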
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
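# Illustrative note (assumed TMX layout): a translation unit such as
#     <tu>
#       <tuv xml:lang="en"><seg>Hello</seg></tuv>
#       <tuv xml:lang="de"><seg>Hallo</seg></tuv>
#     </tu>
# is yielded as {"en": "Hello", "de": "Hallo"}; `elem.clear()` releases each parsed
# element so large TMX files can be streamed without holding the whole tree in memory.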
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
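# Illustrative note: for a hypothetical file named "corpus.de-en.tsv" the filename
# regex recovers ("de", "en"), and a line "Haus\tHouse\n" is yielded as
# {"de": "Haus", "en": "House"}; lines without exactly two tab-separated columns
# are skipped with a warning.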
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
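# Illustrative note (assumed content): Wikiheadlines files hold one "|||"-delimited
# pair per line, e.g. a hypothetical line "Sininen talo ||| Blue house" in
# "wiki/fi-en/titles.fi-en" would be yielded as {"fi": "Sininen talo", "en": "Blue house"}.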
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
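# Illustrative note (assumed line layout): CzEng plain-text exports are
# tab-separated as "<id>\t<score>\t<cs sentence>\t<en sentence>"; the id encodes a
# block number (e.g. a hypothetical "subtitlesM-b123-00train" yields block "123"),
# and blocks listed by the optional v1.7 filter script are dropped.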
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt_t2t/wmt_utils.py
|
"""TODO(boolq): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import tensorflow as tf
import nlp
# TODO(boolq): BibTeX citation
_CITATION = """\
@inproceedings{clark2019boolq,
title = {BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
author = {Clark, Christopher and Lee, Kenton and Chang, Ming-Wei and Kwiatkowski, Tom and Collins, Michael and Toutanova, Kristina},
booktitle = {NAACL},
year = {2019},
}
"""
# TODO(boolq):
_DESCRIPTION = """\
BoolQ is a question answering dataset for yes/no questions containing 15942 examples. These questions are naturally
occurring ---they are generated in unprompted and unconstrained settings.
Each example is a triplet of (question, passage, answer), with the title of the page as optional additional context.
The text-pair classification setup is similar to existing natural language inference tasks.
"""
_URL = "gs://boolq"
_TRAIN_FILE_NAME = "train.jsonl"
_DEV_FILE_NAME = "dev.jsonl"
class Boolq(nlp.GeneratorBasedBuilder):
"""TODO(boolq): Short description of my dataset."""
# TODO(boolq): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(boolq): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"question": nlp.Value("string"),
"answer": nlp.Value("bool"),
"passage": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/google-research-datasets/boolean-questions",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(boolq): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {
"train": os.path.join(_URL, _TRAIN_FILE_NAME),
"dev": os.path.join(_URL, _DEV_FILE_NAME),
}
downloaded_files = dl_manager.download_custom(urls_to_download, tf.io.gfile.copy)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]},),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(boolq): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
question = data["question"]
answer = data["answer"]
passage = data["passage"]
yield id_, {"question": question, "answer": answer, "passage": passage}
|
nlp-master
|
datasets/boolq/boolq.py
|
# coding=utf-8
# Copyright 2020 Facebook, Inc. and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""ELI5: Long Form Question Answering dataset"""
from __future__ import absolute_import, division, print_function
import bz2
import io
import json
import logging
import lzma
import os
import re
from os.path import isfile
from os.path import join as pjoin
from time import time
import nlp
_SUB_REDDITS = ["explainlikeimfive", "askscience", "AskHistorians"]
_REDDIT_URL = "https://files.pushshift.io/reddit/"
# pylint: disable=line-too-long
_URL_REGEX = r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
# pylint: enable=line-too-long
_HTML_PAIRS = [
    ("&amp;", " & "),
    ("&quot;", ' " '),
    ("&apos", " ' "),
    ("&gt;", " > "),
    ("&lt;", " < "),
]
# removes URLs (kept in separate list)
def _extract_urls_from_text(stp):
url_list = list(set(re.findall(_URL_REGEX, stp)))
for i, url in enumerate(url_list):
stp = stp.replace(url, "_URL_%d_" % (i,))
for a, b in _HTML_PAIRS:
stp = stp.replace(a, b)
return (stp, url_list)
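# Illustrative note: for an input such as
#     "see https://example.com/page for details"
# the function returns
#     ("see _URL_0_ for details", ["https://example.com/page"])
# so the original URLs can be restored later from the separate list.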
# collects URLs for monthly dumps, has to be robust to file type changes
def _gather_dump_urls(base_url, mode, dl_manager):
from bs4 import BeautifulSoup
page_path = dl_manager.download(_REDDIT_URL + mode)
page_f = open(page_path)
page_content = page_f.read()
page_f.close()
soup = BeautifulSoup(page_content, "lxml")
files = [it for it in soup.find_all(attrs={"class": "file"})]
f_urls = [
tg.find_all(lambda x: x.has_attr("href"))[0]["href"]
for tg in files
if len(tg.find_all(lambda x: x.has_attr("href"))) > 0
]
date_to_url = {}
for url_st in f_urls:
ls = re.findall(r"20[0-9]{2}-[0-9]{2}", url_st)
if len(ls) > 0:
yr, mt = ls[0].split("-")
date_to_url[(int(yr), int(mt))] = base_url + mode + url_st[1:]
return date_to_url
# select valid top-level comments
def _valid_line(dct, mode):
top_level = (mode == "submissions") or (
len(dct["body"].split()) > 2
and not dct["body"].startswith("Your submission has been removed")
and dct["author"] != "AutoModerator"
and dct["parent_id"] == dct["link_id"]
)
res = dct.get("num_comments", 1) > 0 and dct.get("score", 0) and dct.get("score", 0) >= 2 and top_level
return res
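# Note (derived from the checks above): a comment is kept only if it has more than
# two tokens, is not an AutoModerator post or removal notice, replies directly to
# the submission (parent_id == link_id) and scores at least 2; submissions only
# need the comment-count and score checks.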
def _open_compressed_file(f_name, f_type):
import zstandard as zstd
fh = None
if f_type == "xz":
f = lzma.open(f_name, "rt")
elif f_type == "bz2":
f = bz2.open(f_name, "rt")
elif f_type == "zst":
fh = open(f_name, "rb")
dctx = zstd.ZstdDecompressor()
stream_reader = dctx.stream_reader(fh)
f = io.TextIOWrapper(stream_reader, encoding="utf-8")
else:
raise NotImplementedError
return f, fh
# download a file, extract posts from desired subreddit, then remove from disk
def _download_and_select_lines(dl_manager, f_url, mode, st_time):
# download and pre-process original posts
print("downloading {} {:.2f}".format(f_url, time() - st_time))
f_downloaded_path = dl_manager.download(f_url)
print("decompressing and filtering {} {:.2f}".format(f_url, time() - st_time))
f, fh = _open_compressed_file(f_downloaded_path, f_url.split(".")[-1])
lines = dict([(name, []) for name in _SUB_REDDITS])
for line in f:
line_dct = json.loads(line)
if any([line_dct.get("subreddit", "") == name for name in _SUB_REDDITS]):
lines[line_dct["subreddit"]] += [line_dct]
f.close()
if f_url.split(".")[-1] == "zst":
fh.close()
os.remove(f_downloaded_path)
os.remove(f_downloaded_path + ".json")
os.remove(f_downloaded_path + ".lock")
print("tokenizing and selecting {} {:.2f}".format(f_url, time() - st_time))
processed_items = dict([(name, []) for name in _SUB_REDDITS])
if mode == "submissions":
key_list = ["id", "score", "url", "title", "selftext", "subreddit"]
else:
key_list = ["id", "link_id", "parent_id", "score", "body"]
for name in _SUB_REDDITS:
for line in lines[name]:
if _valid_line(line, mode):
reddit_res = {}
for k in key_list:
if k in ["title", "selftext", "body"]:
reddit_res[k] = _extract_urls_from_text(line[k])
else:
reddit_res[k] = line[k]
processed_items[name] += [reddit_res]
print("Total found {} {} {:.2f}".format(sum([len(ls) for ls in processed_items.values()]), mode, time() - st_time))
return processed_items
# post-process ELI5 questions and de-duplicate answers
def _post_process(reddit_dct, name=""):
# remove the ELI5 at the start of explainlikeimfive questions
start_re = re.compile(r"""\A[\[|\(]?[ ]?eli[5f][ ]?[\]|\)]?[]?[:,]?""", re.IGNORECASE)
if name == "explainlikeimfive":
title, uls = reddit_dct["title"]
title = start_re.sub("", title.strip()).strip()
reddit_dct["title"] = [title, uls]
# dedupe and filter comments
comments = [
c
for i, c in enumerate(reddit_dct["comments"])
if len(c["body"][0].split()) >= 8 and c["id"] not in [x["id"] for x in reddit_dct["comments"][:i]]
]
comments = sorted(comments, key=lambda c: (c["score"], len(c["body"][0].split()), c["id"]), reverse=True)
reddit_dct["comments"] = comments
return reddit_dct
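# Illustrative note: a hypothetical title "ELI5: why is the sky blue?" becomes
# "why is the sky blue?"; answers shorter than 8 tokens or with duplicate ids are
# dropped, and the remainder is sorted by (score, length, id) in descending order.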
def _download_and_filter_reddit(dl_manager, start_year=2011, start_month=7, end_year=2019, end_month=7):
# collect submissions and comments monthly URLs
date_to_url_submissions = _gather_dump_urls(_REDDIT_URL, "submissions", dl_manager)
date_to_url_comments = _gather_dump_urls(_REDDIT_URL, "comments", dl_manager)
# download, filter, process, remove
st_time = time()
qa_dict = dict([(name, {}) for name in _SUB_REDDITS])
# first download all questions
for year in range(start_year, end_year + 1):
start_mth = start_month if year == start_year else 1
end_mth = end_month if year == end_year else 12
months = range(start_mth, end_mth + 1)
for month in months:
if (year, month) in date_to_url_submissions:
f_url = date_to_url_submissions[(year, month)]
processed_submissions = _download_and_select_lines(dl_manager, f_url, "submissions", st_time)
for name in _SUB_REDDITS:
for dct in processed_submissions[name]:
qa_dict[name][dct["id"]] = dct
else:
print("Could not find submissions dump file for year {:4d} month {:2d}".format(year, month))
# then all answers
for year in range(start_year, end_year + 1):
start_mth = start_month if year == start_year else 1
end_mth = end_month if year == end_year else 12
months = range(start_mth, end_mth + 1)
for month in months:
if (year, month) in date_to_url_comments:
f_url = date_to_url_comments[(year, month)]
processed_comments = _download_and_select_lines(dl_manager, f_url, "comments", st_time)
# merge submissions and comments
for name in _SUB_REDDITS:
merged_comments = 0
for dct in processed_comments[name]:
did = dct["parent_id"].split("_")[-1]
if did in qa_dict[name]:
merged_comments += 1
qa_dict[name][did]["comments"] = qa_dict[name][did].get("comments", []) + [dct]
else:
print("Could not find comments dump file for year {:4d} month {:2d}".format(year, month))
# then post-process
res = {}
for name in _SUB_REDDITS:
qa_dct_list = [(k, _post_process(rdct, name)) for k, rdct in qa_dict[name].items() if "comments" in rdct]
qa_dct_list = [x for x in qa_dct_list if len(x[1]["comments"]) > 0 and name in x[1]["url"]]
res[name] = dict(qa_dct_list[:])
return res
_DESCRIPTION = """\
Explain Like I'm 5 long form QA dataset
"""
_CITATION = """\
@inproceedings{DBLP:conf/acl/FanJPGWA19,
author = {Angela Fan and
Yacine Jernite and
Ethan Perez and
David Grangier and
Jason Weston and
Michael Auli},
editor = {Anna Korhonen and
David R. Traum and
Lluis Marquez},
title = {{ELI5:} Long Form Question Answering},
booktitle = {Proceedings of the 57th Conference of the Association for Computational
Linguistics, {ACL} 2019, Florence, Italy, July 28- August 2, 2019,
Volume 1: Long Papers},
pages = {3558--3567},
publisher = {Association for Computational Linguistics},
year = {2019},
url = {https://doi.org/10.18653/v1/p19-1346},
doi = {10.18653/v1/p19-1346},
}
"""
class Eli5Config(nlp.BuilderConfig):
"""BuilderConfig for ExplainLikeImFive."""
def __init__(self, **kwargs):
"""BuilderConfig for ExplainLikeImFive.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Eli5Config, self).__init__(**kwargs)
class Eli5(nlp.GeneratorBasedBuilder):
"""ELI5: Explain Like I'm Five long form question answering dataset."""
BUILDER_CONFIG_CLASS = Eli5Config
_DATA_SPLIT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/eli5/reddit_data_split.json"
BUILDER_CONFIGS = [
Eli5Config(name="LFQA_reddit", version=nlp.Version("1.0.0"), description="long from QA subreddits"),
]
test_dummy_data = False
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"q_id": nlp.Value("string"),
"title": nlp.Value("string"),
"selftext": nlp.Value("string"),
"document": nlp.Value("string"),
"subreddit": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"a_id": nlp.Value("string"), "text": nlp.Value("string"), "score": nlp.Value("int32")}
),
"title_urls": nlp.features.Sequence({"url": nlp.Value("string")}),
"selftext_urls": nlp.features.Sequence({"url": nlp.Value("string")}),
"answers_urls": nlp.features.Sequence({"url": nlp.Value("string")}),
}
),
supervised_keys=None,
homepage="https://facebookresearch.github.io/ELI5/explore.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
qa_data_file = pjoin(
self._cache_dir_root, self._relative_data_dir(with_version=False), "reddit_downloaded_qa_lists.json"
)
if isfile(qa_data_file):
logging.info("loading pre-computed QA list")
self.filtered_reddit = json.load(open(qa_data_file))
else:
self.filtered_reddit = _download_and_filter_reddit(
dl_manager, start_year=2011, start_month=7, end_year=2019, end_month=7
)
logging.info("saving pre-computed QA list")
json.dump(self.filtered_reddit, open(qa_data_file, "w"))
# download data splits from AWS
fpath_splits = dl_manager.download(self._DATA_SPLIT_URL)
self.data_split = json.load(open(fpath_splits))
return [
nlp.SplitGenerator(
name=nlp.Split("train_eli5"), gen_kwargs={"split": "train", "subreddit_name": "explainlikeimfive"},
),
nlp.SplitGenerator(
name=nlp.Split("validation_eli5"),
gen_kwargs={"split": "validation", "subreddit_name": "explainlikeimfive"},
),
nlp.SplitGenerator(
name=nlp.Split("test_eli5"), gen_kwargs={"split": "test", "subreddit_name": "explainlikeimfive"},
),
nlp.SplitGenerator(
name=nlp.Split("train_asks"), gen_kwargs={"split": "train", "subreddit_name": "askscience"},
),
nlp.SplitGenerator(
name=nlp.Split("validation_asks"), gen_kwargs={"split": "validation", "subreddit_name": "askscience"},
),
nlp.SplitGenerator(
name=nlp.Split("test_asks"), gen_kwargs={"split": "test", "subreddit_name": "askscience"},
),
nlp.SplitGenerator(
name=nlp.Split("train_askh"), gen_kwargs={"split": "train", "subreddit_name": "AskHistorians"},
),
nlp.SplitGenerator(
name=nlp.Split("validation_askh"),
gen_kwargs={"split": "validation", "subreddit_name": "AskHistorians"},
),
nlp.SplitGenerator(
name=nlp.Split("test_askh"), gen_kwargs={"split": "test", "subreddit_name": "AskHistorians"},
),
]
def _generate_examples(self, split, subreddit_name):
logging.info("generating examples from = {}, {} set".format(subreddit_name, split))
if split in self.data_split.get(subreddit_name, []):
id_list = self.data_split[subreddit_name][split]
data = [
self.filtered_reddit[subreddit_name][q_id]
for q_id in id_list
if q_id in self.filtered_reddit[subreddit_name]
]
elif split == "train":
data = [
self.filtered_reddit[subreddit_name][q_id]
for subreddit_name in self.filtered_reddit
for q_id in self.filtered_reddit[subreddit_name]
]
else:
data = []
for example in data:
id_ = example["id"]
title = example["title"][0]
title_urls = example["title"][1]
selftext = example["selftext"][0]
selftext_urls = example["selftext"][1]
answer_scores = [ans["score"] for ans in example["comments"]]
answer_ids = [ans["id"] for ans in example["comments"]]
# flatten list of URL mappings
url_maps = [(ul, i, j) for i, ans in enumerate(example["comments"]) for j, ul in enumerate(ans["body"][1])]
answers_urls = [ul for ul, _, _ in url_maps]
map_url_indices = dict([((i, j), k) for k, (_, i, j) in enumerate(url_maps)])
answer_texts = []
for i, ans in enumerate(example["comments"]):
txt = ans["body"][0]
for j, _ in enumerate(ans["body"][1]):
txt = txt.replace("_URL_{}_".format(j), "_URL_{}_".format(map_url_indices[(i, j)]))
answer_texts += [txt.strip()]
yield id_, {
"q_id": id_,
"title": title,
"selftext": selftext,
"document": "",
"subreddit": example.get("subreddit", subreddit_name),
"answers": {"a_id": answer_ids, "text": answer_texts, "score": answer_scores},
"title_urls": {"url": title_urls},
"selftext_urls": {"url": selftext_urls},
"answers_urls": {"url": answers_urls},
}
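# --- Hedged usage sketch (not part of the original script) ------------------
# Assuming the `nlp` library's `load_dataset` entry point, the splits declared
# in `_split_generators` above could be consumed as below. The helper name is
# hypothetical and for illustration only; building ELI5 downloads and filters
# the full monthly Reddit dumps, which is slow and disk-hungry.
def _example_iterate_eli5():
    eli5 = nlp.load_dataset("eli5", split="train_eli5")
    first = eli5[0]
    # Each example mirrors the features declared in `_info`: q_id, title,
    # selftext, answers (a_id / text / score) and the *_urls sequences.
    print(first["q_id"], first["title"])
    print(first["answers"]["text"][0][:200])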
|
nlp-master
|
datasets/eli5/eli5.py
|
"""TODO(openBookQA): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import textwrap
import nlp
# TODO(openBookQA): BibTeX citation
_CITATION = """\
@inproceedings{OpenBookQA2018,
title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
booktitle={EMNLP},
year={2018}
}
"""
# TODO(openBookQA):
_DESCRIPTION = textwrap.dedent(
"""\
OpenBookQA aims to promote research in advanced question-answering, probing a deeper understanding of both the topic
(with salient facts summarized as an open book, also provided with the dataset) and the language it is expressed in. In
particular, it contains questions that require multi-step reasoning, use of additional common and commonsense knowledge,
and rich text comprehension.
OpenBookQA is a new kind of question-answering dataset modeled after open book exams for assessing human understanding of
a subject.
"""
)
_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/OpenBookQA-V1-Sep2018.zip"
class OpenbookqaConfig(nlp.BuilderConfig):
def __init__(self, data_dir, **kwargs):
""" BuilderConfig for openBookQA dataset
Args:
data_dir: directory for the given dataset name
**kwargs: keyword arguments forwarded to super.
"""
super(OpenbookqaConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.data_dir = data_dir
class Openbookqa(nlp.GeneratorBasedBuilder):
"""TODO(openBookQA): Short description of my dataset."""
# TODO(openBookQA): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
OpenbookqaConfig(
name="main",
description=textwrap.dedent(
"""
It consists of 5,957 multiple-choice elementary-level science questions (4,957 train, 500 dev, 500 test),
which probe the understanding of a small “book” of 1,326 core science facts and the application of these facts to novel
situations. For training, the dataset includes a mapping from each question to the core science fact it was designed to
probe. Answering OpenBookQA questions requires additional broad common knowledge, not contained in the book. The questions,
by design, are answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. Strong neural
baselines achieve around 50% on OpenBookQA, leaving a large gap to the 92% accuracy of crowd-workers.
"""
),
data_dir="Main",
),
OpenbookqaConfig(
name="additional",
description=textwrap.dedent(
"""
Additionally, we provide 5,167 crowd-sourced common knowledge facts, and an expanded version of the train/dev/test questions where
each question is associated with its originating core fact, a human accuracy score, a clarity score, and an anonymized crowd-worker
ID (in the “Additional” folder).
"""
),
data_dir="Additional",
),
]
def _info(self):
# TODO(openBookQA): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"id": nlp.Value("string"),
"question_stem": nlp.Value("string"),
"choices": nlp.features.Sequence({"text": nlp.Value("string"), "label": nlp.Value("string")}),
"answerKey": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/open-book-qa",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(openBookQA): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "OpenBookQA-V1-Sep2018", "Data")
data_dir = os.path.join(data_dir, self.config.data_dir)
train_file = (
os.path.join(data_dir, "train.jsonl")
if self.config.name == "main"
else os.path.join(data_dir, "train_complete.jsonl")
)
test_file = (
os.path.join(data_dir, "test.jsonl")
if self.config.name == "main"
else os.path.join(data_dir, "test_complete.jsonl")
)
dev_file = (
os.path.join(data_dir, "dev.jsonl")
if self.config.name == "main"
else os.path.join(data_dir, "dev_complete.jsonl")
)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": train_file},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": test_file},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dev_file},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(openBookQA): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for row in f:
data = json.loads(row)
yield data["id"], {
"id": data["id"],
"question_stem": data["question"]["stem"],
"choices": {
"text": [choice["text"] for choice in data["question"]["choices"]],
"label": [choice["text"] for choice in data["question"]["choices"]],
},
"answerKey": data["answerKey"],
}
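# --- Hedged illustration (not part of the original script) ------------------
# `_generate_examples` expects one JSON object per line with a nested
# "question" block. The helper below is hypothetical and the sample values are
# invented; it only shows the (key, example) shape produced by the loop above.
def _example_parse_openbookqa_row():
    sample = (
        '{"id": "7-980", "question": {"stem": "Which is a conductor?", '
        '"choices": [{"text": "copper wire", "label": "A"}, '
        '{"text": "rubber band", "label": "B"}]}, "answerKey": "A"}'
    )
    data = json.loads(sample)
    return data["id"], {
        "id": data["id"],
        "question_stem": data["question"]["stem"],
        "choices": {
            "text": [choice["text"] for choice in data["question"]["choices"]],
            "label": [choice["label"] for choice in data["question"]["choices"]],
        },
        "answerKey": data["answerKey"],
    }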
|
nlp-master
|
datasets/openbookqa/openbookqa.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT14: Translate dataset."""
import nlp
from .wmt_utils import Wmt, WmtConfig
_URL = "http://www.statmt.org/wmt14/translation-task.html"
_CITATION = """
@InProceedings{bojar-EtAl:2014:W14-33,
author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale\v{s}},
title = {Findings of the 2014 Workshop on Statistical Machine Translation},
booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation},
month = {June},
year = {2014},
address = {Baltimore, Maryland, USA},
publisher = {Association for Computational Linguistics},
pages = {12--58},
url = {http://www.aclweb.org/anthology/W/W14/W14-3302}
}
"""
_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fr", "hi", "ru"]]
class Wmt14(Wmt):
"""WMT 14 translation datasets for all {xx, "en"} language pairs."""
# Version history:
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
BUILDER_CONFIGS = [
WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2014 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=nlp.Version("1.0.0"),
)
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def manual_download_instructions(self):
if self.config.language_pair[1] in ["cs", "hi", "ru"]:
return "Please download the data manually as explained. TODO(PVP)"
return None
@property
def _subsets(self):
return {
nlp.Split.TRAIN: [
"europarl_v7",
"commoncrawl",
"multiun",
"newscommentary_v9",
"gigafren",
"czeng_10",
"yandexcorpus",
"wikiheadlines_hi",
"wikiheadlines_ru",
"hindencorp_01",
],
nlp.Split.VALIDATION: ["newsdev2014", "newstest2013"],
nlp.Split.TEST: ["newstest2014"],
}
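# --- Hedged usage sketch (not part of the original file) --------------------
# The configs above are named "<src>-en" (e.g. "de-en"). Assuming the `nlp`
# library's `load_dataset` entry point, a pair that needs no manual downloads
# (de-en or fr-en here) could be loaded like this; the helper name is
# hypothetical and for illustration only.
def _example_load_wmt14_de_en():
    dataset = nlp.load_dataset("wmt14", "de-en", split="validation")
    # Each example carries a single "translation" dict keyed by language code.
    print(dataset[0]["translation"]["de"])
    print(dataset[0]["translation"]["en"])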
|
nlp-master
|
datasets/wmt14/wmt14.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""WMT: Translate dataset."""
from __future__ import absolute_import, division, print_function
import codecs
import functools
import glob
import gzip
import itertools
import logging
import os
import re
import xml.etree.cElementTree as ElementTree
from abc import ABC, abstractmethod
import six
import nlp
_DESCRIPTION = """\
Translate dataset based on the data from statmt.org.
Versions exist for the different years using a combination of multiple data
sources. The base `wmt_translate` allows you to create your own config to choose
your own data/language pair by creating a custom `nlp.translate.wmt.WmtConfig`.
```
config = nlp.wmt.WmtConfig(
version="0.0.1",
language_pair=("fr", "de"),
subsets={
nlp.Split.TRAIN: ["commoncrawl_frde"],
nlp.Split.VALIDATION: ["euelections_dev2019"],
},
)
builder = nlp.builder("wmt_translate", config=config)
```
"""
CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
class SubDataset(object):
"""Class to keep track of information on a sub-dataset of WMT."""
def __init__(self, name, target, sources, url, path, manual_dl_files=None):
"""Sub-dataset of WMT.
Args:
name: `string`, a unique dataset identifier.
target: `string`, the target language code.
sources: `set<string>`, the set of source language codes.
url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
where to download the raw data from. If two strings are provided, the
first is used for the source language and the second for the target.
Template strings can either contain '{src}' placeholders that will be
filled in with the source language code, '{0}' and '{1}' placeholders
that will be filled in with the source and target language codes in
alphabetical order, or all 3.
path: `string` or `(string, string)`, path(s) or path template(s)
            specifying the path to the raw data relative to the root of the
downloaded archive. If two strings are provided, the dataset is assumed
to be made up of parallel text files, the first being the source and the
second the target. If one string is provided, both languages are assumed
to be stored within the same file and the extension is used to determine
how to parse it. Template strings should be formatted the same as in
`url`.
manual_dl_files: `<list>(string)` (optional), the list of files that must
be manually downloaded to the data directory.
"""
self._paths = (path,) if isinstance(path, six.string_types) else path
self._urls = (url,) if isinstance(url, six.string_types) else url
self._manual_dl_files = manual_dl_files if manual_dl_files else []
self.name = name
self.target = target
self.sources = set(sources)
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s and "{1}" and "{src}" in s:
return s.format(*sorted([src, self.target]), src=src)
elif "{0}" in s and "{1}" in s:
return s.format(*sorted([src, self.target]))
elif "{src}" in s:
return s.format(src=src)
else:
return s
return [_format_string(s) for s in strings]
def get_url(self, src):
return self._inject_language(src, self._urls)
def get_manual_dl_files(self, src):
return self._inject_language(src, self._manual_dl_files)
def get_path(self, src):
return self._inject_language(src, self._paths)
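# --- Hedged illustration (not part of the original file) --------------------
# `_inject_language` fills the '{src}', '{0}' and '{1}' templates described in
# the constructor docstring. The SubDataset below is a toy instance invented
# only to show the substitution; it does not correspond to a real WMT resource.
def _example_inject_language():
    toy = SubDataset(
        name="toy",
        target="en",
        sources={"de"},
        url="http://example.org/corpus.{0}-{1}.tsv.gz",
        path=("corpus.{src}-en.{src}", "corpus.{src}-en.en"),
    )
    # '{0}'/'{1}' are the language codes in alphabetical order and '{src}' is
    # the source code, so for src="de" this returns
    # (['http://example.org/corpus.de-en.tsv.gz'],
    #  ['corpus.de-en.de', 'corpus.de-en.en'])
    return toy.get_url("de"), toy.get_path("de")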
# Subsets used in the training sets for various years of WMT.
_TRAIN_SUBSETS = [
# pylint:disable=line-too-long
SubDataset(
name="commoncrawl",
target="en", # fr-de pair in commoncrawl_frde
sources={"cs", "de", "es", "fr", "ru"},
url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
),
SubDataset(
name="commoncrawl_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
),
path=("", ""),
),
SubDataset(
name="czeng_10",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng10",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="czeng_16pre",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
path="",
),
SubDataset(
name="czeng_16",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
# This dataset differs from the above in the filtering that is applied
# during parsing.
name="czeng_17",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng",
manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
# Each tar contains multiple files, which we process specially in
# _parse_czeng.
path=("data.plaintext-format/??train.gz",) * 10,
),
SubDataset(
name="dcep_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
),
SubDataset(
name="europarl_v7",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
),
SubDataset(
name="europarl_v7_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
),
path=("", ""),
),
SubDataset(
name="europarl_v8_18",
target="en",
sources={"et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v8_16",
target="en",
sources={"fi", "ro"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
),
SubDataset(
name="europarl_v9",
target="en",
sources={"cs", "de", "fi", "lt"},
url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="gigafren",
target="en",
sources={"fr"},
url="http://www.statmt.org/wmt10/training-giga-fren.tar",
path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
),
SubDataset(
name="hindencorp_01",
target="en",
sources={"hi"},
url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
manual_dl_files=["hindencorp0.1.gz"],
path="",
),
SubDataset(
name="leta_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
),
SubDataset(
name="multiun",
target="en",
sources={"es", "fr"},
url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
),
SubDataset(
name="newscommentary_v9",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
),
SubDataset(
name="newscommentary_v10",
target="en",
sources={"cs", "de", "fr", "ru"},
url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
),
SubDataset(
name="newscommentary_v11",
target="en",
sources={"cs", "de", "ru"},
url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
path=(
"training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
"training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v12",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
),
SubDataset(
name="newscommentary_v13",
target="en",
sources={"cs", "de", "ru", "zh"},
url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
path=(
"training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
"training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
),
),
SubDataset(
name="newscommentary_v14",
target="en", # fr-de pair in newscommentary_v14_frde
sources={"cs", "de", "kk", "ru", "zh"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
path="",
),
SubDataset(
name="newscommentary_v14_frde",
target="de",
sources={"fr"},
url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
path="",
),
SubDataset(
name="onlinebooks_v1",
target="en",
sources={"lv"},
url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
path=("farewell/farewell.lv", "farewell/farewell.en"),
),
SubDataset(
name="paracrawl_v1",
target="en",
sources={"cs", "de", "et", "fi", "ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
"paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v1_ru",
target="en",
sources={"ru"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
path=(
"paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
"paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
),
),
SubDataset(
name="paracrawl_v3",
target="en", # fr-de pair in paracrawl_v3_frde
sources={"cs", "de", "fi", "lt"},
url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
path="",
),
SubDataset(
name="paracrawl_v3_frde",
target="de",
sources={"fr"},
url=(
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
"http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
),
path=("", ""),
),
SubDataset(
name="rapid_2016",
target="en",
sources={"de", "et", "fi"},
url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
),
SubDataset(
name="rapid_2016_ltfi",
target="en",
sources={"fi", "lt"},
url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
path="rapid2016.en-{src}.tmx",
),
SubDataset(
name="rapid_2019",
target="en",
sources={"de"},
url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
),
SubDataset(
name="setimes_2",
target="en",
sources={"ro", "tr"},
url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
path="",
),
SubDataset(
name="uncorpus_v1",
target="en",
sources={"ru", "zh"},
url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
),
SubDataset(
name="wikiheadlines_fi",
target="en",
sources={"fi"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/fi-en/titles.fi-en",
),
SubDataset(
name="wikiheadlines_hi",
target="en",
sources={"hi"},
url="http://www.statmt.org/wmt14/wiki-titles.tgz",
path="wiki/hi-en/wiki-titles.hi-en",
),
SubDataset(
# Verified that wmt14 and wmt15 files are identical.
name="wikiheadlines_ru",
target="en",
sources={"ru"},
url="http://www.statmt.org/wmt15/wiki-titles.tgz",
path="wiki/ru-en/wiki.ru-en",
),
SubDataset(
name="wikititles_v1",
target="en",
sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
path="",
),
SubDataset(
name="yandexcorpus",
target="en",
sources={"ru"},
url="https://translate.yandex.ru/corpus?lang=en",
manual_dl_files=["1mcorpus.zip"],
path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
),
# pylint:enable=line-too-long
] + [
SubDataset( # pylint:disable=g-complex-comprehension
name=ss,
target="en",
sources={"zh"},
url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss,
path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
)
for ss in CWMT_SUBSET_NAMES
]
_DEV_SUBSETS = [
SubDataset(
name="euelections_dev2019",
target="de",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
),
SubDataset(
name="newsdev2014",
target="en",
sources={"hi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
),
SubDataset(
name="newsdev2015",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2016",
target="en",
sources={"ro", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2017",
target="en",
sources={"lv", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2018",
target="en",
sources={"et"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdev2019",
target="en",
sources={"gu", "kk", "lt"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscussdev2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newssyscomb2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
),
SubDataset(
name="newstest2008",
target="en",
sources={"cs", "de", "es", "fr", "hu"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
),
SubDataset(
name="newstest2009",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
),
SubDataset(
name="newstest2010",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
),
SubDataset(
name="newstest2011",
target="en",
sources={"cs", "de", "es", "fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
),
SubDataset(
name="newstest2012",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
),
SubDataset(
name="newstest2013",
target="en",
sources={"cs", "de", "es", "fr", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
),
SubDataset(
name="newstest2014",
target="en",
sources={"cs", "de", "es", "fr", "hi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2015",
target="en",
sources={"cs", "de", "fi", "ru"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newsdiscusstest2015",
target="en",
sources={"fr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstest2016",
target="en",
sources={"cs", "de", "fi", "ro", "ru", "tr"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2016",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
),
SubDataset(
name="newstest2017",
target="en",
sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
),
SubDataset(
name="newstestB2017",
target="en",
sources={"fi"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
),
SubDataset(
name="newstest2018",
target="en",
sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
),
]
DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
_CZENG17_FILTER = SubDataset(
name="czeng17_filter",
target="en",
sources={"cs"},
url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
path="convert_czeng16_to_17.pl",
)
class WmtConfig(nlp.BuilderConfig):
"""BuilderConfig for WMT."""
def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
"""BuilderConfig for WMT.
Args:
url: The reference URL for the dataset.
citation: The paper citation for the dataset.
description: The description of the dataset.
          language_pair: pair of languages that will be used for translation. Should
            contain 2-letter coded strings. For example: ("en", "de").
          subsets: Dict[split, list[str]]. List of the subsets to use for each
            split. Note that WMT subclasses overwrite this parameter.
**kwargs: keyword arguments forwarded to super.
"""
name = "%s-%s" % (language_pair[0], language_pair[1])
if "name" in kwargs: # Add name suffix for custom configs
name += "." + kwargs.pop("name")
super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
self.url = url or "http://www.statmt.org"
self.citation = citation
self.language_pair = language_pair
self.subsets = subsets
# TODO(PVP): remove when manual dir works
# +++++++++++++++++++++
if language_pair[1] in ["cs", "hi", "ru"]:
assert NotImplementedError(
"The dataset for {}-en is currently not fully supported.".format(language_pair[1])
)
# +++++++++++++++++++++
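# --- Hedged usage sketch (not part of the original file) --------------------
# Mirrors the custom-config example from _DESCRIPTION, using the class defined
# above directly. The subset names must exist in DATASET_MAP; "commoncrawl_frde"
# and "euelections_dev2019" are defined in _TRAIN_SUBSETS / _DEV_SUBSETS above.
def _example_custom_wmt_config():
    return WmtConfig(
        version=nlp.Version("0.0.1"),
        language_pair=("fr", "de"),
        subsets={
            nlp.Split.TRAIN: ["commoncrawl_frde"],
            nlp.Split.VALIDATION: ["euelections_dev2019"],
        },
    )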
class Wmt(ABC, nlp.GeneratorBasedBuilder):
"""WMT translation dataset."""
def __init__(self, *args, **kwargs):
if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
raise ValueError(
"The raw `wmt_translate` can only be instantiated with the config "
"kwargs. You may want to use one of the `wmtYY_translate` "
"implementation instead to get the WMT dataset for a specific year."
)
super(Wmt, self).__init__(*args, **kwargs)
@property
@abstractmethod
def _subsets(self):
"""Subsets that make up each split of the dataset."""
raise NotImplementedError("This is a abstract method")
@property
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
dataset = DATASET_MAP[ss_name]
if dataset.target != target or source not in dataset.sources:
logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets
def _info(self):
src, target = self.config.language_pair
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features({"translation": nlp.features.Translation(languages=self.config.language_pair)}),
supervised_keys=(src, target),
homepage=self.config.url,
citation=self.config.citation,
)
def _vocab_text_gen(self, split_subsets, extraction_map, language):
for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
yield ex[language]
def _split_generators(self, dl_manager):
source, _ = self.config.language_pair
manual_paths_dict = {}
urls_to_download = {}
for ss_name in itertools.chain.from_iterable(self.subsets.values()):
if ss_name == "czeng_17":
# CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
# the filtering script so we can parse out which blocks need to be
# removed.
urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
# get dataset
dataset = DATASET_MAP[ss_name]
if dataset.get_manual_dl_files(source):
# TODO(PVP): following two lines skip configs that are incomplete for now
# +++++++++++++++++++++
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
manual_dl_files = dataset.get_manual_dl_files(source)
manual_paths = [
os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
for fname in manual_dl_files
]
assert all(
os.path.exists(path) for path in manual_paths
), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
)
# set manual path for correct subset
manual_paths_dict[ss_name] = manual_paths
else:
urls_to_download[ss_name] = dataset.get_url(source)
# Download and extract files from URLs.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Extract manually downloaded files.
manual_files = dl_manager.extract(manual_paths_dict)
extraction_map = dict(downloaded_files, **manual_files)
for language in self.config.language_pair:
self._vocab_text_gen(self.subsets[nlp.Split.TRAIN], extraction_map, language)
return [
nlp.SplitGenerator( # pylint:disable=g-complex-comprehension
name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
)
for split, split_subsets in self.subsets.items()
]
def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
"""Returns the examples in the raw (text) form."""
source, _ = self.config.language_pair
def _get_local_paths(dataset, extract_dirs):
rel_paths = dataset.get_path(source)
if len(extract_dirs) == 1:
extract_dirs = extract_dirs * len(rel_paths)
return [
os.path.join(ex_dir, rel_path) if rel_path else ex_dir
for ex_dir, rel_path in zip(extract_dirs, rel_paths)
]
for ss_name in split_subsets:
# TODO(PVP) remove following five lines when manual data works
# +++++++++++++++++++++
dataset = DATASET_MAP[ss_name]
source, _ = self.config.language_pair
if dataset.get_manual_dl_files(source):
logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
continue
# +++++++++++++++++++++
logging.info("Generating examples from: %s", ss_name)
dataset = DATASET_MAP[ss_name]
extract_dirs = extraction_map[ss_name]
files = _get_local_paths(dataset, extract_dirs)
if ss_name.startswith("czeng"):
if ss_name.endswith("16pre"):
sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
elif ss_name.endswith("17"):
filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
else:
sub_generator = _parse_czeng
elif ss_name == "hindencorp_01":
sub_generator = _parse_hindencorp
elif len(files) == 2:
if ss_name.endswith("_frde"):
sub_generator = _parse_frde_bitext
else:
sub_generator = _parse_parallel_sentences
elif len(files) == 1:
fname = files[0]
# Note: Due to formatting used by `download_manager`, the file
# extension may not be at the end of the file path.
if ".tsv" in fname:
sub_generator = _parse_tsv
elif (
ss_name.startswith("newscommentary_v14")
or ss_name.startswith("europarl_v9")
or ss_name.startswith("wikititles_v1")
):
sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
sub_generator = _parse_tmx
elif ss_name.startswith("wikiheadlines"):
sub_generator = _parse_wikiheadlines
else:
raise ValueError("Unsupported file format: %s" % fname)
else:
raise ValueError("Invalid number of files: %d" % len(files))
for sub_key, ex in sub_generator(*files):
if not all(ex.values()):
continue
# TODO(adarob): Add subset feature.
# ex["subset"] = subset
key = "{}/{}".format(ss_name, sub_key)
if with_translation is True:
ex = {"translation": ex}
yield key, ex
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
return g.read().decode("utf-8").split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with open(path, "rb") as f:
return f.read().decode("utf-8").split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with open(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = sorted(glob.glob(f1))
f2_files = sorted(glob.glob(f2))
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files),
len(f2_files),
f1,
f2,
)
for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences),
len(l2_sentences),
f1_i,
f2_i,
)
for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
key = "{}/{}".format(f_id, line_id)
yield key, {l1: s1, l2: s2}
def _parse_frde_bitext(fr_path, de_path):
with open(fr_path) as f:
fr_sentences = f.read().split("\n")
with open(de_path) as f:
de_sentences = f.read().split("\n")
assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
len(fr_sentences),
len(de_sentences),
fr_path,
de_path,
)
for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
yield line_id, {"fr": s1, "de": s2}
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with open(path, "rb") as f:
if six.PY3:
# Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f)
else:
utf_f = f
for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
if elem.tag == "tu":
yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
elem.clear()
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with open(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
continue
s1, s2 = cols
yield j, {l1: s1.strip(), l2: s2.strip()}
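# --- Hedged illustration (not part of the original file) --------------------
# `_parse_tsv` recovers the language pair from a ".<l1>-<l2>.tsv" suffix,
# yields one {l1: ..., l2: ...} dict per well-formed line and skips malformed
# ones. The helper writes a toy two-line TSV to a temporary file purely to
# show that behaviour; the helper name and sample content are invented.
def _example_parse_tsv():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".de-en.tsv", delete=False) as tmp:
        tmp.write("Hallo Welt\tHello world\n")
        tmp.write("malformed line without a tab\n")
        path = tmp.name
    # Returns [(0, {'de': 'Hallo Welt', 'en': 'Hello world'})]; the second line
    # is skipped with a warning.
    return list(_parse_tsv(path))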
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with open(path) as f:
for line_id, line in enumerate(f):
s1, s2 = line.split("|||")
yield line_id, {l1: s1.strip(), l2: s2.strip()}
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with open(filter_path) as f:
bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
for path in paths:
for gz_path in sorted(glob.glob(path)):
with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
filename = os.path.basename(gz_path)
for line_id, line in enumerate(f):
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
sub_key = "{}/{}".format(filename, line_id)
yield sub_key, {
"cs": cs.strip(),
"en": en.strip(),
}
def _parse_hindencorp(path):
with open(path) as f:
for line_id, line in enumerate(f):
split_line = line.split("\t")
if len(split_line) != 5:
logging.warning("Skipping invalid HindEnCorp line: %s", line)
continue
yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
|
nlp-master
|
datasets/wmt14/wmt_utils.py
|
"""TODO(qangaroo): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(qangaroo): BibTeX citation
_CITATION = """
"""
# TODO(qangaroo):
_DESCRIPTION = """\
We have created two new Reading Comprehension datasets focussing on multi-hop (alias multi-step) inference.
Several pieces of information often jointly imply another fact. In multi-hop inference, a new fact is derived by combining facts via a chain of multiple steps.
Our aim is to build Reading Comprehension methods that perform multi-hop inference on text, where individual facts are spread out across different documents.
The two QAngaroo datasets provide a training and evaluation resource for such methods.
"""
_MEDHOP_DESCRIPTION = """\
With the same format as WikiHop, this dataset is based on research paper abstracts from PubMed, and the queries are about interactions between pairs of drugs.
The correct answer has to be inferred by combining information from a chain of reactions of drugs and proteins.
"""
_WIKIHOP_DESCRIPTION = """\
With the same format as WikiHop, this dataset is based on research paper abstracts from PubMed, and the queries are about interactions between pairs of drugs.
The correct answer has to be inferred by combining information from a chain of reactions of drugs and proteins.
"""
_URL = "https://drive.google.com/uc?export=download&id=1ytVZ4AhubFDOEL7o7XrIRIyhU8g9wvKA"
class QangarooConfig(nlp.BuilderConfig):
def __init__(self, data_dir, **kwargs):
""" BuilderConfig for qangaroo dataset
Args:
data_dir: directory for the given dataset name
**kwargs: keyword arguments forwarded to super.
"""
super(QangarooConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.data_dir = data_dir
class Qangaroo(nlp.GeneratorBasedBuilder):
"""TODO(qangaroo): Short description of my dataset."""
# TODO(qangaroo): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
QangarooConfig(name="medhop", description=_MEDHOP_DESCRIPTION, data_dir="medhop"),
QangarooConfig(name="masked_medhop", description=_MEDHOP_DESCRIPTION, data_dir="medhop"),
QangarooConfig(name="wikihop", description=_WIKIHOP_DESCRIPTION, data_dir="wikihop"),
QangarooConfig(name="masked_wikihop", description=_WIKIHOP_DESCRIPTION, data_dir="wikihop"),
]
def _info(self):
# TODO(qangaroo): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"query": nlp.Value("string"),
"supports": nlp.features.Sequence({"support": nlp.Value("string")}),
"candidates": nlp.features.Sequence({"candidate": nlp.Value("string")}),
"answer": nlp.Value("string"),
"id": nlp.Value("string")
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://qangaroo.cs.ucl.ac.uk/index.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(qangaroo): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "qangaroo_v1.1")
train_file = "train.masked.json" if "masked" in self.config.name else "train.json"
dev_file = "dev.masked.json" if "masked" in self.config.name else "dev.json"
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.data_dir, train_file)},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.data_dir, dev_file)},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
        # TODO(qangaroo): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for example in data:
id_ = example["id"]
yield id_, {
"id": example["id"],
"query": example["query"],
"supports": {"support": example["supports"]},
"candidates": {"candidate": example["candidates"]},
"answer": example["answer"],
}
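# --- Hedged illustration (not part of the original script) ------------------
# `_generate_examples` expects a JSON list of objects with "id", "query",
# "supports", "candidates" and "answer" keys. The record below is invented and
# only shows the shape of one yielded (key, example) pair.
def _example_qangaroo_record():
    sample = {
        "id": "WH_train_0",
        "query": "country_of_citizenship marie curie",
        "supports": ["Marie Curie was a Polish and naturalised-French physicist."],
        "candidates": ["france", "poland"],
        "answer": "poland",
    }
    return sample["id"], {
        "id": sample["id"],
        "query": sample["query"],
        "supports": {"support": sample["supports"]},
        "candidates": {"candidate": sample["candidates"]},
        "answer": sample["answer"],
    }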
|
nlp-master
|
datasets/qangaroo/qangaroo.py
|
"""TODO(social_i_qa): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(social_i_qa): BibTeX citation
_CITATION = """
"""
# TODO(social_i_qa):
_DESCRIPTION = """\
We introduce Social IQa: Social Interaction QA, a new question-answering benchmark for testing social commonsense intelligence. Contrary to many prior benchmarks that focus on physical or taxonomic knowledge, Social IQa focuses on reasoning about people’s actions and their social implications. For example, given an action like "Jesse saw a concert" and a question like "Why did Jesse do this?", humans can easily infer that Jesse wanted "to see their favorite performer" or "to enjoy the music", and not "to see what's happening inside" or "to see if it works". The actions in Social IQa span a wide variety of social situations, and answer candidates contain both human-curated answers and adversarially-filtered machine-generated candidates. Social IQa contains over 37,000 QA pairs for evaluating models’ abilities to reason about the social implications of everyday events and situations.
"""
_URL = "https://storage.googleapis.com/ai2-mosaic/public/socialiqa/socialiqa-train-dev.zip"
class SocialIQa(nlp.GeneratorBasedBuilder):
"""TODO(social_i_qa): Short description of my dataset."""
# TODO(social_i_qa): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(social_i_qa): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
# These are the features of your dataset like images, labels ...
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answerA": nlp.Value("string"),
"answerB": nlp.Value("string"),
"answerC": nlp.Value("string"),
"label": nlp.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://leaderboard.allenai.org/socialiqa/submissions/get-started",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(social_i_qa): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
dl_dir = os.path.join(dl_dir, "socialiqa-train-dev")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(dl_dir, "train.jsonl"),
"labelpath": os.path.join(dl_dir, "train-labels.lst"),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(dl_dir, "dev.jsonl"),
"labelpath": os.path.join(dl_dir, "dev-labels.lst"),
},
),
]
def _generate_examples(self, filepath, labelpath):
"""Yields examples."""
# TODO(social_i_qa): Yields (key, example) tuples from the dataset
with open(labelpath) as f:
            labels = [label.strip() for label in f]
with open(filepath) as f1:
for id_, row in enumerate(f1):
data = json.loads(row)
label = labels[id_]
context = data["context"]
answerA = data["answerA"]
answerB = data["answerB"]
answerC = data["answerC"]
question = data["question"]
yield id_, {
"context": context,
"question": question,
"answerA": answerA,
"answerB": answerB,
"answerC": answerC,
"label": label,
}
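# --- Hedged illustration (not part of the original script) ------------------
# Each train/dev line is a JSON object with a context, a question and three
# answer options; the gold label lives in a parallel .lst file with one label
# per line. The rows below are invented and only show the pairing logic used
# in `_generate_examples`.
def _example_social_iqa_pairing():
    rows = [
        '{"context": "Alex cooked dinner for the family.", '
        '"question": "Why did Alex do this?", '
        '"answerA": "to feed everyone", "answerB": "to go for a run", '
        '"answerC": "to go to sleep"}'
    ]
    labels = ["1\n"]
    for id_, (row, label) in enumerate(zip(rows, labels)):
        data = json.loads(row)
        yield id_, {**data, "label": label.strip()}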
|
nlp-master
|
datasets/social_i_qa/social_i_qa.py
|
"""TODO(drop): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(drop): BibTeX citation
_CITATION = """\
@inproceedings{Dua2019DROP,
author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},
booktitle={Proc. of NAACL},
year={2019}
}
"""
# TODO(drop):
_DESCRIPTION = """\
DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.
DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a
question, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or
sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was
necessary for prior datasets.
"""
_URl = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
class Drop(nlp.GeneratorBasedBuilder):
"""TODO(drop): Short description of my dataset."""
# TODO(drop): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(drop): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"passage": nlp.Value("string"),
"question": nlp.Value("string"),
"answers_spans": nlp.features.Sequence({"spans": nlp.Value("string")})
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allennlp.org/drop",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(drop): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
        dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "drop_dataset")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_train.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_dev.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(drop): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for i, key in enumerate(data):
example = data[key]
qa_pairs = example["qa_pairs"]
for j, qa in enumerate(qa_pairs):
question = qa["question"]
answers = qa["answer"]["spans"]
yield str(i) + "_" + str(j), {
"passage": example["passage"],
"question": question,
"answers_spans": {"spans": answers},
}
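# --- Hedged illustration (not part of the original script) ------------------
# The DROP JSON is a dict keyed by passage id, each entry holding a "passage"
# and a list of "qa_pairs". The record below is invented and only illustrates
# the (key, example) pairs produced by the loop above.
def _example_drop_records():
    data = {
        "nfl_1184": {
            "passage": "The Bears scored 10 points in the first quarter ...",
            "qa_pairs": [
                {"question": "How many points did the Bears score in the first quarter?",
                 "answer": {"spans": ["10"]}},
            ],
        }
    }
    for i, key in enumerate(data):
        example = data[key]
        for j, qa in enumerate(example["qa_pairs"]):
            yield str(i) + "_" + str(j), {
                "passage": example["passage"],
                "question": qa["question"],
                "answers_spans": {"spans": qa["answer"]["spans"]},
            }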
|
nlp-master
|
datasets/drop/drop.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Movie reviews with human annotated rationales."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """
@unpublished{eraser2019,
title = {ERASER: A Benchmark to Evaluate Rationalized NLP Models},
author = {Jay DeYoung and Sarthak Jain and Nazneen Fatema Rajani and Eric Lehman and Caiming Xiong and Richard Socher and Byron C. Wallace}
}
@InProceedings{zaidan-eisner-piatko-2008:nips,
author = {Omar F. Zaidan and Jason Eisner and Christine Piatko},
title = {Machine Learning with Annotator Rationales to Reduce Annotation Cost},
booktitle = {Proceedings of the NIPS*2008 Workshop on Cost Sensitive Learning},
month = {December},
year = {2008}
}
"""
_DESCRIPTION = """
The movie rationale dataset contains human annotated rationales for movie
reviews.
"""
_DOWNLOAD_URL = "http://www.eraserbenchmark.com/zipped/movies.tar.gz"
class MovieRationales(nlp.GeneratorBasedBuilder):
"""Movie reviews with human annotated rationales."""
VERSION = nlp.Version("0.1.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"review": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["NEG", "POS"]),
"evidences": nlp.features.Sequence(nlp.Value("string")),
}
),
supervised_keys=None,
homepage="http://www.cs.jhu.edu/~ozaidan/rationales/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
data_dir = os.path.join(dl_dir, "movies")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "val.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "test.jsonl")},
),
]
def _generate_examples(self, data_dir, filepath):
"""Yields examples."""
reviews_dir = os.path.join(data_dir, "docs")
with open(filepath) as f:
for line in f:
row = json.loads(line)
doc_id = row["annotation_id"]
review_file = os.path.join(reviews_dir, doc_id)
with open(review_file) as f1:
review_text = f1.read()
evidences = []
for evidence in row["evidences"]:
for e in evidence:
evidences.append(e["text"])
yield doc_id, {
"review": review_text,
"label": row["classification"],
"evidences": evidences,
}
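# Hedged usage sketch (illustrative only, not part of the upstream script); assumes the
# builder is registered as "movie_rationales".
if __name__ == "__main__":
    movies = nlp.load_dataset("movie_rationales")
    ex = movies["train"][0]
    print(movies["train"].features["label"].names[ex["label"]])
    print(ex["evidences"][:2])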
|
nlp-master
|
datasets/movie_rationales/movie_rationales.py
|
# coding=utf-8
from dataclasses import dataclass
import pyarrow as pa
import pyarrow.json as paj
import nlp
@dataclass
class JsonConfig(nlp.BuilderConfig):
"""BuilderConfig for JSON."""
read_options: paj.ReadOptions = paj.ReadOptions()
parse_options: paj.ParseOptions = paj.ParseOptions()
@property
def pa_read_options(self):
return self.read_options
@property
def pa_parse_options(self):
return self.parse_options
class Json(nlp.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
return nlp.DatasetInfo()
def _split_generators(self, dl_manager):
""" We handle string, list and dicts in datafiles
"""
if isinstance(self.config.data_files, (str, list, tuple)):
files = self.config.data_files
if isinstance(files, str):
files = [files]
return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name in [nlp.Split.TRAIN, nlp.Split.VALIDATION, nlp.Split.TEST]:
if split_name in self.config.data_files:
files = self.config.data_files[split_name]
if isinstance(files, str):
files = [files]
splits.append(nlp.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _generate_tables(self, files):
for i, file in enumerate(files):
pa_table = paj.read_json(
file, read_options=self.config.pa_read_options, parse_options=self.config.pa_parse_options,
)
yield i, pa_table
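# Hedged usage sketch (illustrative only). The packaged "json" builder is driven entirely
# by `data_files`; the newline-delimited JSON path below is a placeholder.
if __name__ == "__main__":
    ds = nlp.load_dataset("json", data_files={"train": "path/to/records.jsonl"})
    print(len(ds["train"]))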
|
nlp-master
|
datasets/json/json.py
|
# coding=utf-8
# Copyright 2020 HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The GermEval 2014 NER Shared Task dataset."""
from __future__ import absolute_import, division, print_function
import csv
import glob
import logging
import os
from pathlib import Path
import nlp
_CITATION = """\
@inproceedings{benikova-etal-2014-nosta,
title = {NoSta-D Named Entity Annotation for German: Guidelines and Dataset},
author = {Benikova, Darina and
Biemann, Chris and
Reznicek, Marc},
booktitle = {Proceedings of the Ninth International Conference on Language Resources and Evaluation ({LREC}'14)},
month = {may},
year = {2014},
address = {Reykjavik, Iceland},
publisher = {European Language Resources Association (ELRA)},
url = {http://www.lrec-conf.org/proceedings/lrec2014/pdf/276_Paper.pdf},
pages = {2524--2531},
}
"""
_DESCRIPTION = """\
The GermEval 2014 NER Shared Task builds on a new dataset with German Named Entity annotation with the following properties:
- The data was sampled from German Wikipedia and News Corpora as a collection of citations.
- The dataset covers over 31,000 sentences corresponding to over 590,000 tokens.
- The NER annotation uses the NoSta-D guidelines, which extend the Tübingen Treebank guidelines,
using four main NER categories with sub-structure, and annotating embeddings among NEs
such as [ORG FC Kickers [LOC Darmstadt]].
"""
_URL = "https://sites.google.com/site/germeval2014ner/data/"
_TRAINING_FILE = "NER-de-train.tsv"
_DEV_FILE = "NER-de-dev.tsv"
_TEST_FILE = "NER-de-test.tsv"
class GermEval14Config(nlp.BuilderConfig):
"""BuilderConfig for GermEval 2014."""
def __init__(self, **kwargs):
"""BuilderConfig for GermEval 2014.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(GermEval14Config, self).__init__(**kwargs)
class GermEval14(nlp.GeneratorBasedBuilder):
"""GermEval 2014 NER Shared Task dataset."""
BUILDER_CONFIGS = [
GermEval14Config(
name="germeval_14", version=nlp.Version("1.0.0"), description="GermEval 2014 NER Shared Task dataset"
),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"source": nlp.Value("string"),
"tokens": nlp.Sequence(nlp.Value("string")),
"labels": nlp.Sequence(nlp.Value("string")),
"nested-labels": nlp.Sequence(nlp.Value("string")),
}
),
supervised_keys=None,
homepage="https://sites.google.com/site/germeval2014ner/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
"dev": f"{_URL}{_DEV_FILE}",
"test": f"{_URL}{_TEST_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
logging.info("⏳ Generating examples from = %s", filepath)
with open(filepath) as f:
data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
current_source = ""
current_tokens = []
current_labels = []
current_nested_labels = []
sentence_counter = 0
for row in data:
if row:
if row[0] == "#":
current_source = " ".join(row[1:])
continue
id_, token, label, nested_label = row[:4]
current_tokens.append(token)
current_labels.append(label)
current_nested_labels.append(nested_label)
else:
# New sentence
if not current_tokens:
# Consecutive empty lines will cause empty sentences
continue
assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
assert len(current_labels) == len(current_nested_labels), "💔 between len of labels & nested labels"
assert current_source, "💥 Source for new sentence was not set"
sentence = (
sentence_counter,
{
"id": str(sentence_counter),
"tokens": current_tokens,
"labels": current_labels,
"nested-labels": current_nested_labels,
"source": current_source,
},
)
sentence_counter += 1
current_tokens = []
current_labels = []
current_nested_labels = []
current_source = ""
yield sentence
# Don't forget last sentence in dataset 🧐
yield sentence_counter, {
"id": str(sentence_counter),
"tokens": current_tokens,
"labels": current_labels,
"nested-labels": current_nested_labels,
"source": current_source,
}
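# Hedged usage sketch (illustrative only); assumes the builder is registered as "germeval_14".
if __name__ == "__main__":
    germeval = nlp.load_dataset("germeval_14")
    first = germeval["train"][0]
    for token, label in zip(first["tokens"], first["labels"]):
        print(token, label)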
|
nlp-master
|
datasets/germeval_14/germeval_14.py
|
"""Allocine Dataset: A Large-Scale French Movie Reviews Dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """\
@misc{blard2019allocine,
author = {Blard, Theophile},
title = {french-sentiment-analysis-with-bert},
year = {2020},
publisher = {GitHub},
journal = {GitHub repository},
howpublished={\\url{https://github.com/TheophileBlard/french-sentiment-analysis-with-bert}},
}
"""
_DESCRIPTION = """\
Allocine Dataset: A Large-Scale French Movie Reviews Dataset.
This is a dataset for binary sentiment classification, made of user reviews scraped from Allocine.fr.
It contains 100k positive and 100k negative reviews divided into 3 balanced splits: train (160k reviews), val (20k) and test (20k).
"""
class AllocineConfig(nlp.BuilderConfig):
"""BuilderConfig for Allocine."""
def __init__(self, **kwargs):
"""BuilderConfig for Allocine.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(AllocineConfig, self).__init__(**kwargs)
class AllocineDataset(nlp.GeneratorBasedBuilder):
"""Allocine Dataset: A Large-Scale French Movie Reviews Dataset."""
_DOWNLOAD_URL = "https://github.com/TheophileBlard/french-sentiment-analysis-with-bert/raw/master/allocine_dataset/data.tar.bz2"
_TRAIN_FILE = "train.jsonl"
_VAL_FILE = "val.jsonl"
_TEST_FILE = "test.jsonl"
BUILDER_CONFIGS = [
AllocineConfig(
name="allocine",
version=nlp.Version("1.0.0"),
description="Allocine Dataset: A Large-Scale French Movie Reviews Dataset",
),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{"review": nlp.Value("string"), "label": nlp.features.ClassLabel(names=["neg", "pos"]),}
),
supervised_keys=None,
homepage="https://github.com/TheophileBlard/french-sentiment-analysis-with-bert",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
arch_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
data_dir = os.path.join(arch_path, "data")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, self._TRAIN_FILE)}
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, self._VAL_FILE)}
),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, self._TEST_FILE)}),
]
def _generate_examples(self, filepath):
"""Generate Allocine examples."""
with open(filepath) as f:
for id_, row in enumerate(f):
data = json.loads(row)
review = data["review"]
label = "neg" if data["polarity"] == 0 else "pos"
yield id_, {"review": review, "label": label}
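# Hedged usage sketch (illustrative only); assumes the builder is registered as "allocine".
if __name__ == "__main__":
    allocine = nlp.load_dataset("allocine")
    ex = allocine["train"][0]
    label_name = allocine["train"].features["label"].names[ex["label"]]
    print(label_name, ex["review"][:80])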
|
nlp-master
|
datasets/allocine/allocine.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Multi-Genre NLI Corpus."""
from __future__ import absolute_import, division, print_function
import os
import nlp
_CITATION = """\
@InProceedings{N18-1101,
author = {Williams, Adina
and Nangia, Nikita
and Bowman, Samuel},
title = {A Broad-Coverage Challenge Corpus for
Sentence Understanding through Inference},
booktitle = {Proceedings of the 2018 Conference of
the North American Chapter of the
Association for Computational Linguistics:
Human Language Technologies, Volume 1 (Long
Papers)},
year = {2018},
publisher = {Association for Computational Linguistics},
pages = {1112--1122},
location = {New Orleans, Louisiana},
url = {http://aclweb.org/anthology/N18-1101}
}
"""
_DESCRIPTION = """\
The Multi-Genre Natural Language Inference (MultiNLI) corpus is a
crowd-sourced collection of 433k sentence pairs annotated with textual
entailment information. The corpus is modeled on the SNLI corpus, but differs in
that it covers a range of genres of spoken and written text, and supports a
distinctive cross-genre generalization evaluation. The corpus served as the
basis for the shared task of the RepEval 2017 Workshop at EMNLP in Copenhagen.
"""
class MultiNLIConfig(nlp.BuilderConfig):
"""BuilderConfig for MultiNLI."""
def __init__(self, **kwargs):
"""BuilderConfig for MultiNLI.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(MultiNLIConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
class MultiNli(nlp.GeneratorBasedBuilder):
"""MultiNLI: The Stanford Question Answering Dataset. Version 1.1."""
BUILDER_CONFIGS = [
MultiNLIConfig(name="plain_text", description="Plain text",),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"premise": nlp.Value("string"),
"hypothesis": nlp.Value("string"),
"label": nlp.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
}
),
# No default supervised_keys (as we have to pass both premise
# and hypothesis as input).
supervised_keys=None,
homepage="https://www.nyu.edu/projects/bowman/multinli/",
citation=_CITATION,
)
def _vocab_text_gen(self, filepath):
for _, ex in self._generate_examples(filepath):
yield " ".join([ex["premise"], ex["hypothesis"]])
def _split_generators(self, dl_manager):
downloaded_dir = dl_manager.download_and_extract(
"http://storage.googleapis.com/tfds-data/downloads/multi_nli/multinli_1.0.zip"
)
mnli_path = os.path.join(downloaded_dir, "multinli_1.0")
train_path = os.path.join(mnli_path, "multinli_1.0_train.txt")
matched_validation_path = os.path.join(mnli_path, "multinli_1.0_dev_matched.txt")
mismatched_validation_path = os.path.join(mnli_path, "multinli_1.0_dev_mismatched.txt")
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": train_path}),
nlp.SplitGenerator(name="validation_matched", gen_kwargs={"filepath": matched_validation_path}),
nlp.SplitGenerator(name="validation_mismatched", gen_kwargs={"filepath": mismatched_validation_path}),
]
def _generate_examples(self, filepath):
"""Generate mnli examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
"""
for idx, line in enumerate(open(filepath, "rb")):
if idx == 0:
continue # skip header
line = line.strip().decode("utf-8")
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield idx, {"premise": split_line[5], "hypothesis": split_line[6], "label": split_line[0]}
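# Hedged usage sketch (illustrative only). Note the non-standard validation split names
# declared in _split_generators: "validation_matched" and "validation_mismatched".
if __name__ == "__main__":
    mnli = nlp.load_dataset("multi_nli", "plain_text")
    print(mnli["validation_matched"][0]["premise"])
    print(mnli["validation_mismatched"][0]["hypothesis"])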
|
nlp-master
|
datasets/multi_nli/multi_nli.py
|
"""TODO(ubuntu_dialogs_corpus): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import nlp
# TODO(ubuntu_dialogs_corpus): BibTeX citation
_CITATION = """\
@article{DBLP:journals/corr/LowePSP15,
author = {Ryan Lowe and
Nissan Pow and
Iulian Serban and
Joelle Pineau},
title = {The Ubuntu Dialogue Corpus: {A} Large Dataset for Research in Unstructured
Multi-Turn Dialogue Systems},
journal = {CoRR},
volume = {abs/1506.08909},
year = {2015},
url = {http://arxiv.org/abs/1506.08909},
archivePrefix = {arXiv},
eprint = {1506.08909},
timestamp = {Mon, 13 Aug 2018 16:48:23 +0200},
biburl = {https://dblp.org/rec/journals/corr/LowePSP15.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
# TODO(ubuntu_dialogs_corpus):
_DESCRIPTION = """\
Ubuntu Dialogue Corpus, a dataset containing almost 1 million multi-turn dialogues, with a total of over 7 million utterances and 100 million words. This provides a unique resource for research into building dialogue managers based on neural language models that can make use of large amounts of unlabeled data. The dataset has both the multi-turn property of conversations in the Dialog State Tracking Challenge datasets, and the unstructured nature of interactions from microblog services such as Twitter.
"""
class UbuntuDialogsCorpusConfig(nlp.BuilderConfig):
"""BuilderConfig for UbuntuDialogsCorpus."""
def __init__(self, features, **kwargs):
"""BuilderConfig for UbuntuDialogsCorpus.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(UbuntuDialogsCorpusConfig, self).__init__(version=nlp.Version("2.0.0"), **kwargs)
self.features = features
class UbuntuDialogsCorpus(nlp.GeneratorBasedBuilder):
"""TODO(ubuntu_dialogs_corpus): Short description of my dataset."""
# TODO(ubuntu_dialogs_corpus): Set up version.
VERSION = nlp.Version("2.0.0")
BUILDER_CONFIGS = [
UbuntuDialogsCorpusConfig(
name="train", features=["Context", "Utterance", "Label"], description="training features"
),
UbuntuDialogsCorpusConfig(
name="dev_test",
features=["Context", "Ground Truth Utterance"] + ["Distractor_" + str(i) for i in range(9)],
description="test and dev features",
),
]
@property
def manual_download_instructions(self):
return """\
Please download the Ubuntu Dialog Corpus from https://github.com/rkadlec/ubuntu-ranking-dataset-creator. Run ./generate.sh -t -s -l to download the
data. Other arguments are left to their default values here. Please save train.csv, test.csv and valid.csv in the same path"""
def _info(self):
# TODO(ubuntu_dialogs_corpus): Specifies the nlp.DatasetInfo object
features = {feature: nlp.Value("string") for feature in self.config.features}
if self.config.name == "train":
features["Label"] = nlp.Value("int32")
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
# These are the features of your dataset like images, labels ...
features
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/rkadlec/ubuntu-ranking-dataset-creator",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(ubuntu_dialogs_corpus): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
if self.config.name == "train":
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(manual_dir, "train.csv")},
),
]
else:
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(manual_dir, "test.csv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(manual_dir, "valid.csv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(ubuntu_dialogs_corpus): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = csv.DictReader(f)
for id_, row in enumerate(data):
yield id_, row
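# Hedged usage sketch (illustrative only). The corpus requires manually prepared CSV files
# (see manual_download_instructions); the data_dir below is a placeholder path.
if __name__ == "__main__":
    ubuntu = nlp.load_dataset("ubuntu_dialogs_corpus", "train", data_dir="path/to/csv/folder")
    print(ubuntu["train"][0]["Context"])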
|
nlp-master
|
datasets/ubuntu_dialogs_corpus/ubuntu_dialogs_corpus.py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""GAP is a gender-balanced text data set."""
from __future__ import absolute_import, division, print_function
import csv
import nlp
_CITATION = """
@article{DBLP:journals/corr/abs-1810-05201,
author = {Kellie Webster and
Marta Recasens and
Vera Axelrod and
Jason Baldridge},
title = {Mind the {GAP:} {A} Balanced Corpus of Gendered Ambiguous Pronouns},
journal = {CoRR},
volume = {abs/1810.05201},
year = {2018},
url = {http://arxiv.org/abs/1810.05201},
archivePrefix = {arXiv},
eprint = {1810.05201},
timestamp = {Tue, 30 Oct 2018 20:39:56 +0100},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1810-05201},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """
GAP is a gender-balanced dataset containing 8,908 coreference-labeled pairs of
(ambiguous pronoun, antecedent name), sampled from Wikipedia and released by
Google AI Language for the evaluation of coreference resolution in practical
applications.
"""
_TRAINURL = "https://raw.githubusercontent.com/google-research-datasets/gap-coreference/master/gap-development.tsv"
_VALIDATIONURL = "https://raw.githubusercontent.com/google-research-datasets/gap-coreference/master/gap-validation.tsv"
_TESTURL = "https://raw.githubusercontent.com/google-research-datasets/gap-coreference/master/gap-test.tsv"
class Gap(nlp.GeneratorBasedBuilder):
"""GAP is a gender-balanced dataset.
It contains 8,908 coreference-labeled pairs
of (ambiguous pronoun, antecedent name), sampled from Wikipedia.
"""
VERSION = nlp.Version("0.1.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"ID": nlp.Value("string"),
"Text": nlp.Value("string"),
"Pronoun": nlp.Value("string"),
"Pronoun-offset": nlp.Value("int32"),
"A": nlp.Value("string"),
"A-offset": nlp.Value("int32"),
"A-coref": nlp.Value("bool"),
"B": nlp.Value("string"),
"B-offset": nlp.Value("int32"),
"B-coref": nlp.Value("bool"),
"URL": nlp.Value("string"),
}
),
supervised_keys=None,
homepage="https://github.com/google-research-datasets/gap-coreference",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
directory = dl_manager.download_and_extract(
{"train": _TRAINURL, "validation": _VALIDATIONURL, "test": _TESTURL}
)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": directory["train"]},),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": directory["validation"]},),
nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": directory["test"]},),
]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath) as tsvfile:
reader = csv.DictReader(tsvfile, dialect="excel-tab")
for i, row in enumerate(reader):
row["A-coref"] = bool(row["A-coref"])
row["B-coref"] = bool(row["B-coref"])
row["A-offset"] = int(row["A-offset"])
row["B-offset"] = int(row["B-offset"])
row["Pronoun-offset"] = int(row["Pronoun-offset"])
yield i, row
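# Hedged usage sketch (illustrative only); assumes the builder is registered as "gap".
if __name__ == "__main__":
    gap = nlp.load_dataset("gap")
    row = gap["train"][0]
    print(row["Pronoun"], "at offset", row["Pronoun-offset"])
    print("A:", row["A"], "coreferent:", row["A-coref"])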
|
nlp-master
|
datasets/gap/gap.py
|
"""TODO(discofuse): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import os
import tensorflow as tf
import nlp
# TODO(discofuse): BibTeX citation
_URL_ = "https://storage.googleapis.com/discofuse_dataset_v1/"
_CITATION = """\
@InProceedings{GevaEtAl2019,
title = {DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion},
author = {Geva, Mor and Malmi, Eric and Szpektor, Idan and Berant, Jonathan},
booktitle = {Proceedings of the 2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics},
note = {arXiv preprint arXiv:1902.10526},
year = {2019}
}
"""
# TODO(discofuse):
_DESCRIPTION = """\
DISCOFUSE is a large scale dataset for discourse-based sentence fusion.
"""
class DiscofuseConfig(nlp.BuilderConfig):
""" BuilderConfig for Discofuse"""
def __init__(self, data_url, balanced=False, **kwargs):
"""
Args:
balanced: to specify if we want to load the balanced file or the full file
**kwargs: keyword arguments forwarded to super.
"""
super(DiscofuseConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.balanced = balanced
self.data_url = data_url
class Discofuse(nlp.GeneratorBasedBuilder):
"""TODO(discofuse): Short description of my dataset."""
# TODO(discofuse): Set up version.
VERSION = nlp.Version("1.0.0")
BUILDER_CONFIGS = [
DiscofuseConfig(
name="discofuse-sport", description="sentence fusion", data_url=_URL_ + "discofuse_v1_sports.tar.gz"
),
DiscofuseConfig(
name="discofuse-wikipedia", description="sentence fusion", data_url=_URL_ + "discofuse_v1_wikipedia.tar.gz"
),
]
def _info(self):
# TODO(discofuse): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"connective_string": nlp.Value("string"),
"discourse_type": nlp.Value("string"),
"coherent_second_sentence": nlp.Value("string"),
"has_coref_type_pronoun": nlp.Value("float32"),
"incoherent_first_sentence": nlp.Value("string"),
"incoherent_second_sentence": nlp.Value("string"),
"has_coref_type_nominal": nlp.Value("float32"),
"coherent_first_sentence": nlp.Value("string"),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/google-research-datasets/discofuse",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(discofuse): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
if self.config.name == "discofuse-sport":
dl_dir = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(dl_dir, "discofuse_v1/sports")
if self.config.balanced:
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train_balanced.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test_balanced.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev_balanced.tsv")},
),
]
else:
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev.tsv")},
),
]
else:
if self.config.name == "discofuse-wikipedia":
dl_dir = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(dl_dir, "discofuse_v1/wikipedia")
if self.config.balanced:
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train_balanced.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test_balanced.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev_balanced.tsv")},
),
]
else:
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev.tsv")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(discofuse): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = csv.DictReader(f, delimiter="\t")
for id_, row in enumerate(data):
co_first_sent = row["coherent_first_sentence"]
co_second_sent = row["coherent_second_sentence"]
connect_str = row["connective_string"]
discourse_type = row["discourse_type"]
has_coref_pronoun = row["has_coref_type_pronoun"]
has_coref_nominal = row["has_coref_type_nominal"]
inco_first_sent = row["incoherent_first_sentence"]
inco_second_sent = row["incoherent_second_sentence"]
yield id_, {
"connective_string": connect_str,
"discourse_type": discourse_type,
"coherent_second_sentence": co_second_sent,
"has_coref_type_pronoun": has_coref_pronoun,
"incoherent_first_sentence": inco_first_sent,
"incoherent_second_sentence": inco_second_sent,
"has_coref_type_nominal": has_coref_nominal,
"coherent_first_sentence": co_first_sent,
}
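# Hedged usage sketch (illustrative only). A config name is required, either
# "discofuse-sport" or "discofuse-wikipedia" (see BUILDER_CONFIGS above).
if __name__ == "__main__":
    fuse = nlp.load_dataset("discofuse", "discofuse-sport")
    ex = fuse["train"][0]
    print(ex["incoherent_first_sentence"], ex["incoherent_second_sentence"])
    print("->", ex["coherent_first_sentence"], ex["coherent_second_sentence"])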
|
nlp-master
|
datasets/discofuse/discofuse.py
|
"""TODO(qasc): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(qasc): BibTeX citation
_CITATION = """\
@article{allenai:qasc,
author = {Tushar Khot and Peter Clark and Michal Guerquin and Peter Jansen and Ashish Sabharwal},
title = {QASC: A Dataset for Question Answering via Sentence Composition},
journal = {arXiv:1910.11473v2},
year = {2020},
}
"""
# TODO(qasc):
_DESCRIPTION = """
QASC is a question-answering dataset with a focus on sentence composition. It consists of 9,980 8-way multiple-choice
questions about grade school science (8,134 train, 926 dev, 920 test), and comes with a corpus of 17M sentences.
"""
_URl = "http://data.allenai.org/downloads/qasc/qasc_dataset.tar.gz"
class Qasc(nlp.GeneratorBasedBuilder):
"""TODO(qasc): Short description of my dataset."""
# TODO(qasc): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(qasc): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"question": nlp.Value("string"),
"choices": nlp.features.Sequence({"text": nlp.Value("string"), "label": nlp.Value("string")}),
"answerKey": nlp.Value("string"),
"fact1": nlp.Value("string"),
"fact2": nlp.Value("string"),
"combinedfact": nlp.Value("string"),
"formatted_question": nlp.Value("string"),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/qasc",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(qasc): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
        dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "QASC_Dataset")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(qasc): Yields (key, example) tuples from the dataset
with open(filepath) as f:
for row in f:
data = json.loads(row)
answerkey = data.get("answerKey", "")
id_ = data["id"]
question = data["question"]["stem"]
choices = data["question"]["choices"]
text_choices = [choice["text"] for choice in choices]
label_choices = [choice["label"] for choice in choices]
fact1 = data.get("fact1", "")
fact2 = data.get("fact2", "")
combined_fact = data.get("combinedfact", "")
formatted_question = data.get("formatted_question", "")
yield id_, {
"id": id_,
"answerKey": answerkey,
"question": question,
"choices": {"text": text_choices, "label": label_choices},
"fact1": fact1,
"fact2": fact2,
"combinedfact": combined_fact,
"formatted_question": formatted_question,
}
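# Hedged usage sketch (illustrative only); assumes the builder is registered as "qasc".
if __name__ == "__main__":
    qasc = nlp.load_dataset("qasc")
    q = qasc["train"][0]
    print(q["formatted_question"])
    print("gold answer key:", q["answerKey"])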
|
nlp-master
|
datasets/qasc/qasc.py
|
"""TODO(xtreme): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import glob
import json
import os
import textwrap
import six
import nlp
# TODO(xtreme): BibTeX citation
_CITATION = """\
@article{hu2020xtreme,
author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},
title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},
journal = {CoRR},
volume = {abs/2003.11080},
year = {2020},
archivePrefix = {arXiv},
eprint = {2003.11080}
}
"""
# TODO(xtreme):
_DESCRIPTION = """\
The Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of
the cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages
(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of
syntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,
and availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil
(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the
Niger-Congo languages Swahili and Yoruba, spoken in Africa.
"""
_MLQA_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"]
_XQUAD_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi", "el", "ru", "th", "tr"]
_PAWSX_LANG = ["de", "en", "fr", "ja", "ko", "zh"]
_BUCC_LANG = ["de", "fr", "zh", "ru"]
_TATOEBA_LANG = [
"afr",
"ara",
"ben",
"bul",
"deu",
"cmn",
"ell",
"est",
"eus",
"fin",
"fra",
"heb",
"hin",
"hun",
"ind",
"ita",
"jav",
"jpn",
"kat",
"kaz",
"kor",
"mal",
"mar",
"nld",
"pes",
"por",
"rus",
"spa",
"swh",
"tam",
"tgl",
"tha",
"tur",
"urd",
"vie",
]
_UD_POS_LANG = [
"Afrikaans",
"Arabic",
"Basque",
"Bulgarian",
"Dutch",
"English",
"Estonian",
"Finnish",
"French",
"German",
"Greek",
"Hebrew",
"Hindi",
"Hungarian",
"Indonesian",
"Italian",
"Japanese",
"Kazakh",
"Korean",
"Chinese",
"Marathi",
"Persian",
"Portuguese",
"Russian",
"Spanish",
"Tagalog",
"Tamil",
"Telugu",
"Thai",
"Turkish",
"Urdu",
"Vietnamese",
"Yoruba",
]
_PAN_X_LANG = [
"af",
"ar",
"bg",
"bn",
"de",
"el",
"en",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"he",
"hi",
"hu",
"id",
"it",
"ja",
"jv",
"ka",
"kk",
"ko",
"ml",
"mr",
"ms",
"my",
"nl",
"pt",
"ru",
"sw",
"ta",
"te",
"th",
"tl",
"tr",
"ur",
"vi",
"yo",
"zh",
]
_PAN_X_FOLDER = "AmazonPhotos.zip"
_NAMES = ["XNLI", "tydiqa", "SQuAD"]
for lang in _PAN_X_LANG:
_NAMES.append("PAN-X.{}".format(lang))
for lang1 in _MLQA_LANG:
for lang2 in _MLQA_LANG:
_NAMES.append("MLQA.{}.{}".format(lang1, lang2))
for lang in _XQUAD_LANG:
_NAMES.append("XQuAD.{}".format(lang))
for lang in _BUCC_LANG:
_NAMES.append("bucc18.{}".format(lang))
for lang in _PAWSX_LANG:
_NAMES.append("PAWS-X.{}".format(lang))
for lang in _TATOEBA_LANG:
_NAMES.append("tatoeba.{}".format(lang))
for lang in _UD_POS_LANG:
_NAMES.append("udpos.{}".format(lang))
_DESCRIPTIONS = {
"tydiqa": textwrap.dedent(
"""Gold passage task (GoldP): Given a passage that is guaranteed to contain the
answer, predict the single contiguous span of characters that answers the question. This is more similar to
existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
only the gold answer passage is provided rather than the entire Wikipedia article;
unanswerable questions have been discarded, similar to MLQA and XQuAD;
we evaluate with the SQuAD 1.1 metrics like XQuAD; and
Thai and Japanese are removed since the lack of whitespace breaks some tools.
"""
),
"XNLI": textwrap.dedent(
"""
The Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and
2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into
14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese,
Hindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the
corresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. The corpus is made to
evaluate how to perform inference in any language (including low-resource ones like Swahili or Urdu) when only
English NLI data is available at training time. One solution is cross-lingual sentence encoding, for which XNLI
is an evaluation benchmark."""
),
"PAWS-X": textwrap.dedent(
"""
This dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training
pairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All
translated pairs are sourced from examples in PAWS-Wiki."""
),
"XQuAD": textwrap.dedent(
"""\
XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question
answering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from
the development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into
ten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,
the dataset is entirely parallel across 11 languages."""
),
"MLQA": textwrap.dedent(
"""\
MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
4 different languages on average."""
),
"tatoeba": textwrap.dedent(
"""\
This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.
For each language, we have selected 1000 English sentences and their translations, if available. Please check
this paper for a description of the languages, their families and scripts as well as baseline results.
Please note that the English sentences are not identical for all language pairs. This means that the results are
not directly comparable across languages. In particular, the sentences tend to have less variety for several
low-resource languages, e.g. "Tom needed water", "Tom needs water", "Tom is getting water", ...
"""
),
"bucc18": textwrap.dedent(
"""Building and Using Comparable Corpora
"""
),
"udpos": textwrap.dedent(
"""\
Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological
features, and syntactic dependencies) across different human languages. UD is an open community effort with over 200
contributors producing more than 100 treebanks in over 70 languages. If you’re new to UD, you should start by reading
the first part of the Short Introduction and then browsing the annotation guidelines.
"""
),
"SQuAD": textwrap.dedent(
"""\
Stanford Question Answering Dataset (SQuAD) is a reading comprehension \
dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
articles, where the answer to every question is a segment of text, or span, \
from the corresponding reading passage, or the question might be unanswerable."""
),
"PAN-X": textwrap.dedent(
"""\
The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been
constructed using the linked entities in Wikipedia pages for 282 different languages, including Danish."""
),
}
_CITATIONS = {
"tydiqa": textwrap.dedent(
(
"""\
@article{tydiqa,
title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki},
year = {2020},
journal = {Transactions of the Association for Computational Linguistics}
}"""
)
),
"XNLI": textwrap.dedent(
"""\
@InProceedings{conneau2018xnli,
author = {Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin},
title = {XNLI: Evaluating Cross-lingual Sentence Representations},
booktitle = {Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing},
year = {2018},
publisher = {Association for Computational Linguistics},
location = {Brussels, Belgium},
}"""
),
"XQuAD": textwrap.dedent(
"""
@article{Artetxe:etal:2019,
author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},
title = {On the cross-lingual transferability of monolingual representations},
journal = {CoRR},
volume = {abs/1910.11856},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.11856}
}
"""
),
"MLQA": textwrap.dedent(
"""\
@article{lewis2019mlqa,
title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
journal={arXiv preprint arXiv:1910.07475},
year={2019}
}"""
),
"PAWS-X": textwrap.dedent(
"""\
@InProceedings{pawsx2019emnlp,
title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},
author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},
booktitle = {Proc. of EMNLP},
year = {2019}
}"""
),
"tatoeba": textwrap.dedent(
"""\
@article{tatoeba,
title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},
author={Artetxe, Mikel and Schwenk, Holger},
journal={arXiv:1812.10464v2},
year={2018}
}"""
),
"bucc18": textwrap.dedent(""""""),
"udpos": textwrap.dedent(""""""),
"SQuAD": textwrap.dedent(
"""\
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}"""
),
"PAN-X": textwrap.dedent(
"""\
@inproceedings{pan-x,
title={Cross-lingual Name Tagging and Linking for 282 Languages},
author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},
booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
year={2017}
}"""
),
}
_TEXT_FEATURES = {
"XNLI": {"language": "language", "sentence1": "sentence1", "sentence2": "sentence2"},
"tydiqa": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
"XQuAD": {"id": "id", "context": "context", "question": "question", "answers": "answers"},
"MLQA": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
"tatoeba": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""},
"bucc18": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""},
"PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"},
"udpos": {"word": "", "pos_tag": ""},
"SQuAD": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
"PAN-X": {"word": "", "ner_tag": "", "lang": ""},
}
_DATA_URLS = {
"tydiqa": "https://storage.googleapis.com/tydiqa/",
"XNLI": "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip",
"XQuAD": "https://github.com/deepmind/xquad/raw/master/",
"MLQA": "https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip",
"PAWS-X": "https://storage.googleapis.com/paws/pawsx/x-final.tar.gz",
"bucc18": "https://comparable.limsi.fr/bucc2018/",
"tatoeba": "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1",
"udpos": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz",
"SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/dataset/",
"PAN-X": "",
}
_URLS = {
"tydiqa": "https://github.com/google-research-datasets/tydiqa",
"XQuAD": "https://github.com/deepmind/xquad",
"XNLI": "https://www.nyu.edu/projects/bowman/xnli/",
"MLQA": "https://github.com/facebookresearch/MLQA",
"PAWS-X": "https://github.com/google-research-datasets/paws/tree/master/pawsx",
"bucc18": "https://comparable.limsi.fr/bucc2018/",
"tatoeba": "https://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md",
"udpos": "https://universaldependencies.org/",
"SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/",
"PAN-X": "",
}
class XtremeConfig(nlp.BuilderConfig):
"""BuilderConfig for Break"""
def __init__(self, data_url, citation, url, text_features, **kwargs):
"""
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column:
label_classes
**kwargs: keyword arguments forwarded to super.
"""
super(XtremeConfig, self).__init__(
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
)
self.text_features = text_features
self.data_url = data_url
self.citation = citation
self.url = url
class Xtreme(nlp.GeneratorBasedBuilder):
"""TODO(xtreme): Short description of my dataset."""
# TODO(xtreme): Set up version.
VERSION = nlp.Version("0.1.0")
BUILDER_CONFIGS = [
XtremeConfig(
name=name,
description=_DESCRIPTIONS[name.split(".")[0]],
citation=_CITATIONS[name.split(".")[0]],
text_features=_TEXT_FEATURES[name.split(".")[0]],
data_url=_DATA_URLS[name.split(".")[0]],
url=_URLS[name.split(".")[0]],
)
for name in _NAMES
]
@property
def manual_download_instructions(self):
if self.config.name.startswith("PAN-X"):
return """\
You need to manually download the AmazonPhotos.zip file on Amazon Cloud Drive
(https://www.amazon.com/clouddrive/share/d3KGCRCIYwhKJF0H3eWA26hjg2ZCRhjpEQtDL70FSBN). The folder containing the saved file
can be used to load the dataset via `nlp.load_dataset("xtreme", data_dir="<path/to/folder>")`.
"""
return None
def _info(self):
# TODO(xtreme): Specifies the nlp.DatasetInfo object
features = {text_feature: nlp.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
if "answers" in features.keys():
features["answers"] = nlp.features.Sequence(
{"answer_start": nlp.Value("int32"), "text": nlp.Value("string")}
)
if self.config.name.startswith("PAWS-X"):
features["label"] = nlp.Value("string")
if self.config.name == "XNLI":
features["gold_label"] = nlp.Value("string")
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=self.config.description + "\n" + _DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
features
# These are the features of your dataset like images, labels ...
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/google-research/xtreme" + "\t" + self.config.url,
citation=self.config.citation + "\n" + _CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(xtreme): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
if self.config.name == "tydiqa":
train_url = "v1.1/tydiqa-goldp-v1.1-train.json"
dev_url = "v1.1/tydiqa-goldp-v1.1-dev.json"
urls_to_download = {
"train": os.path.join(self.config.data_url, train_url),
"dev": os.path.join(self.config.data_url, dev_url),
}
dl_dir = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["train"]},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": dl_dir["dev"]},
),
]
if self.config.name == "XNLI":
dl_dir = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(dl_dir, "XNLI-1.0")
return [
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")}
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")}
),
]
if self.config.name.startswith("MLQA"):
mlqa_downloaded_files = dl_manager.download_and_extract(self.config.data_url)
l1 = self.config.name.split(".")[1]
l2 = self.config.name.split(".")[2]
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(mlqa_downloaded_files, "MLQA_V1/test"),
"test-context-{}-question-{}.json".format(l1, l2),
)
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(
os.path.join(mlqa_downloaded_files, "MLQA_V1/dev"),
"dev-context-{}-question-{}.json".format(l1, l2),
)
},
),
]
if self.config.name.startswith("XQuAD"):
lang = self.config.name.split(".")[1]
xquad_downloaded_file = dl_manager.download_and_extract(
os.path.join(self.config.data_url, "xquad.{}.json".format(lang))
)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": xquad_downloaded_file},
),
]
if self.config.name.startswith("PAWS-X"):
lang = self.config.name.split(".")[1]
paws_x_dir = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(paws_x_dir, "x-final", lang)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev_2k.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test_2k.tsv")},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "translated_train.tsv")
if lang != "en"
else os.path.join(data_dir, "train.tsv")
},
),
]
elif self.config.name.startswith("tatoeba"):
lang = self.config.name.split(".")[1]
tatoeba_source_data = dl_manager.download_and_extract(
os.path.join(self.config.data_url, "tatoeba.{}-eng.{}".format(lang, lang))
)
tatoeba_eng_data = dl_manager.download_and_extract(
os.path.join(self.config.data_url, "tatoeba.{}-eng.eng".format(lang))
)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": (tatoeba_source_data, tatoeba_eng_data)},
),
]
if self.config.name.startswith("bucc18"):
lang = self.config.name.split(".")[1]
bucc18_dl_test_dir = dl_manager.download_and_extract(
os.path.join(self.config.data_url, "bucc2018-{}-en.training-gold.tar.bz2".format(lang))
)
bucc18_dl_dev_dir = dl_manager.download_and_extract(
os.path.join(self.config.data_url, "bucc2018-{}-en.sample-gold.tar.bz2".format(lang))
)
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(bucc18_dl_dev_dir, "bucc2018", lang + "-en")},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(bucc18_dl_test_dir, "bucc2018", lang + "-en")},
),
]
if self.config.name.startswith("udpos"):
udpos_downloaded_files = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(udpos_downloaded_files, "ud-treebanks-v2.5")
lang = self.config.name.split(".")[1]
data_dir = os.path.join(data_dir, "*_" + lang + "*")
folders = sorted(glob.glob(data_dir))
if lang == "Kazakh":
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": [
os.path.join(folder, file)
for folder in folders
for file in sorted(os.listdir(folder))
if "test" in file and file.endswith(".conllu")
]
},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": [
os.path.join(folder, file)
for folder in folders
for file in sorted(os.listdir(folder))
if "train" in file and file.endswith(".conllu")
]
},
),
]
elif lang == "Tagalog" or lang == "Thai" or lang == "Yoruba":
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": [
os.path.join(folder, file)
for folder in folders
for file in sorted(os.listdir(folder))
if "test" in file and file.endswith(".conllu")
]
},
)
]
else:
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": [
os.path.join(folder, file)
for folder in folders
for file in sorted(os.listdir(folder))
if "NYUAD" not in folder and "dev" in file and file.endswith(".conllu")
]
                            # we exclude the Arabic NYUAD treebank, which does not contain any words, only underscores
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": [
os.path.join(folder, file)
for folder in folders
for file in sorted(os.listdir(folder))
if "NYUAD" not in folder and "test" in file and file.endswith(".conllu")
]
},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": [
os.path.join(folder, file)
for folder in folders
for file in sorted(os.listdir(folder))
if "NYUAD" not in folder and "train" in file and file.endswith(".conllu")
]
},
),
]
if self.config.name == "SQuAD":
urls_to_download = {
"train": os.path.join(self.config.data_url, "train-v1.1.json"),
"dev": os.path.join(self.config.data_url, "dev-v1.1.json"),
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
if self.config.name.startswith("PAN-X"):
path_to_manual_folder = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
panx_path = os.path.join(path_to_manual_folder, _PAN_X_FOLDER)
if not os.path.exists(panx_path):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('wikihow', data_dir=...)` that includes {}. Manual download instructions: {}".format(
panx_path, _PAN_X_FOLDER, self.manual_download_instructions
)
)
panx_dl_dir = dl_manager.extract(panx_path)
lang = self.config.name.split(".")[1]
lang_folder = dl_manager.extract(os.path.join(panx_dl_dir, lang + ".tar.gz"))
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(lang_folder, "dev")
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(lang_folder, "test")},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(lang_folder, "train")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(xtreme): Yields (key, example) tuples from the dataset
if self.config.name == "tydiqa" or self.config.name.startswith("MLQA") or self.config.name == "SQuAD":
with open(filepath) as f:
data = json.load(f)
for article in data["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers},
}
if self.config.name == "XNLI":
with open(filepath) as f:
data = csv.DictReader(f, delimiter="\t")
for id_, row in enumerate(data):
yield id_, {
"sentence1": row["sentence1"],
"sentence2": row["sentence2"],
"language": row["language"],
"gold_label": row["gold_label"],
}
if self.config.name.startswith("PAWS-X"):
with open(filepath) as f:
data = csv.reader(f, delimiter="\t")
for id_, row in enumerate(data):
if len(row) == 4:
yield id_, {"sentence1": row[1], "sentence2": row[2], "label": row[3]}
if self.config.name.startswith("XQuAD"):
with open(filepath) as f:
xquad = json.load(f)
for article in xquad["data"]:
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers},
}
if self.config.name.startswith("bucc18"):
files = sorted(os.listdir(filepath))
target_file = "/"
source_file = "/"
source_target_file = "/"
for file in files:
if file.endswith("en"):
target_file = os.path.join(filepath, file)
elif file.endswith("gold"):
source_target_file = os.path.join(filepath, file)
else:
source_file = os.path.join(filepath, file)
with open(target_file) as f:
data = csv.reader(f, delimiter="\t")
target_sentences = [row for row in data]
with open(source_file) as f:
data = csv.reader(f, delimiter="\t")
source_sentences = [row for row in data]
with open(source_target_file) as f:
data = csv.reader(f, delimiter="\t")
source_target_ids = [row for row in data]
for id_, pair in enumerate(source_target_ids):
source_id = pair[0]
target_id = pair[1]
source_sent = ""
target_sent = ""
for i in range(len(source_sentences)):
if source_sentences[i][0] == source_id:
source_sent = source_sentences[i][1]
source_id = source_sentences[i][0]
break
for j in range(len(target_sentences)):
if target_sentences[j][0] == target_id:
target_sent = target_sentences[j][1]
target_id = target_sentences[j][0]
break
yield id_, {
"source_sentence": source_sent,
"target_sentence": target_sent,
"source_lang": source_id,
"target_lang": target_id,
}
if self.config.name.startswith("tatoeba"):
source_file = filepath[0]
target_file = filepath[1]
source_sentences = []
target_sentences = []
with open(source_file) as f1:
for row in f1:
source_sentences.append(row)
with open(target_file) as f2:
for row in f2:
target_sentences.append(row)
for i in range(len(source_sentences)):
yield i, {
"source_sentence": source_sentences[i],
"target_sentence": target_sentences[i],
"source_lang": source_file.split(".")[-1],
"target_lang": "eng",
}
if self.config.name.startswith("udpos"):
for id_file, file in enumerate(filepath):
with open(file) as f:
data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for id_row, row in enumerate(data):
if len(row) >= 10 and row[1] != "_":
yield str(id_file) + "_" + str(id_row), {"word": row[1], "pos_tag": row[3]}
if self.config.name.startswith("PAN-X"):
with open(filepath) as f:
data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for id_, row in enumerate(data):
if row:
lang, word = row[0].split(":", 1)  # split only on the first ":" so tokens that contain ":" stay intact
tag = row[1]
yield id_, {"word": word, "ner_tag": tag, "lang": lang}
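# Illustrative sketch (not part of the original script): the PAN-X / WikiANN files read
# above are assumed to contain one "<lang>:<token>\t<ner_tag>" pair per line, so a single
# csv.reader row decomposes as shown by this hypothetical helper.
def _illustrate_panx_row(row):
    # e.g. row == ["en:Obama", "B-PER"] -> {"word": "Obama", "ner_tag": "B-PER", "lang": "en"}
    lang, word = row[0].split(":", 1)
    return {"word": word, "ner_tag": row[1], "lang": lang}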
| nlp-master | datasets/xtreme/xtreme.py |
"""TODO(quoref): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(quoref): BibTeX citation
_CITATION = """\
@article{allenai:quoref,
author = {Pradeep Dasigi and Nelson F. Liu and Ana Marasovic and Noah A. Smith and Matt Gardner},
title = {Quoref: A Reading Comprehension Dataset with Questions Requiring Coreferential Reasoning},
journal = {arXiv:1908.05803v2},
year = {2019},
}
"""
# TODO(quoref):
_DESCRIPTION = """\
Quoref is a QA dataset which tests the coreferential reasoning capability of reading comprehension systems. In this
span-selection benchmark containing 24K questions over 4.7K paragraphs from Wikipedia, a system must resolve hard
coreferences before selecting the appropriate span(s) in the paragraphs for answering questions.
"""
_URL = "https://quoref-dataset.s3-us-west-2.amazonaws.com/train_and_dev/quoref-train-dev-v0.1.zip"
class Quoref(nlp.GeneratorBasedBuilder):
"""TODO(quoref): Short description of my dataset."""
# TODO(quoref): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(quoref): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"question": nlp.Value("string"),
"context": nlp.Value("string"),
"title": nlp.Value("string"),
"url": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"answer_start": nlp.Value("int32"), "text": nlp.Value("string"),}
)
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://leaderboard.allenai.org/quoref/submissions/get-started",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(quoref): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "quoref-train-dev-v0.1")
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "quoref-train-v0.1.json")},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "quoref-dev-v0.1.json")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(quoref): Yields (key, example) tuples from the dataset
with open(filepath) as f:
data = json.load(f)
for article in data["data"]:
title = article.get("title", "").strip()
url = article.get("url", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
"url": url,
}
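# Illustrative usage sketch (not part of the original script; the exact loader behaviour is
# an assumption based on the nlp API used in this repo). The "answers" Sequence feature is
# expected to surface as a dict of parallel lists when an example is read back.
import nlp  # hypothetical standalone usage
quoref_dev = nlp.load_dataset("quoref", split="validation")
example = quoref_dev[0]
# example["answers"] should look like {"answer_start": [...], "text": [...]}
print(example["question"], example["answers"])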
| nlp-master | datasets/quoref/quoref.py |
# coding=utf-8
from dataclasses import dataclass
import pyarrow.csv as pac
import nlp
@dataclass
class CsvConfig(nlp.BuilderConfig):
"""BuilderConfig for CSV."""
skip_rows: int = 0
header_as_column_names: bool = True
delimiter: str = ","
quote_char: str = '"'
read_options: pac.ReadOptions = None
parse_options: pac.ParseOptions = None
convert_options: pac.ConvertOptions = None
@property
def pa_read_options(self):
read_options = self.read_options or pac.ReadOptions()
read_options.skip_rows = self.skip_rows
read_options.autogenerate_column_names = not self.header_as_column_names
return read_options
@property
def pa_parse_options(self):
parse_options = self.parse_options or pac.ParseOptions()
parse_options.delimiter = self.delimiter
parse_options.quote_char = self.quote_char
return parse_options
@property
def pa_convert_options(self):
convert_options = self.convert_options or pac.ConvertOptions()
return convert_options
class Csv(nlp.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = CsvConfig
def _info(self):
return nlp.DatasetInfo()
def _split_generators(self, dl_manager):
""" We handle string, list and dicts in datafiles
"""
if isinstance(self.config.data_files, (str, list, tuple)):
files = self.config.data_files
if isinstance(files, str):
files = [files]
return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name in [nlp.Split.TRAIN, nlp.Split.VALIDATION, nlp.Split.TEST]:
if split_name in self.config.data_files:
files = self.config.data_files[split_name]
if isinstance(files, str):
files = [files]
splits.append(nlp.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _generate_tables(self, files):
for i, file in enumerate(files):
pa_table = pac.read_csv(
file,
read_options=self.config.pa_read_options,
parse_options=self.config.pa_parse_options,
convert_options=self.config.pa_convert_options,
)
yield i, pa_table
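# Illustrative usage sketch (not part of this file): loading CSV files through this builder.
# The data_files paths are hypothetical, and forwarding CsvConfig fields such as `delimiter`
# as keyword arguments of nlp.load_dataset is an assumption about how config kwargs are passed.
import nlp  # hypothetical standalone usage
csv_dataset = nlp.load_dataset(
    "csv",
    data_files={"train": "my_train.csv", "test": "my_test.csv"},  # hypothetical paths
    delimiter=",",
)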
| nlp-master | datasets/csv/csv.py |
"""TODO(squad_v2): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(squad_v2): BibTeX citation
_CITATION = """\
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}
"""
_DESCRIPTION = """\
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers
to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but
also determine when no answer is supported by the paragraph and abstain from answering.
"""
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_DEV_FILE = "dev-v2.0.json"
_TRAINING_FILE = "train-v2.0.json"
class SquadV2Config(nlp.BuilderConfig):
"""BuilderConfig for SQUAD."""
def __init__(self, **kwargs):
"""BuilderConfig for SQUADV2.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(SquadV2Config, self).__init__(**kwargs)
class SquadV2(nlp.GeneratorBasedBuilder):
"""TODO(squad_v2): Short description of my dataset."""
# TODO(squad_v2): Set up version.
BUILDER_CONFIGS = [
SquadV2Config(name="squad_v2", version=nlp.Version("2.0.0"), description="SQuAD plain text version 2"),
]
def _info(self):
# TODO(squad_v2): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://rajpurkar.github.io/SQuAD-explorer/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(squad_v2): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
urls_to_download = {"train": os.path.join(_URL, _TRAINING_FILE), "dev": os.path.join(_URL, _DEV_FILE)}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(squad_v2): Yields (key, example) tuples from the dataset
with open(filepath) as f:
squad = json.load(f)
for example in squad["data"]:
title = example.get("title", "").strip()
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
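# Illustrative sketch (not part of the original script): SQuAD v2.0 marks unanswerable
# questions with an empty "answers" list, so the comprehensions above are expected to
# yield empty parallel lists for those questions, as this hypothetical helper shows.
def _illustrate_unanswerable(qa):
    # e.g. qa == {"id": "q1", "question": "...", "answers": []}
    answer_starts = [answer["answer_start"] for answer in qa["answers"]]
    answers = [answer["text"].strip() for answer in qa["answers"]]
    return {"answer_start": answer_starts, "text": answers}  # -> {"answer_start": [], "text": []}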
| nlp-master | datasets/squad_v2/squad_v2.py |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""SQUAD: The Stanford Question Answering Dataset."""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import nlp
_CITATION = """\
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}
"""
_DESCRIPTION = """\
Stanford Question Answering Dataset (SQuAD) is a reading comprehension \
dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
articles, where the answer to every question is a segment of text, or span, \
from the corresponding reading passage, or the question might be unanswerable.
"""
class SquadConfig(nlp.BuilderConfig):
"""BuilderConfig for SQUAD."""
def __init__(self, **kwargs):
"""BuilderConfig for SQUAD.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(SquadConfig, self).__init__(**kwargs)
class Squad(nlp.GeneratorBasedBuilder):
"""SQUAD: The Stanford Question Answering Dataset. Version 1.1."""
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_DEV_FILE = "dev-v1.1.json"
_TRAINING_FILE = "train-v1.1.json"
BUILDER_CONFIGS = [
SquadConfig(
name="plain_text",
version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
description="Plain text",
),
]
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"id": nlp.Value("string"),
"title": nlp.Value("string"),
"context": nlp.Value("string"),
"question": nlp.Value("string"),
"answers": nlp.features.Sequence(
{"text": nlp.Value("string"), "answer_start": nlp.Value("int32"),}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://rajpurkar.github.io/SQuAD-explorer/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls_to_download = {
"train": os.path.join(self._URL, self._TRAINING_FILE),
"dev": os.path.join(self._URL, self._DEV_FILE),
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
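# Illustrative usage sketch (not part of the original script; the returned splits and the
# approximate sizes in the comments are assumptions based on commonly reported SQuAD v1.1 figures).
import nlp  # hypothetical standalone usage
squad = nlp.load_dataset("squad")
train_split = squad["train"]            # roughly 87.6k examples
validation_split = squad["validation"]  # roughly 10.6k examples
first = train_split[0]
# first["answers"] is expected to look like {"answer_start": [...], "text": [...]}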
| nlp-master | datasets/squad/squad.py |