# hinglish-dump.py
## About: This is a dataset script for diwank/hinglish-dump
## Docs: https://huggingface.co/docs/datasets/dataset_script.html
"""Raw merged dump of Hinglish (hi-EN) datasets."""
import datasets
import pandas as pd
_DESCRIPTION = """\
Raw merged dump of Hinglish (hi-EN) datasets.
"""
_HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump"
_LICENSE = "MIT"
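# Each subset resolves to a single HDF5 file hosted in the dataset repo on the Hub.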
_URLS = {
    subset: f"{_HOMEPAGE}/resolve/main/data/{subset}/data.h5"
    for subset in "crowd_transliteration hindi_romanized_dump hindi_xlit hinge hinglish_norm news2018".split()
}
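# Column names shared by every subset; _info() declares each one as a plain string feature.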
_FEATURE_NAMES = [
    "target_hinglish",
    "source_hindi",
    "parallel_english",
    "annotations",
    "raw_input",
    "alternates",
]
config_names = _URLS.keys()
version = datasets.Version("1.0.0")
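# One BuilderConfig per subset: the config name selects which data.h5 file is downloaded.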
class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
    """Raw merged dump of Hinglish (hi-EN) datasets."""

    VERSION = version
    CONFIGS = config_names
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=subset, version=version, description=f"Config for {subset}")
        for subset in config_names
    ]
    DEFAULT_CONFIG_NAME = None
    def _info(self):
        features = datasets.Features({
            feature: datasets.Value("string")
            for feature in _FEATURE_NAMES
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
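    # Every subset stores its train/eval/test splits in one HDF5 file, keyed by split name
    # (see _generate_examples below); "eval" is mapped onto datasets.Split.VALIDATION.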
    def _split_generators(self, dl_manager):
        # dl_manager is a datasets.download.DownloadManager used to download and extract URLs.
        # It accepts any nested list/dict structure and returns the same structure with each
        # URL replaced by a path to the local file. Archives are extracted by default, and the
        # path to the cached extraction folder is returned instead of the archive itself.
        url = _URLS[self.config.name]
        filepath = self.data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=getattr(datasets.Split, "VALIDATION" if split == "eval" else split.upper()),
                gen_kwargs=dict(filepath=filepath, split=split),
            )
            for split in ["train", "eval", "test"]
        ]
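    # Read the requested split's dataframe out of the HDF5 file and yield one example per row.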
    def _generate_examples(self, filepath, split):
        df = pd.read_hdf(filepath, key=split)
        for i, row in enumerate(df.to_dict("records")):
            yield i, row
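
if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the loader itself).
    # Assumes network access to the Hub-hosted .h5 files; "hinge" is one of the
    # subset names defined in _URLS above.
    ds = datasets.load_dataset(__file__, "hinge", split="train")
    print(ds[0])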