Diwank Singh committed
Commit c0cdaf0 · 1 Parent(s): b47751a

Update config file

Signed-off-by: Diwank Singh <diwank.singh@gmail.com>

Files changed (1)
  1. hinglish-dump.py +9 -36
hinglish-dump.py CHANGED

@@ -7,7 +7,7 @@
 """Raw merged dump of Hinglish (hi-EN) datasets."""
 
 
-import csv
+import pandas as pd
 import os
 
 import datasets
@@ -20,36 +20,8 @@ _HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump"
 _LICENSE = "MIT"
 
 _URLS = {
-    "crowd_transliteration": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "crowd_transliteration/crowd_transliterations.hi-en.txt",
-    ])),
-    "fire2013": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt",
-        "fire2013/HindiEnglish_FIRE2013_Test_GT.txt",
-    ])),
-    "hindi_romanized_dump": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "hindi_romanized_dump/hi_rom.txt",
-    ])),
-    "hindi_xlit": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "hindi_xlit/HiEn_ann1_test.json",
-        "hindi_xlit/HiEn_ann1_train.json",
-        "hindi_xlit/HiEn_ann1_valid.json",
-    ])),
-    "hinge": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "hinge/eval_human.csv",
-        "hinge/train_human.csv",
-        "hinge/train_synthetic.csv",
-        "hinge/eval_synthetic.csv",
-    ])),
-    "hinglish_norm": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "hinglish_norm/hinglishNorm_trainSet.json",
-    ])),
-    "news2018": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
-        "news2018/NEWS2018_M-EnHi_tst.xml",
-        "news2018/NEWS2018_M-EnHi_trn.xml",
-        "news2018/NEWS2018_M-EnHi_dev.xml",
-    ])),
-}
+    subset: f"{_HOMEPAGE}/resolve/main/data/{subset}/data.h5"
+    for subset in "crowd_transliteration hindi_romanized_dump hindi_xlit hinge hinglish_norm news2018".split() }
 
 config_names = _URLS.keys()
 version = datasets.Version("1.0.0")
@@ -81,22 +53,23 @@ class HinglishDumpDataset(datasets.DatasetBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-        data_dir = self.data_dir = dl_manager.download_and_extract(_URLS)
+        urls = _URLS[self.config.name]
+        data_dir = self.data_dir = dl_manager.download_and_extract(urls)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": None, "split": "train"},
+                gen_kwargs={"filepath": os.path.join(data_dir, "data.h5"), "split": "train"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": None, "split": "validation"},
+                gen_kwargs={"filepath": os.path.join(data_dir, "data.h5"), "split": "eval"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": None, "split": "test"},
+                gen_kwargs={"filepath": os.path.join(data_dir, "data.h5"), "split": "test"},
             ),
         ]
 
     def _generate_examples(self, filepath, split):
-        return None
+        return pd.read_hdf(filepath, key=split)
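
After this change, the loader expects each subset directory to hold a single data.h5 file with one pandas table per split, keyed "train", "eval" and "test" (the split values handed to pd.read_hdf above). A minimal sketch of packing a compatible file, assuming pandas with PyTables installed; the hi/en column names and row values are illustrative only, not taken from the actual dumps:

import pandas as pd

# Hypothetical per-split frames; the real columns depend on the subset being packed.
splits = {
    "train": pd.DataFrame({"hi": ["namaste"], "en": ["hello"]}),
    "eval": pd.DataFrame({"hi": ["shukriya"], "en": ["thanks"]}),
    "test": pd.DataFrame({"hi": ["haan"], "en": ["yes"]}),
}

# One HDF5 file per subset, one key per split -- this mirrors the
# loader's pd.read_hdf(filepath, key=split) call.
for split, df in splits.items():
    df.to_hdf("data.h5", key=split, mode="a")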
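On the consumer side, a subset should then load by config name. A quick smoke test, assuming the script and the data.h5 files are pushed to the hub repo named in _HOMEPAGE:

import datasets

# "hinge" is one of the six config names derived from _URLS;
# any of the other subsets works the same way.
ds = datasets.load_dataset("diwank/hinglish-dump", "hinge")
print(ds)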