Diwank Singh committed
Commit 26753ef · 1 Parent(s): 842670c

Signed-off-by: Diwank Singh <diwank.singh@gmail.com>

.gitattributes CHANGED
@@ -1,3 +1,9 @@
+*.txt filter=lfs diff=lfs merge=lfs -text
+*.json filter=lfs diff=lfs merge=lfs -text
+*.xml filter=lfs diff=lfs merge=lfs -text
+*.csv filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
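The five new patterns route the repo's text-format data files (`*.txt`, `*.json`, `*.xml`, `*.csv`, `*.pkl`) through Git LFS, alongside the existing binary patterns. A minimal sketch of what `git lfs track` writes for each pattern; the `track_patterns` helper is hypothetical, not part of this commit:

```python
# Hypothetical helper: append LFS tracking rules to .gitattributes,
# mirroring what `git lfs track "<pattern>"` would add.
from pathlib import Path

LFS_RULE = "{pattern} filter=lfs diff=lfs merge=lfs -text"

def track_patterns(patterns, attributes_file=".gitattributes"):
    path = Path(attributes_file)
    existing = path.read_text().splitlines() if path.exists() else []
    rules = [LFS_RULE.format(pattern=p) for p in patterns]
    with path.open("a") as f:
        for rule in rules:
            if rule not in existing:  # skip patterns already tracked
                f.write(rule + "\n")

track_patterns(["*.txt", "*.json", "*.xml", "*.csv", "*.pkl"])
```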
README.md ADDED
@@ -0,0 +1,43 @@
+---
+license: mit
+---
+
+# Hinglish Dump
+
+Raw merged dump of Hinglish (hi-EN) datasets.
+
+## Files
+
+```
+"crowd_transliteration":
+  "crowd_transliteration/crowd_transliterations.hi-en.txt",
+
+"fire2013":
+  "fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt",
+  "fire2013/HindiEnglish_FIRE2013_Test_GT.txt",
+
+"hindi_romanized_dump":
+  "hindi_romanized_dump/hi_rom.txt",
+
+"hindi_xlit":
+  "hindi_xlit/HiEn_ann1_test.json",
+  "hindi_xlit/HiEn_ann1_train.json",
+  "hindi_xlit/HiEn_ann1_valid.json",
+
+"hinge":
+  "hinge/eval_human.csv",
+  "hinge/eval_human.pkl",
+  "hinge/train_human.csv",
+  "hinge/train_human.pkl",
+  "hinge/train_synthetic.csv",
+  "hinge/eval_synthetic.csv",
+
+"hinglish_norm":
+  "hinglish_norm/hinglishNorm_trainSet.json",
+
+"news2018":
+  "news2018/NEWS2018_M-EnHi_tst.xml",
+  "news2018/NEWS2018_M-EnHi_trn.xml",
+  "news2018/NEWS2018_M-EnHi_dev.xml",
+
+```
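Since every file above is stored under `data/` in the repo, any of them can be fetched directly. A minimal sketch using `huggingface_hub` (the client library is an assumption; the repo id and file path come from this commit):

```python
# Sketch: download one raw file from the dataset repo.
# hf_hub_download resolves the LFS pointer and returns a cached local path.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="diwank/hinglish-dump",
    filename="data/crowd_transliteration/crowd_transliterations.hi-en.txt",
    repo_type="dataset",  # required for dataset (not model) repos
)
print(local_path)
```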
data/crowd_transliteration/crowd_transliterations.hi-en.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e45d29dec243dea84f0d233e453bc14efe48d795da4258e64aba9144d36365f
+size 380351
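Each `ADDED` data file in this commit is a three-line Git LFS pointer stub like the one above: the spec version, a SHA-256 digest of the real content, and its size in bytes; the actual bytes live in LFS storage. A minimal sketch of parsing such a stub (the `parse_lfs_pointer` helper is hypothetical):

```python
# Hypothetical helper: split a Git LFS pointer into its key/value fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {
        "version": fields["version"],  # LFS spec URL
        "oid_algorithm": algo,         # e.g. "sha256"
        "oid": digest,                 # digest of the real content
        "size": int(fields["size"]),   # real file size in bytes
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:2e45d29dec243dea84f0d233e453bc14efe48d795da4258e64aba9144d36365f
size 380351"""
print(parse_lfs_pointer(pointer))  # {'version': ..., 'size': 380351}
```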
data/fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6251057ea0c2e2e1f2daa9d96a0726f4bb713762296b3eb96ed6dfbab0b5dba6
+size 54391
data/fire2013/HindiEnglish_FIRE2013_Test_GT.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31a05d3884a680f9507b7101d1c7d33fb62d870e568ce25cb5b763574c90aae
+size 29326
data/hindi_romanized_dump/hi_rom.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:964952e12f06aee34c9a689636bd56bca6541b78558975f7ee15b277a9d31ca4
+size 517324149
data/hindi_xlit/HiEn_ann1_test.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e8f8b749849ed6c69220b46da33a27f7a2f7e97e053935b834c5e2ae68600e7
+size 338964
data/hindi_xlit/HiEn_ann1_train.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c92b3c8e955dcc3574223b4fc0516dfb3df1205131b3ad0fef00e6bd35745b4
+size 2993734
data/hindi_xlit/HiEn_ann1_valid.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c9336adbabcfcd8602eee9866f8193e4d38300d933217fe3eb77ffd6b993f4c
+size 335141
data/hinge/.ipynb_checkpoints/eval_synthetic-checkpoint.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01179fcb3c0dfbf45cddd0c89cd40a867676668d10b5cdea29f6f2a765d2ec82
+size 67597
data/hinge/eval_human.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a48b0adeaa5a65f097ff644f695b20ef6227ef7ff46bd0c573b84186130ff49c
+size 139425
data/hinge/eval_human.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a48b0adeaa5a65f097ff644f695b20ef6227ef7ff46bd0c573b84186130ff49c
+size 139425
data/hinge/eval_synthetic.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d6c862f0d2654b08a20a60cd4d4d68b041da9c3c0029fd91e52e9fd3cabe240
+size 176109
data/hinge/train_human.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fe6085bbcb39c014f79faaac540ce7fa03d054924a600f340247f9ebb0dcf21
+size 675953
data/hinge/train_human.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fe6085bbcb39c014f79faaac540ce7fa03d054924a600f340247f9ebb0dcf21
+size 675953
data/hinge/train_synthetic.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e7b09167b56515a617a90f8b83c61711df9e2e3796557f7c54dc4f7e036a666
+size 1265593
data/hinglish_norm/hinglishNorm_trainSet.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf622e18d943de8fc28a869b0a7d76076676b83238ebbd61f96850ef86c2f337
+size 2370544
data/news2018/NEWS2018_M-EnHi_dev.xml ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:754d4174f291a0f6209f6ddccca4a7af04700f6bfd4d9b83ffb24c0711d4aaa3
+size 127866
data/news2018/NEWS2018_M-EnHi_trn.xml ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32a0f41ebe7baa635d0fc2ada5feb1ad93e18a03a22dea8f7bc015cab5adc0b6
+size 1681958
data/news2018/NEWS2018_M-EnHi_tst.xml ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57db5526c30ec34cd707ed365558231283788b34be4a77ae2b807f2867e6be30
+size 60532
hinglish-dump.py ADDED
@@ -0,0 +1,82 @@
+# hinglish-dump.py
+
+## About: This is the dataset script for diwank/hinglish-dump
+## Docs: https://huggingface.co/docs/datasets/dataset_script.html
+
+
+"""Raw merged dump of Hinglish (hi-EN) datasets."""
+
+
+import csv
+import os
+
+import datasets
+
+_DESCRIPTION = """\
+Raw merged dump of Hinglish (hi-EN) datasets.
+"""
+
+_HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump"
+_LICENSE = "MIT"
+
+_URLS = {
+    "crowd_transliteration": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "crowd_transliteration/crowd_transliterations.hi-en.txt",
+    ])),
+    "fire2013": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt",
+        "fire2013/HindiEnglish_FIRE2013_Test_GT.txt",
+    ])),
+    "hindi_romanized_dump": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "hindi_romanized_dump/hi_rom.txt",
+    ])),
+    "hindi_xlit": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "hindi_xlit/HiEn_ann1_test.json",
+        "hindi_xlit/HiEn_ann1_train.json",
+        "hindi_xlit/HiEn_ann1_valid.json",
+    ])),
+    "hinge": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "hinge/eval_human.csv",
+        "hinge/eval_human.pkl",
+        "hinge/train_human.csv",
+        "hinge/train_human.pkl",
+        "hinge/train_synthetic.csv",
+        "hinge/eval_synthetic.csv",
+    ])),
+    "hinglish_norm": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "hinglish_norm/hinglishNorm_trainSet.json",
+    ])),
+    "news2018": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
+        "news2018/NEWS2018_M-EnHi_tst.xml",
+        "news2018/NEWS2018_M-EnHi_trn.xml",
+        "news2018/NEWS2018_M-EnHi_dev.xml",
+    ])),
+}
+
+class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
+    """Raw merged dump of Hinglish (hi-EN) datasets."""
+
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="diwank--hinglish-dump", version=VERSION, description="default config"),
+    ]
+
+    CONFIGS = _URLS.keys()
+
+
+    def _info(self):
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(),
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+        )
+
+    def _split_generators(self, dl_manager):
+        # dl_manager is a datasets.download.DownloadManager used to download and extract URLs.
+        # It accepts any nested list/dict and returns the same structure with each URL replaced by a local file path.
+        # Flatten the per-subset URL lists and download everything; no named splits are defined yet.
+        urls = [item for sublist in _URLS.values() for item in sublist]
+        data_dir = dl_manager.download_and_extract(urls)
+        return []
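As committed, the script downloads every file but returns no split generators, so loading it yields no data. A sketch of one way it might be completed; everything here is an assumption (a single `raw` split, a one-column `text` feature, and line-level examples from the `.txt` dumps only), reusing the `_URLS`, `_DESCRIPTION`, `_HOMEPAGE`, and `_LICENSE` globals from the script above:

```python
# Hypothetical completion of the builder; not part of this commit.
import datasets

class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # _info would need real features instead of the empty Features():
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        urls = [item for sublist in _URLS.values() for item in sublist]
        paths = dl_manager.download_and_extract(urls)
        return [datasets.SplitGenerator(name="raw", gen_kwargs={"filepaths": paths})]

    def _generate_examples(self, filepaths):
        key = 0
        for path in filepaths:
            if not path.endswith(".txt"):
                continue  # assumption: emit only the plain-text dumps
            with open(path, encoding="utf-8") as f:
                for line in f:
                    yield key, {"text": line.rstrip("\n")}
                    key += 1
```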