tpierrot committed
Commit e94d8e6 · verified · 1 Parent(s): 53bbc92

Delete nucleotide_transformer_downstream_tasks_revised.py

nucleotide_transformer_downstream_tasks_revised.py DELETED
@@ -1,166 +0,0 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script
# contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for the dataset containing the 18 downstream tasks from the Nucleotide
Transformer paper."""

from typing import List

import datasets


# This function is a basic reimplementation of SeqIO's parse method. This allows the
# dataset viewer to work, as it does not require an external package.
def parse_fasta(fp):
    name, seq = None, []
    for line in fp:
        line = line.rstrip()
        if line.startswith(">"):
            if name:
                # Slice to remove '>'
                yield (name[1:], "".join(seq))
            name, seq = line, []
        else:
            seq.append(line)
    if name:
        # Slice to remove '>'
        yield (name[1:], "".join(seq))


# Find, for instance, the citation on arXiv or on the dataset repo/website.
_CITATION = """\
@article{dalla2023nucleotide,
  title={The Nucleotide Transformer: Building and Evaluating Robust Foundation Models for Human Genomics},
  author={Dalla-Torre, Hugo and Gonzalez, Liam and Mendoza-Revilla, Javier and Carranza, Nicolas Lopez and Grzywaczewski, Adam Henryk and Oteri, Francesco and Dallago, Christian and Trop, Evan and Sirelkhatim, Hassan and Richard, Guillaume and others},
  journal={bioRxiv},
  pages={2023--01},
  year={2023},
  publisher={Cold Spring Harbor Laboratory}
}
"""

# You can copy an official description
_DESCRIPTION = """\
The 18 classification downstream tasks from the Nucleotide Transformer paper. Each task
corresponds to a dataset configuration.
"""

_HOMEPAGE = "https://github.com/instadeepai/nucleotide-transformer"

_LICENSE = "https://github.com/instadeepai/nucleotide-transformer/LICENSE.md"

_TASKS = [
    "H2AFZ",
    "H3K27ac",
    "splice_sites_donors",
    "splice_sites_acceptors",
    "H3K27me3",
    "H3K36me3",
    "H3K4me1",
    "splice_sites_all",
    "H3K4me2",
    "H3K4me3",
    "enhancers_types",
    "promoter_no_tata",
    "H3K9ac",
    "H3K9me3",
    "promoter_tata",
    "enhancers",
    "H4K20me1",
    "promoter_all",
]

class NucleotideTransformerDownstreamTasksConfig(datasets.BuilderConfig):
    """BuilderConfig for the Nucleotide Transformer downstream tasks dataset."""

    def __init__(self, *args, task: str, **kwargs):
        """BuilderConfig for the downstream tasks dataset.

        Args:
            task (:obj:`str`): Task name.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name=f"{task}",
            **kwargs,
        )
        self.task = task

class NucleotideTransformerDownstreamTasks(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = NucleotideTransformerDownstreamTasksConfig
    BUILDER_CONFIGS = [
        NucleotideTransformerDownstreamTasksConfig(task=task) for task in _TASKS
    ]
    DEFAULT_CONFIG_NAME = "enhancers"

    def _info(self):
        features = datasets.Features(
            {
                "sequence": datasets.Value("string"),
                "name": datasets.Value("string"),
                "label": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        train_file = dl_manager.download_and_extract(self.config.task + "/train.fna")
        test_file = dl_manager.download_and_extract(self.config.task + "/test.fna")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"file": test_file}
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file):
        key = 0
        with open(file, "rt") as f:
            fasta_sequences = parse_fasta(f)

            for name, seq in fasta_sequences:
                # parse descriptions in the fasta file
                sequence, name = str(seq), str(name)
                label = int(name.split("|")[-1])

                # yield example
                yield key, {
                    "sequence": sequence,
                    "name": name,
                    "label": label,
                }
                key += 1
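
For context, a short usage sketch (not part of the commit). The first half exercises the parse_fasta helper above on an in-memory record, assuming the function is in scope; the second half shows how one of the 18 task configurations would typically be loaded. The repository id, the header layout before the final "|", and the trust_remote_code flag are illustrative assumptions, not taken from this commit.

import io

from datasets import load_dataset

# parse_fasta (defined in the script above) on an in-memory FASTA record. The label
# sits after the last "|" in the header, which is how _generate_examples recovers it;
# the rest of the header layout is assumed for illustration.
handle = io.StringIO(">seq0|1\nACGT\nACGT\n")
for name, seq in parse_fasta(handle):
    print(name, seq)                 # seq0|1 ACGTACGT
    print(int(name.split("|")[-1]))  # 1 -- the label, parsed as in _generate_examples

# Loading one task configuration; the repo id below is hypothetical.
dataset = load_dataset(
    "InstaDeepAI/nucleotide_transformer_downstream_tasks",
    name="enhancers",        # DEFAULT_CONFIG_NAME declared by the script
    trust_remote_code=True,  # recent `datasets` versions require this to run a script
)
print(dataset["train"][0])   # {"sequence": ..., "name": ..., "label": ...}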