File size: 8,884 Bytes
2927733
6e1dd5b
 
 
 
 
 
 
 
 
 
 
b91d88e
 
 
 
6e1dd5b
 
 
 
 
b91d88e
6e1dd5b
ce5a12e
6e1dd5b
ce5a12e
6e1dd5b
 
 
 
 
 
ce5a12e
 
6e1dd5b
 
 
 
 
 
 
 
 
b91d88e
 
6e1dd5b
2279b79
6e1dd5b
 
 
 
 
ce5a12e
6e1dd5b
 
 
 
 
 
 
ce5a12e
 
 
 
 
6e1dd5b
 
 
ce5a12e
6e1dd5b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ce5a12e
59513e1
 
 
 
 
 
 
 
 
6e1dd5b
 
 
 
ce5a12e
 
6e1dd5b
 
 
 
 
ce5a12e
 
6e1dd5b
 
 
 
 
ce5a12e
 
6e1dd5b
 
 
 
ce5a12e
6e1dd5b
 
59513e1
ce5a12e
 
6e1dd5b
ce5a12e
 
 
 
 
 
 
 
 
 
6e1dd5b
ce5a12e
 
 
 
 
 
 
 
 
 
 
 
 
6e1dd5b
ce5a12e
6e1dd5b
ce5a12e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6e1dd5b
 
ce5a12e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
"""AutoGKB Annotation Benchmark dataset."""

import csv
import json
import os
from pathlib import Path
from typing import Dict, List, Any

import datasets


_CITATION = """\
@misc{autogkb_annotation_benchmark_2025,
  title={AutoGKB Annotation Benchmark},
  author={Shlok Natarajan},
  year={2025},
  note={A benchmark for pharmacogenomic variant-drug annotation extraction from scientific literature}
}
"""

_DESCRIPTION = """\
The AutoGKB Annotation Benchmark evaluates models' ability to extract pharmacogenomic variant-drug associations from scientific literature. 

This dataset contains 4,516 annotations across 1,431 papers, covering genetic variants, drugs, phenotype categories, and association details. Each annotation includes 25 fields ranging from core information (variant, gene, drug, PMID) to detailed phenotype and association data.

The dataset is split into train, validation, and test sets. Each example includes the full article text in markdown format, making it easy to develop and test text mining models.

Key features:
- Pharmacogenomic variant-drug associations
- Multiple phenotype categories (Efficacy, Toxicity, Dosage, Metabolism/PK)
- Population-specific annotations
- Comparison data for statistical associations
- Full scientific article texts included directly in the dataset
- PMC IDs, article titles, and file paths for easy reference
"""

_HOMEPAGE = ""

_LICENSE = "Apache-2.0"

_URLS = {}


class AutoGKBAnnotationBenchmark(datasets.GeneratorBasedBuilder):
    """AutoGKB Annotation Benchmark dataset.

    Reads per-split JSONL files (``train.jsonl`` / ``val.jsonl`` /
    ``test.jsonl``) from a local data directory and inlines each record's
    full article text from the sibling ``articles/`` directory.
    """

    VERSION = datasets.Version("1.2.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="The full AutoGKB Annotation Benchmark dataset with JSONL format",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    # Fields exposed as int64 in the schema; every other field is a string.
    _INT_FIELDS = frozenset({"variant_annotation_id", "pmid"})

    # All feature names, in schema order. Both the Features declaration and
    # the per-row example dict are derived from this single tuple so the two
    # cannot drift apart. "article_text" is not read from the JSONL record —
    # it is populated from the article file on disk.
    _FIELD_ORDER = (
        "pmcid",
        "article_title",
        "article_path",
        "article_text",
        "variant_annotation_id",
        "variant_haplotypes",
        "gene",
        "drugs",
        "pmid",
        "phenotype_category",
        "significance",
        "notes",
        "sentence",
        "alleles",
        "specialty_population",
        "metabolizer_types",
        "is_plural",
        "is_is_not_associated",
        "direction_of_effect",
        "pd_pk_terms",
        "multiple_drugs_and_or",
        "population_types",
        "population_phenotypes_or_diseases",
        "multiple_phenotypes_or_diseases_and_or",
        "comparison_alleles_or_genotypes",
        "comparison_metabolizer_types",
    )

    @staticmethod
    def _clean_value(value):
        """Map JSON null and float NaN to None; pass everything else through.

        NaN is the only float that compares unequal to itself, so
        ``value != value`` detects it without the string round trip the
        original check used.
        """
        if value is None or (isinstance(value, float) and value != value):
            return None
        return value

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo with one feature per annotation field."""
        features = datasets.Features({
            name: datasets.Value("int64" if name in self._INT_FIELDS else "string")
            for name in self._FIELD_ORDER
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Locate the data directory and declare the train/val/test splits.

        ``dl_manager`` is unused: nothing is downloaded, the data is
        expected to already exist locally.
        """
        # Resolve the data directory: explicit data_dir wins, then the
        # directory containing the first configured data file, then cwd.
        if hasattr(self.config, 'data_dir') and self.config.data_dir:
            data_dir = Path(self.config.data_dir)
        elif hasattr(self.config, 'data_files') and self.config.data_files:
            data_dir = Path(list(self.config.data_files.values())[0][0]).parent
        else:
            data_dir = Path.cwd()

        articles_dir = data_dir / "articles"
        split_files = (
            (datasets.Split.TRAIN, "train.jsonl"),
            (datasets.Split.VALIDATION, "val.jsonl"),
            (datasets.Split.TEST, "test.jsonl"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "jsonl_file": data_dir / filename,
                    "articles_dir": articles_dir,
                },
            )
            for split_name, filename in split_files
        ]

    @staticmethod
    def _read_article_text(articles_path, relative_path):
        """Best-effort read of an article file; return "" when unavailable.

        ``relative_path`` values in the JSONL are relative to the dataset
        root (the parent of the articles directory), hence the resolution
        against ``articles_path.parent``.
        """
        if not relative_path:
            return ""
        full_article_path = articles_path.parent / relative_path
        if not full_article_path.exists():
            return ""
        try:
            with open(full_article_path, 'r', encoding='utf-8') as f_article:
                return f_article.read()
        except Exception:
            # Deliberately broad: an unreadable article must not abort
            # generation of the whole split.
            return ""

    def _generate_examples(self, jsonl_file, articles_dir):
        """Yield ``(key, example)`` pairs for one split's JSONL file.

        Keys are ``"<split-stem>_<row-index>"`` so they are unique per split.
        """
        jsonl_path = Path(jsonl_file)
        articles_path = Path(articles_dir)

        with open(jsonl_path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                data = json.loads(line.strip())

                example = {}
                for name in self._FIELD_ORDER:
                    if name == "article_text":
                        example[name] = self._read_article_text(
                            articles_path, data.get("article_path", "")
                        )
                    elif name in self._INT_FIELDS:
                        # `or 0` also coerces an explicit JSON null (which
                        # .get() would pass through) to 0, keeping the value
                        # consistent with the declared int64 feature type.
                        example[name] = data.get(name, 0) or 0
                    else:
                        example[name] = self._clean_value(data.get(name, ""))

                yield f"{jsonl_path.stem}_{idx}", example