"""AutoGKB Annotation Benchmark dataset."""

import json
from pathlib import Path
from typing import List

import datasets

_CITATION = """\
@misc{autogkb_annotation_benchmark_2025,
title={AutoGKB Annotation Benchmark},
author={Shlok Natarajan},
year={2025},
note={A benchmark for pharmacogenomic variant-drug annotation extraction from scientific literature}
}
"""
_DESCRIPTION = """\
The AutoGKB Annotation Benchmark evaluates models' ability to extract pharmacogenomic variant-drug associations from scientific literature.
This dataset contains 4,516 annotations across 1,431 papers, covering genetic variants, drugs, phenotype categories, and association details. Each annotation includes 25 fields ranging from core information (variant, gene, drug, PMID) to detailed phenotype and association data.
The dataset is split into train, validation, and test sets. Each example includes the full article text in markdown format, making it easy to develop and test text mining models.
Key features:
- Pharmacogenomic variant-drug associations
- Multiple phenotype categories (Efficacy, Toxicity, Dosage, Metabolism/PK)
- Population-specific annotations
- Comparison data for statistical associations
- Full scientific article texts included directly in the dataset
- PMC IDs, article titles, and file paths for easy reference
"""
_HOMEPAGE = ""
_LICENSE = "Apache-2.0"
_URLS = {}


class AutoGKBAnnotationBenchmark(datasets.GeneratorBasedBuilder):
    """AutoGKB Annotation Benchmark dataset."""

    VERSION = datasets.Version("1.2.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="The full AutoGKB Annotation Benchmark dataset in JSONL format",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features({
            "pmcid": datasets.Value("string"),
            "article_title": datasets.Value("string"),
            "article_path": datasets.Value("string"),
            "article_text": datasets.Value("string"),
            "variant_annotation_id": datasets.Value("int64"),
            "variant_haplotypes": datasets.Value("string"),
            "gene": datasets.Value("string"),
            "drugs": datasets.Value("string"),
            "pmid": datasets.Value("int64"),
            "phenotype_category": datasets.Value("string"),
            "significance": datasets.Value("string"),
            "notes": datasets.Value("string"),
            "sentence": datasets.Value("string"),
            "alleles": datasets.Value("string"),
            "specialty_population": datasets.Value("string"),
            "metabolizer_types": datasets.Value("string"),
            "is_plural": datasets.Value("string"),
            "is_is_not_associated": datasets.Value("string"),
            "direction_of_effect": datasets.Value("string"),
            "pd_pk_terms": datasets.Value("string"),
            "multiple_drugs_and_or": datasets.Value("string"),
            "population_types": datasets.Value("string"),
            "population_phenotypes_or_diseases": datasets.Value("string"),
            "multiple_phenotypes_or_diseases_and_or": datasets.Value("string"),
            "comparison_alleles_or_genotypes": datasets.Value("string"),
            "comparison_metabolizer_types": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        # Resolve the data directory: prefer an explicit data_dir, then infer it
        # from data_files, and finally fall back to the current working directory.
        if hasattr(self.config, 'data_dir') and self.config.data_dir:
            data_dir = Path(self.config.data_dir)
        elif hasattr(self.config, 'data_files') and self.config.data_files:
            # Extract the base directory from the first entry in data_files
            data_dir = Path(list(self.config.data_files.values())[0][0]).parent
        else:
            data_dir = Path.cwd()

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "jsonl_file": data_dir / "train.jsonl",
                    "articles_dir": data_dir / "articles",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "jsonl_file": data_dir / "val.jsonl",
                    "articles_dir": data_dir / "articles",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "jsonl_file": data_dir / "test.jsonl",
                    "articles_dir": data_dir / "articles",
                },
            ),
        ]
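
    # Expected on-disk layout (a sketch inferred from the split generators above;
    # the exact data directory depends on how the dataset is packaged):
    #
    #     <data_dir>/
    #         train.jsonl
    #         val.jsonl
    #         test.jsonl
    #         articles/    <- markdown article files referenced by "article_path"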

    def _generate_examples(self, jsonl_file, articles_dir):
        """Generate examples for each split."""
        # Convert to Path objects if they aren't already
        jsonl_path = Path(jsonl_file)
        articles_path = Path(articles_dir)

        def clean_value(value):
            # Normalize null/NaN values from the JSONL export to None
            if value is None or (isinstance(value, float) and str(value).lower() == 'nan'):
                return None
            return value

        # Process the JSONL file line by line; each line is one annotation record
        with open(jsonl_path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                data = json.loads(line.strip())

                # Load the article text if the referenced markdown file is available
                article_text = ""
                article_path = data.get("article_path", "")
                if article_path:
                    # article_path is stored relative to the dataset root, so resolve
                    # it against the parent of the articles directory
                    full_article_path = articles_path.parent / article_path
                    if full_article_path.exists():
                        try:
                            with open(full_article_path, 'r', encoding='utf-8') as f_article:
                                article_text = f_article.read()
                        except Exception:
                            # If the file cannot be read, leave article_text empty
                            article_text = ""

                # Create the example with all fields from the JSONL record
                example = {
                    "pmcid": clean_value(data.get("pmcid", "")),
                    "article_title": clean_value(data.get("article_title", "")),
                    "article_path": clean_value(data.get("article_path", "")),
                    "article_text": article_text,
                    "variant_annotation_id": data.get("variant_annotation_id", 0),
                    "variant_haplotypes": clean_value(data.get("variant_haplotypes", "")),
                    "gene": clean_value(data.get("gene", "")),
                    "drugs": clean_value(data.get("drugs", "")),
                    "pmid": data.get("pmid", 0),
                    "phenotype_category": clean_value(data.get("phenotype_category", "")),
                    "significance": clean_value(data.get("significance", "")),
                    "notes": clean_value(data.get("notes", "")),
                    "sentence": clean_value(data.get("sentence", "")),
                    "alleles": clean_value(data.get("alleles", "")),
                    "specialty_population": clean_value(data.get("specialty_population", "")),
                    "metabolizer_types": clean_value(data.get("metabolizer_types", "")),
                    "is_plural": clean_value(data.get("is_plural", "")),
                    "is_is_not_associated": clean_value(data.get("is_is_not_associated", "")),
                    "direction_of_effect": clean_value(data.get("direction_of_effect", "")),
                    "pd_pk_terms": clean_value(data.get("pd_pk_terms", "")),
                    "multiple_drugs_and_or": clean_value(data.get("multiple_drugs_and_or", "")),
                    "population_types": clean_value(data.get("population_types", "")),
                    "population_phenotypes_or_diseases": clean_value(data.get("population_phenotypes_or_diseases", "")),
                    "multiple_phenotypes_or_diseases_and_or": clean_value(data.get("multiple_phenotypes_or_diseases_and_or", "")),
                    "comparison_alleles_or_genotypes": clean_value(data.get("comparison_alleles_or_genotypes", "")),
                    "comparison_metabolizer_types": clean_value(data.get("comparison_metabolizer_types", "")),
                }

                yield f"{jsonl_path.stem}_{idx}", example