File size: 34,742 Bytes
698c80f
 
 
 
 
 
 
 
 
 
 
 
 
 
35e8439
 
698c80f
 
 
7a4b68c
698c80f
 
35e8439
698c80f
 
 
 
 
 
 
 
 
7a4b68c
 
 
 
698c80f
 
 
 
7a4b68c
698c80f
35e8439
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
35e8439
 
 
 
 
698c80f
 
 
 
 
 
 
 
 
 
 
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
 
 
 
7a4b68c
 
698c80f
7a4b68c
 
 
 
698c80f
 
 
7a4b68c
698c80f
7a4b68c
 
 
 
698c80f
 
 
 
 
7a4b68c
 
 
 
698c80f
7a4b68c
 
 
698c80f
7a4b68c
698c80f
 
 
7a4b68c
 
698c80f
 
 
 
7a4b68c
698c80f
7a4b68c
 
698c80f
7a4b68c
 
698c80f
 
7a4b68c
698c80f
7a4b68c
698c80f
 
 
 
7a4b68c
698c80f
 
7a4b68c
698c80f
 
 
7a4b68c
698c80f
 
7a4b68c
698c80f
 
 
7a4b68c
698c80f
 
7a4b68c
698c80f
 
 
 
 
7a4b68c
698c80f
7a4b68c
 
698c80f
7a4b68c
698c80f
 
7a4b68c
 
 
 
698c80f
 
7a4b68c
698c80f
 
 
 
7a4b68c
698c80f
7a4b68c
 
698c80f
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
 
7a4b68c
 
 
 
 
698c80f
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
698c80f
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
7a4b68c
 
 
 
 
698c80f
7a4b68c
698c80f
7a4b68c
698c80f
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
7a4b68c
 
 
 
698c80f
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
 
 
7a4b68c
 
698c80f
 
 
 
 
 
 
 
 
 
7a4b68c
698c80f
 
 
 
 
 
 
 
7a4b68c
698c80f
7a4b68c
698c80f
 
 
7a4b68c
 
698c80f
 
7a4b68c
 
698c80f
 
 
7a4b68c
 
698c80f
7a4b68c
 
 
 
 
 
698c80f
7a4b68c
 
698c80f
7a4b68c
698c80f
7a4b68c
 
698c80f
 
 
7a4b68c
698c80f
7a4b68c
698c80f
7a4b68c
698c80f
 
7a4b68c
 
698c80f
 
 
7a4b68c
698c80f
 
7a4b68c
 
 
 
 
698c80f
7a4b68c
 
 
 
 
 
 
698c80f
 
 
7a4b68c
 
698c80f
7a4b68c
698c80f
 
 
 
 
 
 
 
 
7a4b68c
 
 
 
 
 
698c80f
 
7a4b68c
 
698c80f
 
 
 
 
7a4b68c
 
 
698c80f
 
 
7a4b68c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
698c80f
 
 
35e8439
 
 
7a4b68c
698c80f
7a4b68c
 
 
 
35e8439
7a4b68c
 
 
35e8439
 
7a4b68c
 
 
 
 
35e8439
7a4b68c
 
 
 
 
 
 
 
35e8439
 
7a4b68c
 
 
 
 
 
35e8439
 
 
 
 
 
 
 
 
 
 
 
 
7a4b68c
35e8439
 
 
 
 
 
 
 
7a4b68c
 
 
 
35e8439
7a4b68c
 
35e8439
 
7a4b68c
 
 
35e8439
 
7a4b68c
 
 
 
 
35e8439
7a4b68c
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MEDLINE/PubMed data - Modified for full abstract text extraction."""


import copy
import gzip
import xml.etree.ElementTree as ET # Using standard ElementTree

import datasets
import random

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
Courtesy of the U.S. National Library of Medicine.
"""

_DESCRIPTION = """\
NLM produces a baseline set of MEDLINE/PubMed citation records in XML format for download on an annual basis.
The annual baseline is released in December of each year. Each day, NLM produces update files that include
new, revised and deleted citations. See our documentation page for more information.
This version is modified to extract the full text from structured abstracts.
"""

_HOMEPAGE = "https://www.nlm.nih.gov/databases/download/pubmed_medline.html"

_LICENSE = "" # Assuming standard NLM terms apply, check source for specifics

# Sampling parameters: choose `total_urls` baseline files spread across the
# full `total_files` range via stratified sampling (one candidate per bin,
# then a uniform sub-sample down to `total_urls`).
total_files = 1274
num_bins = 50
total_urls = 20

# Compute bin size (integer division; the final partial bin is clamped below).
bin_size = total_files // num_bins

# Sample one random file index from each bin.
# NOTE(review): this runs at import time with the unseeded global `random`
# module, so the file selection differs on every run — seed externally if a
# reproducible subset is required.
selected_indices = []
for b in range(num_bins):
    start = b * bin_size + 1
    end = min((b + 1) * bin_size + 1, total_files + 1)
    if start < end:
        selected_indices.append(random.randint(start, end - 1))

# Keep exactly `total_urls` indices (the bins can produce more candidates).
selected_indices = sorted(random.sample(selected_indices, total_urls))

# Create URLs from the sampled indices.
# BUGFIX: the previous version ignored `selected_indices` entirely and
# hard-coded range(1200, 1274), downloading 74 fixed files instead of the
# 20 stratified-sampled ones the code above prepares.
_URLs = [f"https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed25n{i:04d}.xml.gz" for i in selected_indices]

# Copyright Ferry Boender, released under the MIT license.
# Modified by @Narsil to handle more oddities
def deepupdate(target, src):
    """Recursively merge ``src`` into ``target``, mutating ``target``.

    Merge rules, applied per key of ``src``:
      * key missing in ``target``      -> value deep-copied over
      * both values are lists          -> target list extended
      * both values are dicts          -> merged recursively
      * both values are sets           -> target set updated
      * int target, str source         -> source coerced to int when possible
      * irreconcilable type mismatch   -> logged and skipped (target wins),
        except a list target, which absorbs a wrapped scalar source
    """
    for key, incoming in src.items():
        if key in target:
            current = target[key]

            # Coerce numeric strings when the target field already holds an int.
            if type(current) is int and type(incoming) is str:
                try:
                    incoming = int(incoming)
                except (ValueError, TypeError):
                    logger.warning(f"Field '{key}': Could not convert string '{incoming}' to expected type {type(current)}. Skipping update.")
                    continue

            # Matching container kinds are merged below; anything else with
            # differing exact types is a mismatch to resolve here.
            same_container = (
                (isinstance(current, list) and isinstance(incoming, list))
                or (isinstance(current, dict) and isinstance(incoming, dict))
                or (isinstance(current, set) and isinstance(incoming, set))
            )
            if type(current) != type(incoming) and not same_container:
                if isinstance(current, list):
                    # Source is guaranteed non-list here; wrap it so the list
                    # branch below can extend with it.
                    logger.warning(f"Field '{key}': Trying to update a list with a non-list ({type(incoming)}). Wrapping source value in a list.")
                    incoming = [incoming]
                elif isinstance(current, dict):
                    logger.warning(f"Field '{key}': Trying to update a dict with a non-dict ({type(incoming)}). Skipping update.")
                    continue
                else:
                    logger.warning(f"Field '{key}': Type mismatch. Target is {type(current)}, Source is {type(incoming)}. Skipping update.")
                    continue

        # Dispatch on the (possibly coerced/wrapped) source value type.
        if isinstance(incoming, list):
            if key not in target:
                target[key] = copy.deepcopy(incoming)
            elif isinstance(target[key], list):
                target[key].extend(incoming)
            else:
                logger.warning(f"Field '{key}': Trying to extend a non-list ({type(target[key])}) with a list. Replacing value.")
                target[key] = copy.deepcopy(incoming)

        elif isinstance(incoming, dict):
            if key not in target:
                target[key] = copy.deepcopy(incoming)
            elif isinstance(target[key], dict):
                deepupdate(target[key], incoming)  # recurse into nested dicts
            else:
                logger.warning(f"Field '{key}': Trying to update a non-dict ({type(target[key])}) with a dict. Replacing value.")
                target[key] = copy.deepcopy(incoming)

        elif isinstance(incoming, set):
            if key not in target:
                target[key] = incoming.copy()
            elif isinstance(target[key], set):
                target[key].update(incoming)
            else:
                logger.warning(f"Field '{key}': Trying to update a non-set ({type(target[key])}) with a set. Replacing value.")
                target[key] = incoming.copy()

        # Primitive value: shallow-copy it in, but never clobber an existing
        # structured value with a scalar.
        else:
            if key in target and isinstance(target[key], (list, tuple, dict, set)):
                logger.warning(f"Field '{key}': Trying to overwrite a structured type ({type(target[key])}) with a primitive ({type(incoming)}). Skipping update.")
                continue
            target[key] = copy.copy(incoming)


def default_date():
    """Return a fresh Year/Month/Day dict, all zeroed.

    0 serves as the "missing" sentinel to satisfy the int32 date schema;
    downstream date handling must treat 0 as absent rather than a real value.
    """
    return dict.fromkeys(("Year", "Month", "Day"), 0)


def default_inline_article():
    """Return the empty skeleton for the nested Article structure.

    Keys mirror the ``Article`` features declared in ``_info``; values are
    neutral defaults ("" or empty lists) that parsed XML gets merged into.
    """
    skeleton = {}
    skeleton["Abstract"] = {"AbstractText": ""}  # flattened full abstract text
    skeleton["ArticleTitle"] = ""
    skeleton["AuthorList"] = {"Author": []}      # list of Author dicts
    skeleton["Language"] = ""
    skeleton["GrantList"] = {"Grant": []}        # list of Grant dicts
    skeleton["PublicationTypeList"] = {"PublicationType": []}  # list of strings
    return skeleton


def default_article():
    """Return the top-level skeleton for one PubmedArticle record.

    Every key required by the schema in ``_info`` is present with a neutral
    default, so ``deepupdate`` can merge parsed XML content in without
    KeyErrors and untouched fields still satisfy the feature types.
    """
    medline_citation = {
        "PMID": 0,  # filled from the XML; 0 means "not populated yet"
        "DateCompleted": default_date(),
        "NumberOfReferences": 0,
        "DateRevised": default_date(),
        "Article": default_inline_article(),
        "MedlineJournalInfo": {"Country": ""},
        "ChemicalList": {"Chemical": []},
        "CitationSubset": "",  # often a list in raw XML; deepupdate resolves mismatches
        "MeshHeadingList": {"MeshHeading": []},
    }
    pubmed_data = {
        "ArticleIdList": [],
        "PublicationStatus": "",
        "History": {"PubMedPubDate": []},
        "ReferenceList": [],  # flattened later by update_citation
    }
    return {"MedlineCitation": medline_citation, "PubmedData": pubmed_data}


class Pubmed(datasets.GeneratorBasedBuilder):
    """Builder for MEDLINE/PubMed citation records, modified so that
    structured abstracts are flattened into one full-text string
    (see get_full_abstract_text)."""

    # Version bumped to 6.0.0 to reflect the full-abstract modification.
    # Check the actual NLM baseline year you are targeting.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="2025", description="Subset of 2025 annual record with full abstract parsing", version=datasets.Version("6.0.0")),
    ]

    # Schema-derived key sets that steer xml_to_dictionnary. NOTE: these are
    # class-level (shared) sets, mutated in place by fill_keys_from_features
    # and xml_to_dictionnary via self.
    SIMPLE_KEYS = set() # Will be populated by fill_keys_from_features
    LIST_KEYS = set()   # Will be populated by fill_keys_from_features
    IGNORE_KEYS = set() # Tracks keys explicitly ignored during parsing

    def fill_keys_from_features(self, features):
        """Walk the feature tree and classify every key for the XML parser.

        Keys typed as datasets.Sequence land in LIST_KEYS; keys typed as
        datasets.Value or as nested dicts land in SIMPLE_KEYS. Both sets are
        later consulted by xml_to_dictionnary to decide whether a repeated
        XML tag becomes a list or a scalar.
        """
        # A bare Sequence at this level: descend into its element type.
        if isinstance(features, datasets.Sequence):
            self.fill_keys_from_features(features.feature)
            return
        if not isinstance(features, dict):
            return
        for key, feature in features.items():
            if isinstance(feature, datasets.Sequence):
                self.LIST_KEYS.add(key)
                self.fill_keys_from_features(feature.feature)
            elif isinstance(feature, datasets.Value):
                # Simple scalar types (string, int32, ...).
                self.SIMPLE_KEYS.add(key)
            elif isinstance(feature, dict):
                # The key names a nested structure; record it and recurse.
                self.SIMPLE_KEYS.add(key)
                self.fill_keys_from_features(feature)

    def get_full_abstract_text(self, abstract_element):
        """Concatenate the text of every AbstractText under an Abstract node.

        Handles structured abstracts (one AbstractText per section, usually
        carrying a Label attribute) and inline markup such as <i>/<b>, whose
        tags are dropped while their text is kept (via itertext). Sections
        are joined with newlines; returns "" when no text is found.
        """
        if abstract_element is None:
            return ""

        # Prefer direct <AbstractText> children; if none, search the whole
        # subtree in case they are nested deeper than expected.
        sections = abstract_element.findall('./AbstractText')
        if not sections:
            sections = abstract_element.findall('.//AbstractText')

        if not sections:
            # Last resort: harvest any bare text inside <Abstract> itself.
            fragments = [t.strip() for t in abstract_element.itertext() if t and t.strip()]
            if not fragments:
                return ""
            logger.debug(f"Found abstract text directly within <Abstract> or nested tags (no <AbstractText>): {' '.join(fragments)}")
            return "\n".join(fragments)

        parts = []
        for section in sections:
            label = section.get('Label')
            nlm_category = section.get('NlmCategory')  # captured but intentionally not emitted

            # itertext() yields every text fragment inside the element,
            # flattening inline tags; fragments are re-joined with spaces.
            body = " ".join(t.strip() for t in section.itertext() if t and t.strip())

            piece = f"{label}: " if label else ""
            piece += body
            if piece:  # skip entirely empty sections
                parts.append(piece)

        return "\n".join(parts)


    def xml_to_dictionnary(self, parentElement):
        """
        Recursively converts an XML element and its children into a dictionary,
        guided by SIMPLE_KEYS and LIST_KEYS derived from the dataset schema.
        Includes specific handling for Abstract and ArticleTitle.

        Args:
            parentElement: an xml.etree.ElementTree.Element to convert.

        Returns:
            dict: a single-entry mapping {parentElement.tag: parsed_content},
            where parsed_content is a dict of children, a stripped text
            string, or {} for an empty element. Element attributes are added
            as "@attr" keys when they do not collide with parsed children.

        Side effects:
            Unknown tags are added to self.IGNORE_KEYS (shared, class-level)
            and logged once per run.
        """
        data = {}

        # --- Handling for specific tags ---
        # Handle ArticleTitle: Preserve inner XML tags as a string
        if parentElement.tag == "ArticleTitle":
             # Extract inner XML/HTML as string. itertext() gets only text.
             # To keep tags: concatenate text before first child + string representation of children + tail text
             # NOTE(review): parentElement.tail is the text AFTER the closing
             # </ArticleTitle> tag; including it here looks unintended — confirm.
             inner_xml_parts = [ET.tostring(e, encoding='unicode', method='xml') for e in parentElement]
             full_content = (parentElement.text or "").strip() + "".join(inner_xml_parts) + (parentElement.tail or "").strip()
             # Clean up potential extra whitespace between elements
             full_content = ' '.join(full_content.split())
             # Return directly as {Tag: Value} structure expected by caller
             return {parentElement.tag: full_content.strip()}

        # --- Process Children ---
        children = list(parentElement)
        for child in children:
            key = child.tag
            value = None

            # --- Specific Handling for Abstract Tag ---
            if key == "Abstract":
                # Flatten the whole (possibly structured) abstract to one string.
                full_abstract = self.get_full_abstract_text(child)
                # Structure according to schema: {Abstract: {AbstractText: "..."}}
                value = {"AbstractText": full_abstract}
            # Skip AbstractText if found outside Abstract (already handled by get_full_abstract_text)
            elif key == "AbstractText":
                # logger.warning(f"Skipping <{key}> found outside <Abstract> context.")
                continue # Handled within the Abstract block
            # --- End Specific Handling ---
            else:
                # --- Generic Handling for other tags ---
                # Ensure child.text is not None before stripping
                child_text = child.text if child.text is not None else ""

                if len(child) == 0: # Element has no children, just text content
                    value = child_text.strip()
                else: # Element has children, recurse
                    recursive_result = self.xml_to_dictionnary(child)
                    # recursive_result is {'ChildTag': {...}} or {'ChildTag': 'text'}
                    if isinstance(recursive_result, dict) and key in recursive_result:
                         value = recursive_result[key] # Extract the inner value/dict
                    else:
                         # This case might happen if recursion failed or returned unexpected structure
                         logger.warning(f"Unexpected recursive result for <{key}>: {recursive_result}. Using direct text if available.")
                         value = child_text.strip() # Fallback to text

            # --- Add parsed value to data dictionary ---
            if value is None or value == "": # Skip empty/None values unless schema requires empty string
                # Check schema if empty string is valid/expected for this key?
                # For simplicity, skipping None/empty string for now.
                 continue

            if key in data: # Key already exists
                if isinstance(data[key], list):
                    data[key].append(value) # Append to existing list
                else:
                    data[key] = [data[key], value] # Convert to list
            elif key in self.LIST_KEYS:
                # Key expects a list according to schema
                if isinstance(value, list):
                    data[key] = value # Value is already a list (e.g. multiple identical children)
                else:
                    data[key] = [value] # Wrap single value in a list
            elif key in self.SIMPLE_KEYS:
                data[key] = value # Store as simple value
            elif key in self.IGNORE_KEYS:
                continue # Explicitly ignore
            else:
                # Key not in schema or explicitly ignored yet
                # Heuristic: If it appears multiple times, treat as list? Risky.
                # Safest is to log and ignore, or add to IGNORE_KEYS.
                if key not in ["PublicationStatus", "CitationSubset"]: # Allow known schema deviations
                     logger.info(f"Ignoring unexpected key '{key}' found under <{parentElement.tag}>. Content: {value}. Add to Features or IGNORE_KEYS if needed.")
                     self.IGNORE_KEYS.add(key) # Ignore future occurrences in this run
                else: # Handle known deviations that might appear as single strings but schema expects sequence
                     if key in self.LIST_KEYS: # Check if it SHOULD be a list
                          data[key] = [value]
                     else:
                          data[key] = value # Store as simple if not expected list


        # --- Filling Defaults for missing optional elements (as per original script) ---
        # These keys are required by the Features schema, so absent XML tags
        # are backfilled with empty strings.
        if parentElement.tag == "MeshHeading":
            if "QualifierName" not in data: data["QualifierName"] = "" # Expects string
        elif parentElement.tag == "Author":
            if "LastName" not in data: data["LastName"] = ""
            if "ForeName" not in data: data["ForeName"] = ""
            if "Initials" not in data: data["Initials"] = ""
            if "CollectiveName" not in data: data["CollectiveName"] = ""
        elif parentElement.tag == "JournalIssue":
            if "Volume" not in data: data["Volume"] = ""
            if "Issue" not in data: data["Issue"] = ""
            # PubDate handling might be needed if it's inconsistent
        elif parentElement.tag == "Grant":
            if "GrantID" not in data: data["GrantID"] = ""
            # Might need defaults for Agency, Country if they are optional but in schema


        # Return structure expected by recursion {TagName: data_dict}
        # Return empty dict if no children were parsed successfully?
        if data or parentElement.text: # Include if it has data OR direct text (for simple elements)
             # If it only had text content (no children processed), return that directly
             if not data and parentElement.text and parentElement.text.strip():
                  return {parentElement.tag: parentElement.text.strip()}
             else:
                # If it's a structure with attributes, add them
                if parentElement.attrib:
                     # Careful not to overwrite parsed children with same key as attribute
                     attr_dict = {f"@{k}": v for k, v in parentElement.attrib.items() if k not in data}
                     data.update(attr_dict)
                return {parentElement.tag: data}
        else:
             # Return the tag with an empty dict to indicate it was present but empty/unparsed
             return {parentElement.tag: {}}


    def _info(self):
        """Defines the dataset schema.

        Builds the datasets.Features tree describing one PubmedArticle record
        (MedlineCitation + PubmedData), then — as a side effect — populates
        SIMPLE_KEYS and LIST_KEYS via fill_keys_from_features so the XML
        parser knows which tags map to lists vs. scalars.
        """
        # Define reusable structures
        # Zeroed int32 triple; 0 is the "missing" sentinel (see default_date).
        Date = {
            "Year": datasets.Value("int32"),
            "Month": datasets.Value("int32"),
            "Day": datasets.Value("int32"),
        }

        MeshHeading = {"DescriptorName": datasets.Value("string"), "QualifierName": datasets.Value("string")}

        MedlineJournalInfo = {
            "Country": datasets.Value("string"),
            # Original script commented these out due to inconsistency, keeping them commented
            # 'MedlineTA': datasets.Value('string'),
            # 'NlmUniqueID': datasets.Value('string'),
            # 'ISSNLinking': datasets.Value('string'),
        }
        Chemical = {
            "RegistryNumber": datasets.Value("string"),
            "NameOfSubstance": datasets.Value("string"),
        }

        Author = {
            # Attributes like ValidYN, EqualContrib can be added if needed: "@ValidYN": datasets.Value("string")
            "LastName": datasets.Value("string"),
            "ForeName": datasets.Value("string"),
            "Initials": datasets.Value("string"),
            "CollectiveName": datasets.Value("string"), # Handle cases where Author is CollectiveName
            # AffiliationInfo might be needed
        }
        Grant = {
            # GrantID might be optional in data but required by schema? Check data.
            "GrantID": datasets.Value("string"), # Default added in parser if missing
            "Agency": datasets.Value("string"),
            "Country": datasets.Value("string"),
        }

        # Define the main Article structure, including the corrected Abstract
        Article = {
            # 'Journal': Journal, # Kept commented out
            # Single flattened string produced by get_full_abstract_text.
            "Abstract": {"AbstractText": datasets.Value("string")}, # Expects a single string now
            "ArticleTitle": datasets.Value("string"), # Allows HTML tags based on parser
            # 'Pagination': {'MedlinePgn': datasets.Value('string')}, # Kept commented out
            "AuthorList": {"Author": datasets.Sequence(Author)}, # Sequence of Author dicts
            "Language": datasets.Value("string"), # Usually single, but XML allows multiple, use Sequence? Check data. Assuming single for now.
            "GrantList": {
                # GrantList might be optional, Grant inside might be optional/list
                "Grant": datasets.Sequence(Grant), # Sequence of Grant dicts
            },
            "PublicationTypeList": {"PublicationType": datasets.Sequence(datasets.Value("string"))}, # Sequence of strings
        }

        # Define the top-level features including MedlineCitation and PubmedData
        features = datasets.Features(
            {
                "MedlineCitation": {
                    "PMID": datasets.Value("int32"), # Primary key
                    "DateCompleted": Date,
                    "NumberOfReferences": datasets.Value("int32"), # Often missing, default to 0
                    "DateRevised": Date,
                    "Article": Article, # Embed the article structure
                    "MedlineJournalInfo": MedlineJournalInfo,
                    "ChemicalList": {"Chemical": datasets.Sequence(Chemical)},
                    # CitationSubset can be single or multiple - using string for simplicity, may need Sequence
                    "CitationSubset": datasets.Value("string"), # Or datasets.Sequence(datasets.Value("string"))? Check data.
                    "MeshHeadingList": {
                        "MeshHeading": datasets.Sequence(MeshHeading),
                    },
                    # Other potential fields: KeywordList, GeneSymbolList, CommentsCorrectionsList etc.
                },
                "PubmedData": {
                    # ArticleIdList structure is complex: list of dicts, where 'ArticleId' is list of strings
                    "ArticleIdList": datasets.Sequence({
                        "ArticleId": datasets.Sequence(datasets.Value("string")),
                         # Potential attribute: "@IdType": datasets.Value("string")
                        }),
                    "PublicationStatus": datasets.Value("string"),
                    "History": {"PubMedPubDate": datasets.Sequence(Date)}, # Sequence of Date dicts
                    # ReferenceList is modified by update_citation to be simpler
                    "ReferenceList": datasets.Sequence({
                         "Citation": datasets.Value("string"),
                         "CitationId": datasets.Value("int32"), # Assuming PMID is used as CitationId
                         # Potential other fields from original reference structure if needed
                        }),
                },
            }
        )

        # Populate SIMPLE_KEYS and LIST_KEYS based on the final features
        self.fill_keys_from_features(features)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the gzipped baseline XML files and expose one TRAIN split.

        download() is used instead of download_and_extract() so the .gz files
        stay compressed on disk and can be opened as streams for iterparse
        by the generator.
        """
        downloaded = dl_manager.download(_URLs)
        # download() mirrors the shape of its input: normalise list-or-dict
        # results to a plain list of local file paths.
        if isinstance(downloaded, list):
            filenames = downloaded
        else:
            filenames = list(downloaded.values())
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,  # all sampled files feed one split
                gen_kwargs={"filenames": filenames},
            ),
        ]

    def update_citation(self, article):
        """
        Flatten ``article["PubmedData"]["ReferenceList"]`` in place.

        The raw parsed structure is a list of ``{"Reference": [...]}`` containers,
        each reference holding a ``Citation`` string and an ``ArticleIdList`` of
        ``{"ArticleId": [...]}`` containers. This replaces it with a flat list of
        ``{"Citation": str, "CitationId": int}`` pairs, keeping only references
        for which an all-digit article ID (presumably a PMID — the ``@IdType``
        attribute is not consulted) could be extracted.

        Args:
            article: Parsed article dict; mutated in place. After this call,
                ``article["PubmedData"]["ReferenceList"]`` always exists (empty
                list on missing input or on any processing error).
        """
        citations = []
        try:
            # setdefault avoids a KeyError when "PubmedData" is missing entirely
            # (the previous direct assignment raised and fell into the except).
            pubmed_data = article.setdefault("PubmedData", {})
            reference_list_container = pubmed_data.get("ReferenceList")
            if not reference_list_container:
                pubmed_data["ReferenceList"] = []  # ensure key exists even if empty
                return

            # reference_list_container: list of {'Reference': [...]} dicts
            for ref_container in reference_list_container:
                references = ref_container.get("Reference")
                if not references:
                    continue

                # references: list of actual reference dicts
                for ref in references:
                    citation_text = ref.get("Citation")
                    if not citation_text:
                        continue

                    citation_id = None
                    # Find the first all-digit ID in the reference's ArticleIdList.
                    article_id_list_container = ref.get("ArticleIdList")
                    if article_id_list_container:
                        # list of {'ArticleId': [...]} containers
                        for id_container in article_id_list_container:
                            article_ids = id_container.get("ArticleId")
                            if not article_ids:
                                continue
                            for art_id in article_ids:
                                # Guard on str: a non-string entry previously raised
                                # AttributeError on .isdigit(), which escaped the
                                # (ValueError, TypeError) handler and discarded
                                # every citation via the outer except.
                                if isinstance(art_id, str) and art_id.isdigit():
                                    citation_id = int(art_id)
                                    break  # use the first valid integer ID found
                            if citation_id is not None:
                                break  # stop once an ID is found for this reference

                    # Only keep references with a parsable integer ID.
                    if citation_id is not None:
                        citations.append({"Citation": citation_text, "CitationId": citation_id})

            # Replace the old nested structure with the flat list.
            pubmed_data["ReferenceList"] = citations

        except Exception as e:
            logger.error(f"Error during citation update for article: {e}")
            # Ensure the key exists even if the update fails, matching the schema.
            if "PubmedData" not in article:
                article["PubmedData"] = {}
            article["PubmedData"]["ReferenceList"] = []


    def _generate_examples(self, filenames):
        """Yield ``(pmid, article)`` examples from gzipped PubMed XML files.

        Uses ``ET.iterparse`` so each ``<PubmedArticle>`` element is converted
        and freed individually, keeping memory bounded on very large dumps.
        Articles with a missing/invalid PMID, or a PMID already yielded from an
        earlier file, are skipped. Per-article failures are logged and do not
        abort the file; per-file failures are logged and do not abort the run.

        Args:
            filenames: Iterable of paths to ``.xml.gz`` PubMed export files.

        Yields:
            ``(pmid, article)`` where ``pmid`` is the positive integer PMID used
            as the example key and ``article`` is a dict validated against
            ``self.info.features``.
        """
        yielded_pmids = set()  # PMIDs already emitted, so duplicates are dropped

        for filename in filenames:
            logger.info("Processing file: %s", filename)
            try:
                # Read as bytes: ET's parser handles the declared XML encoding.
                with gzip.open(filename, "rb") as f:
                    context = ET.iterparse(f, events=("end",))
                    event, root = next(context)  # keep the root so we can clear it at the end

                    for event, elem in context:
                        if event != "end" or elem.tag != "PubmedArticle":
                            continue

                        pmid = "UNKNOWN_PMID"  # placeholder for error logs if extraction fails early
                        try:
                            article_dict_wrapper = self.xml_to_dictionnary(elem)

                            # NOTE: every `continue` below still runs the finally,
                            # so elem.clear() happens exactly once on every path.
                            if not article_dict_wrapper or "PubmedArticle" not in article_dict_wrapper:
                                logger.warning("Parser returned empty or invalid structure for a PubmedArticle element in %s", filename)
                                continue

                            article = article_dict_wrapper.get("PubmedArticle")
                            if not article or not isinstance(article, dict):
                                logger.warning("Parsed empty or invalid article data from element in %s", filename)
                                continue

                            # --- Extract PMID early for the duplicate check ---
                            pmid_val = article.get("MedlineCitation", {}).get("PMID", 0)
                            try:
                                pmid = int(pmid_val)
                                if pmid <= 0:
                                    raise ValueError("PMID must be positive")
                            except (ValueError, TypeError):
                                logger.warning("Skipping article due to invalid or missing PMID: '%s' in %s", pmid_val, filename)
                                continue

                            # --- Duplicate check ---
                            if pmid in yielded_pmids:
                                logger.warning("Skipping duplicate PMID %s found in %s.", pmid, filename)
                                continue

                            # --- Not a duplicate: normalize and fill defaults ---
                            self.update_citation(article)
                            new_article = default_article()
                            deepupdate(new_article, article)

                            # Sanity check: processing must not have changed the PMID.
                            final_pmid_check = new_article.get("MedlineCitation", {}).get("PMID", 0)
                            if final_pmid_check != pmid:
                                logger.error("PMID mismatch after processing! Expected %s, got %s. Skipping article.", pmid, final_pmid_check)
                                continue

                            # encode_example raises on schema mismatch; the encoded
                            # value itself is not needed, only the validation.
                            self.info.features.encode_example(new_article)

                            yield pmid, new_article  # actual PMID is the example key
                            yielded_pmids.add(pmid)  # record only after a successful yield

                        except Exception as e:
                            # exc_info=False keeps logs compact; flip to True to debug.
                            logger.error("Failed to process article PMID %s in %s: %s", pmid, filename, e, exc_info=False)

                        finally:
                            elem.clear()  # free the element on every path (success, skip, error)

                    if root is not None:
                        root.clear()

            except ET.ParseError as e:
                logger.error("XML ParseError in file %s: %s", filename, e)
                continue
            except gzip.BadGzipFile:
                logger.error("Bad Gzip File error for %s. It might be corrupted or incomplete.", filename)
                continue
            except FileNotFoundError:
                logger.error("File not found: %s", filename)
                continue
            except Exception as e:
                logger.error("An unexpected error occurred processing file %s: %s", filename, e, exc_info=True)
                continue