# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "pdfplumber",
# ]
# ///
"""
Convert a directory of PDF files to a Hugging Face dataset.

This script uses the built-in PDF support in the datasets library (the
"pdffolder" builder) to create a dataset from PDF files. Each PDF becomes
one example whose "pdf" column holds a pdfplumber object, giving per-page
access to text and images after loading.

Example usage:
    # Basic usage - convert PDFs in a directory
    uv run pdf-to-dataset.py /path/to/pdfs username/my-dataset

    # Create a private dataset
    uv run pdf-to-dataset.py /path/to/pdfs username/my-dataset --private

    # Organize by subdirectories (creates labels)
    # folder/invoice/doc1.pdf -> label: invoice
    # folder/receipt/doc2.pdf -> label: receipt
    uv run pdf-to-dataset.py /path/to/organized-pdfs username/categorized-pdfs
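
    # Load the result back and read a page (a minimal sketch; the "pdf"
    # column name assumes the pdffolder builder's default schema):
    #
    #     from datasets import load_dataset
    #     ds = load_dataset("username/my-dataset", split="train")
    #     pdf = ds[0]["pdf"]                  # pdfplumber.pdf.PDF object
    #     print(pdf.pages[0].extract_text())  # text of the first page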
"""

import logging
import os
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from pathlib import Path

from datasets import load_dataset
from huggingface_hub import login

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def validate_directory(directory: Path) -> int:
    """Validate directory and count PDF files."""
    if not directory.exists():
        raise ValueError(f"Directory does not exist: {directory}")

    if not directory.is_dir():
        raise ValueError(f"Path is not a directory: {directory}")

    # Count PDFs (including in subdirectories)
    pdf_count = len(list(directory.rglob("*.pdf")))

    if pdf_count == 0:
        raise ValueError(f"No PDF files found in directory: {directory}")

    return pdf_count


def main():
    parser = ArgumentParser(
        description="Convert PDF files to Hugging Face datasets",
        formatter_class=RawDescriptionHelpFormatter,
        epilog=__doc__,
    )

    parser.add_argument("directory", type=Path, help="Directory containing PDF files")
    parser.add_argument(
        "repo_id",
        type=str,
        help="Hugging Face dataset repository ID (e.g., 'username/dataset-name')",
    )
    parser.add_argument(
        "--private", action="store_true", help="Create a private dataset repository"
    )
    parser.add_argument(
        "--hf-token",
        type=str,
        default=None,
        help="Hugging Face API token (can also use HF_TOKEN environment variable)",
    )

    args = parser.parse_args()

    # Handle authentication
    hf_token = args.hf_token or os.environ.get("HF_TOKEN")
    if hf_token:
        login(token=hf_token)
    else:
        logger.info("No HF token provided. Will attempt to use cached credentials.")

    try:
        # Validate directory
        pdf_count = validate_directory(args.directory)
        logger.info(f"Found {pdf_count} PDF files to process")

        # Load dataset using built-in PDF support
        logger.info("Loading PDFs as dataset (this may take a while for large PDFs)...")
        dataset = load_dataset("pdffolder", data_dir=str(args.directory))

        # Log dataset info
        logger.info("\nDataset created successfully!")
        logger.info(f"Structure: {dataset}")

        if "train" in dataset:
            train_size = len(dataset["train"])
            logger.info(f"Training examples: {train_size}")

            # Show sample if available
            if train_size > 0:
                sample = dataset["train"][0]
                logger.info(f"\nSample structure: {list(sample.keys())}")
                if "label" in sample:
                    logger.info("Labels found - PDFs are organized by category")

        # Push to Hub
        logger.info(f"\nPushing to Hugging Face Hub: {args.repo_id}")
        dataset.push_to_hub(args.repo_id, private=args.private)

        logger.info("✅ Dataset uploaded successfully!")
        logger.info(f"🔗 Available at: https://huggingface.co/datasets/{args.repo_id}")

        # Provide next steps
        logger.info("\nTo use your dataset:")
        logger.info(f'  dataset = load_dataset("{args.repo_id}")')

    except Exception as e:
        logger.error(f"Failed to create dataset: {e}")
        sys.exit(1)


if __name__ == "__main__":
    if len(sys.argv) == 1:
        # Show help if no arguments provided
        print(__doc__)
        sys.exit(0)

    main()