# dataset-creation / pdf-to-dataset.py
# Author: davanstrien (HF Staff)
# Commit cb5bdd5: "Add PDF to dataset conversion script with examples and documentation"
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "datasets",
# "huggingface-hub[hf_transfer]",
# "pdfplumber",
# ]
# ///
"""
Convert a directory of PDF files to a Hugging Face dataset.
This script uses the built-in PDF support in the datasets library to create
a dataset from PDF files. Each PDF is converted to images (one per page).
Example usage:
# Basic usage - convert PDFs in a directory
uv run pdf-to-dataset.py /path/to/pdfs username/my-dataset
# Create a private dataset
uv run pdf-to-dataset.py /path/to/pdfs username/my-dataset --private
# Organize by subdirectories (creates labels)
# folder/invoice/doc1.pdf -> label: invoice
# folder/receipt/doc2.pdf -> label: receipt
uv run pdf-to-dataset.py /path/to/organized-pdfs username/categorized-pdfs
"""
import logging
import os
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from pathlib import Path
from datasets import load_dataset
from huggingface_hub import login
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
def validate_directory(directory: Path) -> int:
    """Validate *directory* and return the number of PDF files it contains.

    PDFs are counted recursively (subdirectories included) and the file
    extension is matched case-insensitively, so ``doc.PDF`` counts too.

    Args:
        directory: Path expected to be an existing directory of PDFs.

    Returns:
        The number of PDF files found (always >= 1).

    Raises:
        ValueError: If the path does not exist, is not a directory, or
            contains no PDF files.
    """
    if not directory.exists():
        raise ValueError(f"Directory does not exist: {directory}")
    if not directory.is_dir():
        raise ValueError(f"Path is not a directory: {directory}")
    # Count PDFs (including in subdirectories). Compare the lowered suffix
    # instead of rglob("*.pdf") so uppercase extensions are not missed on
    # case-sensitive filesystems.
    pdf_count = sum(
        1
        for path in directory.rglob("*")
        if path.is_file() and path.suffix.lower() == ".pdf"
    )
    if pdf_count == 0:
        raise ValueError(f"No PDF files found in directory: {directory}")
    return pdf_count
def main():
    """CLI entry point: validate inputs, build the dataset, push to the Hub.

    Steps:
      1. Parse CLI arguments (directory, repo_id, --private, --hf-token).
      2. Authenticate with the Hub if a token is available.
      3. Validate the PDF directory and load it via the ``pdffolder`` builder.
      4. Push the resulting dataset to the requested repository.

    Exits with status 1 on any failure (logged with full traceback).
    """
    parser = ArgumentParser(
        description="Convert PDF files to Hugging Face datasets",
        formatter_class=RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument("directory", type=Path, help="Directory containing PDF files")
    parser.add_argument(
        "repo_id",
        type=str,
        help="Hugging Face dataset repository ID (e.g., 'username/dataset-name')",
    )
    parser.add_argument(
        "--private", action="store_true", help="Create a private dataset repository"
    )
    parser.add_argument(
        "--hf-token",
        type=str,
        default=None,
        help="Hugging Face API token (can also use HF_TOKEN environment variable)",
    )
    args = parser.parse_args()
    # Handle authentication: explicit flag wins over the environment variable.
    hf_token = args.hf_token or os.environ.get("HF_TOKEN")
    if hf_token:
        login(token=hf_token)
    else:
        logger.info("No HF token provided. Will attempt to use cached credentials.")
    try:
        # Validate directory before doing any expensive work.
        pdf_count = validate_directory(args.directory)
        logger.info("Found %d PDF files to process", pdf_count)
        # Load dataset using built-in PDF support
        logger.info("Loading PDFs as dataset (this may take a while for large PDFs)...")
        dataset = load_dataset("pdffolder", data_dir=str(args.directory))
        # Log dataset info
        logger.info("\nDataset created successfully!")
        logger.info("Structure: %s", dataset)
        if "train" in dataset:
            train_size = len(dataset["train"])
            logger.info("Training examples: %d", train_size)
            # Show sample if available
            if train_size > 0:
                sample = dataset["train"][0]
                logger.info("\nSample structure: %s", list(sample.keys()))
                if "label" in sample:
                    logger.info("Labels found - PDFs are organized by category")
        # Push to Hub
        logger.info("\nPushing to Hugging Face Hub: %s", args.repo_id)
        dataset.push_to_hub(args.repo_id, private=args.private)
        logger.info("โœ… Dataset uploaded successfully!")
        logger.info("๐Ÿ”— Available at: https://huggingface.co/datasets/%s", args.repo_id)
        # Provide next steps
        logger.info("\nTo use your dataset:")
        logger.info('  dataset = load_dataset("%s")', args.repo_id)
    except Exception:
        # Top-level boundary: logger.exception preserves the traceback
        # (logger.error with just the message would discard it).
        logger.exception("Failed to create dataset")
        sys.exit(1)
if __name__ == "__main__":
    # Running with no CLI arguments prints the usage docs instead of
    # letting argparse emit a terse "missing arguments" error.
    if len(sys.argv) < 2:
        print(__doc__)
        sys.exit(0)
    main()