# vedaMD / batch_ocr_pipeline.py
import os
from pathlib import Path
from pdf2image import convert_from_path
from PIL import Image
from transformers import AutoTokenizer, AutoProcessor, AutoModelForImageTextToText
# Load Nanonets OCR model
model_id = "nanonets/Nanonets-OCR-s"
model = AutoModelForImageTextToText.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)
model.eval()
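# Optional sketch (not part of the original script): move the model to a GPU when one
# is available. Generation works on CPU, but a single 300-dpi page can take a long time.
import torch
if torch.cuda.is_available():
    model = model.to("cuda")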
prompt = """Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation. If there is an image in the document and image caption is not present, add a small description of the image inside the <img></img> tag; otherwise, add the image caption inside <img></img>. Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>. Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number> or <page_number>9/22</page_number>. Prefer using ☐ and ☑ for check boxes."""
def ocr_image(image_path):
    """Run the Nanonets OCR model on a single page image and return the extracted text."""
    image = Image.open(image_path)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "image", "image": f"file://{image_path}"},
            {"type": "text", "text": prompt},
        ]},
    ]
    # Build the chat-formatted prompt and prepare model inputs.
    text_input = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[text_input], images=[image], padding=True, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=4096, do_sample=False)
    # Strip the prompt tokens so only the newly generated text is decoded.
    generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
    output_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return output_text
def process_pdf(pdf_path, output_dir):
    """Convert each page of a PDF to an image, OCR it, and save the result as markdown."""
    images = convert_from_path(pdf_path, dpi=300)
    for i, img in enumerate(images):
        # Write the page to a temporary image file for the OCR step.
        img_path = f"temp_page_{i}.jpg"
        img.save(img_path)
        print(f"Processing page {i+1} of {pdf_path.name}")
        extracted_text = ocr_image(img_path)
        # Save each page as markdown
        out_file = output_dir / f"{pdf_path.stem}_page_{i+1}.md"
        with open(out_file, "w", encoding="utf-8") as f:
            f.write(extracted_text)
        # Clean up the temporary page image.
        os.remove(img_path)
# === CONFIG ===
script_dir = Path(__file__).parent
input_folder = script_dir / "pdfs"
output_folder = script_dir / "ocr_output"
output_folder.mkdir(exist_ok=True)
pdf_files = list(input_folder.glob("*.pdf"))
print(f"Found {len(pdf_files)} PDFs.")
for pdf in pdf_files:
process_pdf(pdf, output_folder)
print("✅ OCR complete. Check ocr_output folder.")