import gradio as gr
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from PIL import Image
import requests
import pandas as pd
import numpy as np
import uuid
import os
import tempfile

# ──────────────────────────────────────────────────────────────
# 1. Load Qwen2-VL OCR Model & Processor (once at startup)
# ──────────────────────────────────────────────────────────────
MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"

# Choose device: GPU if available, otherwise CPU
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
).to(DEVICE).eval()
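
# Assumption (not in the original script): on a memory-constrained GPU,
# loading with device_map="auto" (requires the `accelerate` package) lets
# transformers place/offload the weights automatically instead of a single
# .to(DEVICE) move:
#   model = Qwen2VLForConditionalGeneration.from_pretrained(
#       MODEL_ID, trust_remote_code=True, torch_dtype=torch.float16, device_map="auto"
#   ).eval()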

# ──────────────────────────────────────────────────────────────
# 2. OCR Helper: Extract text from a single PIL image
# ──────────────────────────────────────────────────────────────
@torch.no_grad()
def run_qwen_ocr(pil_image: Image.Image) -> str:
    """
    Use Qwen2-VL to OCR the given PIL image.
    Returns a single string of the extracted text.
    """
    # Build "chat" content: first a text prompt, then the image
    user_message = [
        {"type": "text", "text": "OCR the text in the image."},
        {"type": "image", "image": pil_image},
    ]
    messages = [{"role": "user", "content": user_message}]

    # Create the full prompt
    prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(
        text=[prompt_full],
        images=[pil_image],
        return_tensors="pt",
        padding=True,
    ).to(DEVICE)

    # Generate, then drop the prompt tokens so that only the newly generated
    # text is decoded (decoding outputs[0] directly would echo the prompt)
    outputs = model.generate(**inputs, max_new_tokens=1024)
    generated = outputs[:, inputs["input_ids"].shape[1]:]
    decoded = processor.batch_decode(generated, skip_special_tokens=True)[0]
    # skip_special_tokens already drops markers like "<|im_end|>"; the extra
    # replace is defensive
    return decoded.replace("<|im_end|>", "").strip()
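
# Example (hypothetical path) of calling the OCR helper directly:
#   cover = Image.open("cover.jpg").convert("RGB")
#   print(run_qwen_ocr(cover))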

# ──────────────────────────────────────────────────────────────
# 3. OpenLibrary Lookup Helper
# ──────────────────────────────────────────────────────────────
def query_openlibrary(title_text: str, author_text: str | None = None) -> dict | None:
    """
    Query OpenLibrary.search.json by title (and optional author).
    Returns a dict with keys: title, author_name, publisher, first_publish_year.
    If no results, returns None.
    """
    base_url = "https://openlibrary.org/search.json"
    params = {"title": title_text}
    if author_text:
        params["author"] = author_text

    try:
        resp = requests.get(base_url, params=params, timeout=5)
        resp.raise_for_status()
        data = resp.json()
        if data.get("docs"):
            doc = data["docs"][0]
            return {
                "title": doc.get("title", ""),
                "author_name": ", ".join(doc.get("author_name", [])),
                "publisher": ", ".join(doc.get("publisher", [])),
                "first_publish_year": doc.get("first_publish_year", ""),
            }
    except Exception as e:
        print(f"OpenLibrary query failed: {e}")

    return None
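
# Example: query_openlibrary("Dune", "Frank Herbert") issues
#   GET https://openlibrary.org/search.json?title=Dune&author=Frank+Herbert
# and condenses the first matching doc into the four fields above.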

# ──────────────────────────────────────────────────────────────
# 4. Main Processing: OCR → Parse → OpenLibrary → CSV/DF
# ──────────────────────────────────────────────────────────────
def process_image_list(images: list[Image.Image]):
    """
    Takes a list of PIL images (each ideally a single book cover).
    Runs OCR on each via Qwen2-VL, parses first two nonempty lines as title/author,
    looks up metadata once per image, and returns:
      - A pandas DataFrame of all results
      - A filepath to a CSV (written under the system temp directory)
    """
    records = []

    for pil_img in images:
        # 1) OCR
        try:
            ocr_text = run_qwen_ocr(pil_img)
        except Exception as e:
            # If model fails, skip this image
            print(f"OCR failed on one image: {e}")
            continue

        # 2) Parse lines: first nonempty β†’ title, second β†’ author if present
        lines = [line.strip() for line in ocr_text.splitlines() if line.strip()]
        if not lines:
            # No text extracted; skip
            continue

        title_guess = lines[0]
        author_guess = lines[1] if len(lines) > 1 else None

        # 3) Query OpenLibrary
        meta = query_openlibrary(title_guess, author_guess)
        if meta:
            records.append(meta)
        else:
            # Fallback: record OCR guesses if no OpenLibrary match
            records.append({
                "title": title_guess,
                "author_name": author_guess or "",
                "publisher": "",
                "first_publish_year": "",
            })

    # 4) Build DataFrame (even if empty)
    df = pd.DataFrame(records, columns=["title", "author_name", "publisher", "first_publish_year"])
    csv_bytes = df.to_csv(index=False).encode()

    # 5) Write CSV to a temporary file (tempfile.gettempdir() is portable,
    #    unlike a hard-coded /tmp)
    unique_name = f"books_{uuid.uuid4().hex}.csv"
    temp_path = os.path.join(tempfile.gettempdir(), unique_name)
    with open(temp_path, "wb") as f:
        f.write(csv_bytes)

    return df, temp_path
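
# Example (hypothetical files) of running the pipeline outside Gradio:
#   covers = [Image.open(p).convert("RGB") for p in ["a.jpg", "b.jpg"]]
#   df, csv_path = process_image_list(covers)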

# ──────────────────────────────────────────────────────────────
# 5. Gradio Interface
# ──────────────────────────────────────────────────────────────
def build_interface():
    with gr.Blocks(title="Book Cover Scanner (Qwen2-VL OCR)") as demo:
        gr.Markdown(
            """
            # 📚 Book Cover Scanner + Metadata Lookup

            1. Upload **one or more** images, each containing a single book cover.  
            2. The app will OCR each cover (via Qwen2-VL), take:
               - the **first nonempty line** as a “title” guess, and  
               - the **second nonempty line** (if present) as an “author” guess, then  
               - query OpenLibrary once per image for metadata.  
            3. A table appears below with Title, Author(s), Publisher, Year.  
            4. Click “Download CSV” to export all results.  

            **Tips:**  
            - Use clear, high-contrast photos (text should be legible).  
            - For best results, crop each cover to the image frame (no extra background).  
            - If Qwen2-VL fails on any image, that image is skipped in the table.
            """
        )

        with gr.Row():
            # Gradio 4 removed the .style() API; height is a constructor argument
            img_in = gr.Gallery(
                label="Upload Book Cover(s)", elem_id="input_gallery", height="auto"
            )
            run_button = gr.Button("OCR & Lookup")

        output_table = gr.Dataframe(
            headers=["title", "author_name", "publisher", "first_publish_year"],
            label="Detected Books + Metadata",
            type="pandas",  # `type` sets the Python object passed to callbacks; `datatype` lists per-column types
        )
        download_file = gr.File(label="Download CSV")

        def on_run(image_list):
            # Depending on the Gradio version, a Gallery input yields numpy
            # arrays (H×W×3), filepaths, or (image, caption) tuples; normalize
            # everything to PIL images
            pil_images = []
            for item in image_list or []:
                if isinstance(item, tuple):
                    item = item[0]  # drop the caption half of the tuple
                if isinstance(item, np.ndarray):
                    pil_images.append(Image.fromarray(item))
                elif isinstance(item, str) and os.path.exists(item):
                    pil_images.append(Image.open(item).convert("RGB"))
                elif isinstance(item, Image.Image):
                    pil_images.append(item)
            df, csv_path = process_image_list(pil_images)
            return df, csv_path

        run_button.click(
            fn=on_run,
            inputs=[img_in],
            outputs=[output_table, download_file],
        )

    return demo

if __name__ == "__main__":
    build_interface().launch()
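    # Note: launch(share=True) would additionally create a temporary public
    # link when running locally; hosted platforms handle this automatically.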