import requests
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
img = "input_data/ocr_input/japan1.jpg"

model_id = "deepseek-ai/deepseek-llm-7b-chat"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True
)
def text_inference(text, language):
    system_prompt = (
        f"Given the following {language} text, extract all words in their base (dictionary) form, "
        "including verbs, adjectives, nouns, and particles. "
        "Remove all duplicates. Return the base form words as a comma-separated list, and nothing else."
    )
    user_prompt = f"{system_prompt}\n\nText:\n{text}"
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": user_prompt}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    output_ids = model.generate(input_ids, max_new_tokens=256)
    # Decode only the newly generated tokens so the prompt is not parsed as output
    output_text = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
    # Parse the comma-separated string into a list of words
    words = [word.strip() for word in output_text.split(",") if word.strip()]
    return words
def ocr_inference(img, lang):
    ocr = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
    result = ocr.ocr(img, cls=True)[0]
    image = Image.open(img).convert("RGB")
    # Boxes, recognized texts, and confidence scores; only the texts are returned,
    # but boxes/scores can be passed to draw_ocr for visualization.
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]
    return txts
def make_flashcards(words):
    pass
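
# --- Minimal Gradio wiring (illustrative sketch, not part of the original code) ---
# gradio is imported above but not used in this section; the sketch below shows one
# way the OCR and word-extraction steps could be chained behind a simple UI.
# The `pipeline` helper and the language-code mapping are assumptions for illustration.

def pipeline(image_path, language):
    # Map a human-readable language name to PaddleOCR's language code (assumed mapping)
    lang_codes = {"Japanese": "japan", "English": "en"}
    txts = ocr_inference(image_path, lang_codes[language])
    words = text_inference(" ".join(txts), language)
    return ", ".join(words)

demo = gr.Interface(
    fn=pipeline,
    inputs=[
        gr.Image(type="filepath", label="Input image"),
        gr.Dropdown(choices=["Japanese", "English"], value="Japanese", label="Language"),
    ],
    outputs=gr.Textbox(label="Base-form words"),
)

if __name__ == "__main__":
    demo.launch()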