| from transformers import AutoModel, AutoTokenizer |
| import os |
| import torch |
|
|
class OCRModel:
    """Singleton wrapper around the GOT-OCR2 model for image-to-text OCR.

    The first ``OCRModel()`` call loads the tokenizer and model weights;
    every subsequent call returns the same initialized instance, so the
    (expensive) model load happens exactly once per process.

    NOTE(review): ``__new__`` is not thread-safe — two threads racing the
    first construction could both run ``initialize()``. Confirm callers
    construct the singleton from a single thread (e.g. at app startup).
    """

    _instance = None  # class-level cache holding the sole instance

    def __new__(cls):
        if cls._instance is None:
            # Modern zero-arg super() (equivalent to super(OCRModel, cls)).
            cls._instance = super().__new__(cls)
            cls._instance.initialize()
        return cls._instance

    def initialize(self):
        """Load the tokenizer and model once for the singleton.

        The model path is read from the ``MODEL_PATH`` environment variable
        so deployments can point at a local snapshot; it falls back to the
        Hugging Face hub id ``ucaslcl/GOT-OCR2_0``.
        """
        model_path = os.getenv('MODEL_PATH', 'ucaslcl/GOT-OCR2_0')

        # trust_remote_code=True is required because GOT-OCR2 ships custom
        # modeling code on the hub; local_files_only=False (the default,
        # kept explicit) allows a network download when not cached.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            local_files_only=False,
        )

        self.model = AutoModel.from_pretrained(
            model_path,
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            device_map='auto',  # let accelerate place layers on available devices
            use_safetensors=True,
            pad_token_id=self.tokenizer.eos_token_id,
        )

        # Inference only: disable dropout / training-mode behavior.
        self.model = self.model.eval()

    def process_image(self, image_path):
        """Run OCR on the image at *image_path*.

        Returns the model's formatted OCR output string on success, or a
        best-effort ``"Error processing image: ..."`` string on failure.
        The broad ``except`` is deliberate: callers always receive a
        string rather than having to handle model exceptions themselves.
        """
        try:
            with torch.no_grad():  # no gradients needed for inference
                result = self.model.chat(self.tokenizer, image_path, ocr_type='format')
            return result
        except Exception as e:
            # Deliberate best-effort: surface the error as text instead of
            # propagating, keeping the caller's response path simple.
            return f"Error processing image: {e}"