import os

import torch
from dotenv import load_dotenv

# Load environment variables from a .env file if it exists
load_dotenv()

# ─── Environment & Directory Setup ────────────────────────────────────────────
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
os.makedirs(STATIC_DIR, exist_ok=True)

# ─── Hardware Configuration ───────────────────────────────────────────────────
# Automatically use GPU if available (recommended for Hugging Face Spaces with T4)
USE_GPU = torch.cuda.is_available()
DEVICE = "cuda" if USE_GPU else "cpu"

# ─── API & Model Configuration ────────────────────────────────────────────────
# API keys should be set as Secrets in your Hugging Face Space
HF_TOKEN = os.getenv("HF_TOKEN")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
GOOGLE_APPLICATION_CREDENTIALS = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")

# Model IDs for the hybrid pipeline
HF_MODELS = {
    # Layout-aware model for initial structured extraction
    "donut": "Javeria98/donut-base-Medical_Handwritten_Prescriptions_Information_Extraction_Final_model1",
    # Small, powerful model for re-parsing medication details
    "phi3": "Muizzzz8/phi3-prescription-reader",
}

# Final resolver model
GEMINI_MODEL_NAME = "gemini-1.5-flash"

# ─── File Paths (can be used for other utilities) ─────────────────────────────
DB_PATH = os.path.join(STATIC_DIR, "rxguard.db")
SIGNATURES_DIR = os.path.join(STATIC_DIR, "signatures")
os.makedirs(SIGNATURES_DIR, exist_ok=True)
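
# ─── Configuration smoke test (illustrative sketch, not part of the pipeline) ──
# Running this module directly prints the resolved settings so a missing secret
# or an unexpected device can be spotted before the pipeline starts. This block
# is an added convenience and only uses values defined above.
if __name__ == "__main__":
    print(f"Device:               {DEVICE} (GPU available: {USE_GPU})")
    print(f"HF token set:         {bool(HF_TOKEN)}")
    print(f"Google API key set:   {bool(GOOGLE_API_KEY)}")
    print(f"Google credentials:   {GOOGLE_APPLICATION_CREDENTIALS or 'not set'}")
    print(f"Donut model:          {HF_MODELS['donut']}")
    print(f"Phi-3 model:          {HF_MODELS['phi3']}")
    print(f"Gemini model:         {GEMINI_MODEL_NAME}")
    print(f"Database path:        {DB_PATH}")
    print(f"Signatures directory: {SIGNATURES_DIR}")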