Download model using HTTP only
app/services/model_service.py  (+28 -70)
@@ -7,8 +7,9 @@ import logging
 import torch
 from PIL import Image
 from transformers import AutoTokenizer, ViTImageProcessor
-from huggingface_hub import …
-…
+from huggingface_hub import login
+import requests
+from huggingface_hub.utils import build_hf_headers
 from app.config import settings
 from app.models.vqa_model import VQAModel
 
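For context on the added imports: `requests` performs the HTTP transfer and `build_hf_headers` (a `huggingface_hub` helper) builds the request headers that carry the access token. A minimal sketch of what that helper produces, using a placeholder token rather than a real credential:

    from huggingface_hub.utils import build_hf_headers

    # Returns a plain dict of HTTP headers; when a token is supplied it adds an
    # "authorization: Bearer <token>" entry alongside the library's user-agent.
    headers = build_hf_headers(token="hf_xxxxxxxx")  # placeholder token, for illustration only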
@@ -42,74 +43,31 @@ class ModelService:
     def _download_model_from_hub(self):
         """Download the model from Hugging Face Hub if not present locally"""
         try:
-            … (old lines 45-69 are not legible in the rendered diff) …
-                cache_dir=os.environ.get("HF_HOME"),
-                force_download=True
-            )
-
-            logger.info(f"Model downloaded to: {downloaded_path}")
-
-            # If needed, copy the file to the expected location
-            if downloaded_path != settings.MODEL_PATH:
-                import shutil
-                logger.info(f"Copying from {downloaded_path} to {settings.MODEL_PATH}")
-                shutil.copy2(downloaded_path, settings.MODEL_PATH)
-
-            logger.info(f"Model download successful")
-            return True
-
-        except Exception as download_error:
-            logger.error(f"Error during Hugging Face Hub download: {download_error}")
-
-            # Fallback method: direct download using requests
-            logger.info("Trying fallback method with direct HTTP download")
-            import requests
-            from huggingface_hub.utils import build_hf_headers
-
-            # Get Hugging Face token from settings
-            token = settings.HUGGINGFACE_TOKEN
-
-            # Build proper URL for the model file
-            url = f"https://huggingface.co/{settings.HF_MODEL_REPO}/resolve/main/{settings.HF_MODEL_FILENAME}"
-            logger.info(f"Downloading from URL: {url}")
-
-            # Download with proper headers
-            headers = build_hf_headers(token=token)
-            response = requests.get(url, headers=headers, stream=True)
-            response.raise_for_status()
-
-            # Write the file in chunks to avoid memory issues
-            logger.info(f"Writing downloaded content to {settings.MODEL_PATH}")
-            with open(settings.MODEL_PATH, 'wb') as f:
-                for chunk in response.iter_content(chunk_size=8192):
-                    f.write(chunk)
-
-            logger.info(f"Model downloaded successfully using fallback method")
-            return True
+
+            # Download the model file directly over HTTP
+            logger.info("Downloading model from Hugging Face Hub")
+
+
+            # Get Hugging Face token from settings
+            token = settings.HUGGINGFACE_TOKEN
+
+            # Build proper URL for the model file
+            url = f"https://huggingface.co/{settings.HF_MODEL_REPO}/resolve/main/{settings.HF_MODEL_FILENAME}"
+            logger.info(f"Downloading from URL: {url}")
+
+            # Download with proper headers
+            headers = build_hf_headers(token=token)
+            response = requests.get(url, headers=headers, stream=True)
+            response.raise_for_status()
+
+            # Write the file in chunks to avoid memory issues
+            logger.info(f"Writing downloaded content to {settings.MODEL_PATH}")
+            with open(settings.MODEL_PATH, 'wb') as f:
+                for chunk in response.iter_content(chunk_size=8192):
+                    f.write(chunk)
+
+            logger.info("Model downloaded successfully")
+            return True
 
         except Exception as e:
             logger.error(f"Error downloading model from Hugging Face Hub: {e}")
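For reference, here is a standalone sketch of the same direct-HTTP approach with two hardening steps the diff does not include: a request timeout and a write-to-temp-then-rename. The `settings.*` attribute names are taken from the diff; the function name, the timeout values, and the `.part` suffix are illustrative assumptions, not part of the PR.

    import os
    import tempfile

    import requests
    from huggingface_hub.utils import build_hf_headers

    from app.config import settings


    def download_model_file() -> str:
        """Stream the model weights from the Hub's resolve endpoint to MODEL_PATH.

        Sketch only: adds a timeout and an atomic rename on top of the approach
        used in the PR; names outside settings.* are illustrative.
        """
        url = (
            f"https://huggingface.co/{settings.HF_MODEL_REPO}"
            f"/resolve/main/{settings.HF_MODEL_FILENAME}"
        )
        headers = build_hf_headers(token=settings.HUGGINGFACE_TOKEN)

        # Stream the response so the whole file never sits in memory at once.
        with requests.get(url, headers=headers, stream=True, timeout=(10, 300)) as response:
            response.raise_for_status()

            # Write to a temporary file in the target directory, then rename,
            # so an interrupted download cannot leave a partial MODEL_PATH behind.
            target_dir = os.path.dirname(settings.MODEL_PATH) or "."
            fd, tmp_path = tempfile.mkstemp(dir=target_dir, suffix=".part")
            try:
                with os.fdopen(fd, "wb") as tmp_file:
                    for chunk in response.iter_content(chunk_size=8192):
                        tmp_file.write(chunk)
                os.replace(tmp_path, settings.MODEL_PATH)  # atomic on the same filesystem
            except BaseException:
                os.remove(tmp_path)
                raise

        return settings.MODEL_PATH

Writing to a temporary `.part` file first means a crash mid-download leaves nothing at `settings.MODEL_PATH`, so a later "is the model already present?" check is less likely to be fooled by a truncated file.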