Commit 6b80242 · 1 Parent(s): 053d849
Refactor model loading: replace open_clip monkey patch with transformers device mapping for CPU and fp32, add robust fallbacks to prevent meta tensor issues
app.py CHANGED
@@ -17,38 +17,39 @@ device = torch.device('cpu')
import os
os.environ['HF_HOME'] = '/tmp/hf_cache'  # Use temporary cache directory

-#
+# Prevent meta tensor creation by controlling model initialization
try:
-
-
+    # Import transformers and patch the model loading if needed
+    import transformers

-
-        # Force device to CPU to prevent meta tensor creation
-        kwargs['device'] = 'cpu'
-        kwargs['precision'] = 'fp32'  # Force float32 precision
-        return original_create_model(*args, **kwargs)
-
-    open_clip.factory.create_model = patched_create_model
-except Exception as e:
-    print(f"Could not patch open_clip: {e}")
-
-# Load model with patched open_clip to prevent meta tensor issues
-try:
+    # Load model with specific configuration to prevent meta tensors
    model = AutoModel.from_pretrained(
        model_name,
        trust_remote_code=True,
-        torch_dtype=torch.float32
+        torch_dtype=torch.float32,
+        low_cpu_mem_usage=False,  # Disable to avoid accelerate issues
+        device_map={"": "cpu"}  # Explicitly map to CPU
    )
-    model = model.to(device)

except Exception as e:
-    print(f"
-
-
-
-
-
-
+    print(f"Primary loading method failed: {e}")
+    try:
+        # Fallback method - load with explicit device control
+        model = AutoModel.from_pretrained(
+            model_name,
+            trust_remote_code=True,
+            torch_dtype=torch.float32,
+            device_map="cpu"
+        )
+    except Exception as e2:
+        print(f"Fallback method also failed: {e2}")
+        # Last resort - load and manually move to device
+        model = AutoModel.from_pretrained(
+            model_name,
+            trust_remote_code=True,
+            torch_dtype=torch.float32
+        )
+        model = model.to(device)

processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
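For reference, the added (+) lines assemble into the loading sequence below. This is a minimal consolidated sketch, not the full app.py: the model_name value shown here is a placeholder, and the torch/transformers imports sit outside this hunk, so their exact form is assumed.

# Consolidated sketch of the new loading path (assembled from the added lines).
# model_name is a placeholder; the real value, the imports, and
# device = torch.device('cpu') are defined earlier in app.py.
import os
os.environ['HF_HOME'] = '/tmp/hf_cache'  # Use temporary cache directory

import torch
from transformers import AutoModel, AutoProcessor  # assumed import style

device = torch.device('cpu')
model_name = "org/some-clip-model"  # placeholder, not the Space's actual model id

# Prevent meta tensor creation by controlling model initialization
try:
    # Import transformers and patch the model loading if needed
    import transformers

    # Load model with specific configuration to prevent meta tensors
    model = AutoModel.from_pretrained(
        model_name,
        trust_remote_code=True,
        torch_dtype=torch.float32,
        low_cpu_mem_usage=False,  # Disable to avoid accelerate issues
        device_map={"": "cpu"}  # Explicitly map to CPU
    )
except Exception as e:
    print(f"Primary loading method failed: {e}")
    try:
        # Fallback method - load with explicit device control
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=True,
            torch_dtype=torch.float32,
            device_map="cpu"
        )
    except Exception as e2:
        print(f"Fallback method also failed: {e2}")
        # Last resort - load and manually move to device
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=True,
            torch_dtype=torch.float32
        )
        model = model.to(device)

processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)

The three attempts differ only in how they pin the weights to the CPU in float32: an explicit per-module device_map, the string shorthand "cpu", and finally a plain load followed by .to(device).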