rajkumarrawal committed
Commit c3fca84 · 1 Parent(s): 6b80242

feat(model): patch open_clip to prevent meta tensor issues during loading

Replace .to() with .to_empty() wherever meta tensors are involved: patch _set_model_device_and_precision and nn.Module.to so meta-device modules no longer fail during loading. Simplify model loading by removing the legacy fallback chain and adding an explicit model = model.to(device) after from_pretrained.
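For context: parameters on PyTorch's meta device have shapes but no storage, so Module.to() cannot copy them to a real device, while Module.to_empty() re-allocates (uninitialized) storage on the target device. A minimal sketch of that failure mode, illustrative only and not part of this commit:

```python
import torch
import torch.nn as nn

# Parameters created on the meta device have shapes but no data.
layer = nn.Linear(4, 4, device='meta')

# layer.to('cpu') would raise NotImplementedError here, because there is
# no data to copy out of a meta tensor; this is the failure the patch avoids.
layer = layer.to_empty(device='cpu')  # allocate uninitialized CPU storage instead

print(next(layer.parameters()).device)  # cpu
```

Because to_empty() leaves the weights uninitialized, it is only safe when real values are loaded over them afterwards, which the patched loading path below presumably relies on via from_pretrained.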

Files changed (1)
app.py  +70 -25
app.py CHANGED
@@ -17,39 +17,84 @@ device = torch.device('cpu')
 import os
 os.environ['HF_HOME'] = '/tmp/hf_cache' # Use temporary cache directory

-# Prevent meta tensor creation by controlling model initialization
+# Targeted patching of open_clip to prevent meta tensor issues
 try:
-    # Import transformers and patch the model loading if needed
-    import transformers
+    import open_clip
+    import torch.nn as nn

-    # Load model with specific configuration to prevent meta tensors
+    # Store original methods
+    original_to = nn.Module.to
+    original_set_model_device_and_precision = open_clip.factory._set_model_device_and_precision
+
+    # Patch the problematic _set_model_device_and_precision function
+    def patched_set_model_device_and_precision(model, device, precision, is_timm_model):
+        # Force device to CPU and use to_empty instead of to
+        cpu_device = torch.device('cpu')
+        if hasattr(model, 'to_empty'):
+            model.to_empty(device=cpu_device)
+        else:
+            # Fallback to original method but with CPU device
+            try:
+                original_to(model, device=cpu_device)
+            except:
+                # If that fails, try to move parameters individually
+                for param in model.parameters():
+                    if param.device != cpu_device:
+                        param.data = param.data.to(cpu_device)
+                        if param.grad is not None:
+                            param.grad.data = param.grad.data.to(cpu_device)
+
+    # Apply the patch
+    open_clip.factory._set_model_device_and_precision = patched_set_model_device_and_precision
+
+    # Also patch the Module.to method to handle meta tensors
+    def patched_to(self, *args, **kwargs):
+        # Check if we're moving from meta device
+        if hasattr(self, 'parameters'):
+            for param in self.parameters():
+                if param.device.type == 'meta':
+                    # Use to_empty instead of to for meta tensors
+                    if hasattr(self, 'to_empty'):
+                        return self.to_empty(device=torch.device('cpu'))
+                    else:
+                        # Create new tensors with the same shape
+                        cpu_device = torch.device('cpu')
+                        for name, param in self.named_parameters(recurse=False):
+                            if param.device.type == 'meta':
+                                new_param = torch.empty_like(param, device=cpu_device)
+                                setattr(self, name, torch.nn.Parameter(new_param))
+                        for name, buffer in self.named_buffers(recurse=False):
+                            if buffer.device.type == 'meta':
+                                new_buffer = torch.empty_like(buffer, device=cpu_device)
+                                setattr(self, name, new_buffer)
+                        return self
+
+        # Fallback to original method
+        return original_to(self, *args, **kwargs)
+
+    # Apply the patch
+    nn.Module.to = patched_to
+
+except Exception as e:
+    print(f"Could not patch open_clip: {e}")
+
+# Load model with patched open_clip to prevent meta tensor issues
+try:
     model = AutoModel.from_pretrained(
         model_name,
         trust_remote_code=True,
-        torch_dtype=torch.float32,
-        low_cpu_mem_usage=False, # Disable to avoid accelerate issues
-        device_map={"": "cpu"} # Explicitly map to CPU
+        torch_dtype=torch.float32
     )
+    model = model.to(device)

 except Exception as e:
-    print(f"Primary loading method failed: {e}")
-    try:
-        # Fallback method - load with explicit device control
-        model = AutoModel.from_pretrained(
-            model_name,
-            trust_remote_code=True,
-            torch_dtype=torch.float32,
-            device_map="cpu"
-        )
-    except Exception as e2:
-        print(f"Fallback method also failed: {e2}")
-        # Last resort - load and manually move to device
-        model = AutoModel.from_pretrained(
-            model_name,
-            trust_remote_code=True,
-            torch_dtype=torch.float32
-        )
-        model = model.to(device)
+    print(f"Model loading failed: {e}")
+    # Fallback - try loading with different configuration
+    model = AutoModel.from_pretrained(
+        model_name,
+        trust_remote_code=True
+    )
+    model = model.to(device)

 processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
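One caveat about the patch: both the last-resort branch of patched_set_model_device_and_precision and the manual path of patched_to materialize meta tensors as uninitialized storage, so the values only become valid once a checkpoint is loaded over them. A standalone sketch of that manual materialization, using a hypothetical helper name rather than anything from the commit:

```python
import torch
import torch.nn as nn

def materialize_on_cpu(module: nn.Module) -> nn.Module:
    """Replace meta-device parameters/buffers with empty CPU tensors (hypothetical helper)."""
    for name, param in list(module.named_parameters(recurse=False)):
        if param.device.type == 'meta':
            # Same shape and dtype, but real (uninitialized) CPU storage
            setattr(module, name, nn.Parameter(torch.empty_like(param, device='cpu')))
    for name, buf in list(module.named_buffers(recurse=False)):
        if buf.device.type == 'meta':
            setattr(module, name, torch.empty_like(buf, device='cpu'))
    for child in module.children():
        materialize_on_cpu(child)
    return module

layer = materialize_on_cpu(nn.Linear(4, 4, device='meta'))
print(next(layer.parameters()).device)  # cpu (weights still need to be loaded)
```

Since the commit keeps references to original_to and original_set_model_device_and_precision, restoring them once loading has finished (not done in this diff) would stop the monkey-patch from affecting later .to() calls elsewhere in the app.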