"""
Models module for Personal Coach CrewAI Application
Handles all AI model loading and management
"""
from typing import TYPE_CHECKING, Optional, Dict, Any
import os
import torch
# Version info
__version__ = "1.0.0"
# Lazy imports
if TYPE_CHECKING:
from .mistral_model import MistralModel, MistralConfig, MistralPromptFormatter
from .tiny_gpt2_model import TinyGPT2Model
# Public API
__all__ = [
# Main model classes
"MistralModel",
"MistralConfig",
"MistralPromptFormatter",
"TinyGPT2Model",
# Model management
"load_model",
"get_model_info",
"clear_model_cache",
# Constants
"AVAILABLE_MODELS",
"MODEL_REQUIREMENTS",
"DEFAULT_MODEL_CONFIG"
]
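
# The names above are imported only under TYPE_CHECKING, so resolve them lazily at
# runtime as well via PEP 562. This is a minimal sketch; it assumes .mistral_model and
# .tiny_gpt2_model define exactly the classes listed in __all__.
def __getattr__(name: str):
    """Resolve the heavyweight model classes on first access instead of at import time."""
    if name in ("MistralModel", "MistralConfig", "MistralPromptFormatter"):
        from . import mistral_model
        return getattr(mistral_model, name)
    if name == "TinyGPT2Model":
        from .tiny_gpt2_model import TinyGPT2Model
        return TinyGPT2Model
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")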
# Available models
AVAILABLE_MODELS = {
"mistral-7b-instruct": {
"model_id": "mistralai/Mistral-7B-Instruct-v0.1",
"type": "instruction-following",
"size": "7B",
"context_length": 32768,
"languages": ["multilingual"]
},
"mistral-7b": {
"model_id": "mistralai/Mistral-7B-v0.1",
"type": "base",
"size": "7B",
"context_length": 32768,
"languages": ["multilingual"]
},
"tiny-gpt2": {
"model_id": "sshleifer/tiny-gpt2",
"type": "tiny",
"size": "small",
"context_length": 256,
"languages": ["en"]
}
}
# Model requirements
MODEL_REQUIREMENTS = {
"mistral-7b-instruct": {
"ram": "16GB",
"vram": "8GB (GPU) or 16GB (CPU)",
"disk": "15GB",
"compute": "GPU recommended"
},
"tiny-gpt2": {
"ram": "≤1GB",
"vram": "CPU only",
"disk": "<1GB",
"compute": "CPU"
}
}
# Default configuration: Set to CPU/float32
DEFAULT_MODEL_CONFIG = {
"max_length": 256,
"temperature": 0.7,
"top_p": 0.95,
"top_k": 50,
"do_sample": True,
"num_return_sequences": 1,
"device": "cpu",
"torch_dtype": torch.float32,
"load_in_8bit": False,
"cache_dir": ".cache/models"
}
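
# Example (illustrative): callers may pass a partial config to load_model() below; it is
# merged over DEFAULT_MODEL_CONFIG, so unspecified keys keep the CPU/float32 defaults,
# e.g. load_model("mistral-7b-instruct", {"temperature": 0.2, "max_length": 512}).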
# Model instance cache
_model_cache: Dict[str, Any] = {}
def load_model(model_name: str = "tiny-gpt2", config: Optional[Dict[str, Any]] = None):
"""
Load a model with caching support
Args:
model_name: Name of the model to load
config: Optional configuration override
Returns:
Model instance
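    Example (illustrative usage; assumes the lightweight tiny-gpt2 backend is available):
        >>> model = load_model("tiny-gpt2")
        >>> model is load_model("tiny-gpt2")  # second call is served from the cache
        True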
"""
# Check cache first
cache_key = f"{model_name}_{str(config)}"
if cache_key in _model_cache:
return _model_cache[cache_key]
# Import here to avoid circular imports
if model_name == "tiny-gpt2":
from .tiny_gpt2_model import TinyGPT2Model
# No config needed for TinyGPT2, ignore config for now
model = TinyGPT2Model()
elif model_name in ["mistral-7b-instruct", "mistral-7b"]:
from .mistral_model import MistralModel, MistralConfig
model_info = AVAILABLE_MODELS.get(model_name)
if not model_info:
raise ValueError(f"Unknown model: {model_name}")
model_config = DEFAULT_MODEL_CONFIG.copy()
if config:
model_config.update(config)
mistral_config = MistralConfig(
model_id=model_info["model_id"],
**model_config
)
model = MistralModel(mistral_config)
else:
raise ValueError(f"Unknown model: {model_name}")
# Cache it
_model_cache[cache_key] = model
return model
def get_model_info(model_name: str) -> Optional[Dict[str, Any]]:
"""
Get information about a model
Args:
model_name: Name of the model
Returns:
Model information dictionary or None
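    Example (illustrative; values mirror the AVAILABLE_MODELS entry above):
        >>> get_model_info("tiny-gpt2")["context_length"]
        256
        >>> get_model_info("no-such-model") is None
        True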
"""
info = AVAILABLE_MODELS.get(model_name)
if info:
# Add requirements
requirements = MODEL_REQUIREMENTS.get(model_name, {})
info = info.copy() # avoid mutating global dict!
info["requirements"] = requirements
# Add loading status
        # Match on the "name_" prefix so "mistral-7b" does not also match "mistral-7b-instruct"
        cache_keys = [k for k in _model_cache if k.startswith(f"{model_name}_")]
info["is_loaded"] = len(cache_keys) > 0
return info
def clear_model_cache(model_name: Optional[str] = None):
"""
Clear model cache to free memory
Args:
model_name: Specific model to clear, or None for all
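    Example (illustrative):
        >>> clear_model_cache("tiny-gpt2")  # evict only tiny-gpt2 instances
        >>> clear_model_cache()             # evict everything and release CUDA memory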
"""
global _model_cache
if model_name:
        # Clear the specific model; the "name_" prefix avoids matching longer model names
        keys_to_remove = [k for k in _model_cache if k.startswith(f"{model_name}_")]
for key in keys_to_remove:
del _model_cache[key]
else:
# Clear all
_model_cache.clear()
# Force garbage collection
import gc
gc.collect()
# Clear GPU cache if using CUDA
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Utility functions
def estimate_memory_usage(model_name: str) -> Dict[str, Any]:
"""
Estimate memory usage for a model
Args:
model_name: Name of the model
Returns:
Memory estimation dictionary
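    Example (illustrative; derived from the bytes-per-parameter rules below):
        >>> estimate_memory_usage("mistral-7b")["fp16_memory_gb"]
        14.0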
"""
model_info = AVAILABLE_MODELS.get(model_name)
if not model_info:
return {}
size = model_info.get("size", "7B")
    if size.endswith("B"):
        params_billion = float(size.replace("B", ""))  # e.g. "7B" -> 7.0 billion parameters
    elif size == "small":
        params_billion = 0.02  # rough figure for the tiny test model, in billions
    else:
        params_billion = 0.1  # conservative catch-all, in billions
    estimates = {
        "model_size_gb": params_billion,        # parameter count in billions (≈ GB at 1 byte/param)
        "fp32_memory_gb": params_billion * 4,   # 4 bytes per parameter
        "fp16_memory_gb": params_billion * 2,   # 2 bytes per parameter
        "int8_memory_gb": params_billion,       # 1 byte per parameter
        "recommended_ram_gb": params_billion * 2.5,
        "recommended_vram_gb": params_billion * 1.5
    }
return estimates
def get_device_info() -> Dict[str, Any]:
"""Get information about available compute devices"""
info = {
"cuda_available": torch.cuda.is_available(),
"device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0,
"current_device": torch.cuda.current_device() if torch.cuda.is_available() else None,
"device_name": torch.cuda.get_device_name() if torch.cuda.is_available() else "CPU"
}
if torch.cuda.is_available():
info["gpu_memory"] = {
"allocated": torch.cuda.memory_allocated() / 1024**3, # GB
"reserved": torch.cuda.memory_reserved() / 1024**3, # GB
"total": torch.cuda.get_device_properties(0).total_memory / 1024**3 # GB
}
return info
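
# Illustrative result: on a CPU-only machine get_device_info() returns
# {"cuda_available": False, "device_count": 0, "current_device": None,
#  "device_name": "CPU"} and omits the "gpu_memory" key.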
# Module initialization
if os.getenv("DEBUG_MODE", "false").lower() == "true":
print(f"Models module v{__version__} initialized")
device_info = get_device_info()
print(f"Device: {device_info['device_name']}")
if device_info['cuda_available']:
print(f"GPU Memory: {device_info['gpu_memory']['total']:.1f}GB") |