#!/usr/bin/env python3
"""
Download a sample GGUF model for testing llama.cpp integration
"""
import os
from huggingface_hub import hf_hub_download
from config import get_recommended_model, MODEL_DOWNLOAD_CONFIG
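
# This script assumes a sibling config.py exposing get_recommended_model() and
# MODEL_DOWNLOAD_CONFIG (not shown here). A minimal sketch of the expected
# shape, inferred from how the names are used below -- the concrete values are
# illustrative assumptions, not the Space's actual config:
#
#   MODEL_DOWNLOAD_CONFIG = {
#       'cache_dir': './models',        # where GGUF files are stored
#       'resume_download': True,        # passed through to hf_hub_download
#       'use_auth_token': None,         # HF token for gated/private repos
#   }
#
#   def get_recommended_model():
#       return {
#           'name': 'TinyLlama 1.1B Chat (Q4_K_M)',
#           'repo_id': 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF',
#           'filename': 'tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf',
#           'size': '~670 MB',
#           'description': 'Small chat model, good for smoke tests',
#       }
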
def download_sample_model():
    """Download a recommended small model for testing"""
    model_info = get_recommended_model()

    print(f"📥 Downloading {model_info['name']}...")
    print(f"   Repository: {model_info['repo_id']}")
    print(f"   File: {model_info['filename']}")
    print(f"   Size: {model_info['size']}")
    print(f"   Description: {model_info['description']}")

    try:
        # Create models directory if it doesn't exist
        os.makedirs(MODEL_DOWNLOAD_CONFIG['cache_dir'], exist_ok=True)

        # Download the model
        model_path = hf_hub_download(
            repo_id=model_info['repo_id'],
            filename=model_info['filename'],
            cache_dir=MODEL_DOWNLOAD_CONFIG['cache_dir'],
            resume_download=MODEL_DOWNLOAD_CONFIG['resume_download'],
            token=MODEL_DOWNLOAD_CONFIG['use_auth_token']
        )

        print("✅ Model downloaded successfully!")
        print(f"   Path: {model_path}")

        # Create a symlink in the models directory for easy access
        symlink_path = os.path.join(MODEL_DOWNLOAD_CONFIG['cache_dir'], "model.gguf")
        # lexists (unlike exists) also catches a dangling symlink left by a previous run
        if os.path.lexists(symlink_path):
            os.remove(symlink_path)
        try:
            os.symlink(model_path, symlink_path)
            print(f"   Symlink created: {symlink_path}")
        except OSError:
            # Symlinks might not work on all systems, just report the real path
            print(f"   Use this path in your code: {model_path}")

        return model_path
    except Exception as e:
        print(f"❌ Error downloading model: {e}")
        print("💡 You can manually download a GGUF model and place it in ./models/")
        return None
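
# A minimal sketch of consuming the downloaded path with llama-cpp-python
# (assumes the llama-cpp-python package is installed; kwargs are illustrative):
#
#   from llama_cpp import Llama
#   llm = Llama(model_path=model_path, n_ctx=2048)
#   out = llm("Q: What is a GGUF file? A:", max_tokens=32)
#   print(out["choices"][0]["text"])
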
def list_available_models():
    """List models available in the models directory"""
    models_dir = MODEL_DOWNLOAD_CONFIG['cache_dir']

    if not os.path.exists(models_dir):
        print(f"📁 Models directory doesn't exist: {models_dir}")
        return []

    model_files = []
    for file in os.listdir(models_dir):
        if file.endswith(('.gguf', '.ggml')):
            file_path = os.path.join(models_dir, file)
            file_size = os.path.getsize(file_path)
            model_files.append({
                'name': file,
                'path': file_path,
                'size_mb': file_size / (1024 * 1024)
            })

    if model_files:
        print("📋 Available models:")
        for model in model_files:
            print(f"   - {model['name']} ({model['size_mb']:.1f} MB)")
    else:
        print("📁 No GGUF/GGML models found in models directory")

    return model_files

if __name__ == "__main__":
    print("🤖 Model Download Utility for llama.cpp")
    print("=" * 50)

    # List existing models
    print("\n🔍 Checking for existing models...")
    existing_models = list_available_models()

    if not existing_models:
        print("\n📥 No models found. Downloading sample model...")
        download_sample_model()
    else:
        print(f"\n✅ Found {len(existing_models)} existing model(s)")

        # Ask if user wants to download another model
        print("\n❓ Download sample model anyway? (y/n): ", end="")
        try:
            response = input().lower().strip()
            if response in ['y', 'yes']:
                download_sample_model()
            else:
                print("👍 Using existing models")
        except (EOFError, KeyboardInterrupt):
            print("\n👋 Using existing models")