#!/usr/bin/env python3
"""
Script de utilidades para GPT Local
Manejo de modelos, limpieza, actualización
"""
import sys
import os
import shutil
import subprocess
from pathlib import Path


def check_environment():
    """Check the environment and dependencies"""
    print("🔍 Checking environment...")
    try:
        import torch
        import transformers
        import gradio as gr

        print(f"✅ PyTorch: {torch.__version__}")
        print(f"✅ Transformers: {transformers.__version__}")
        print(f"✅ Gradio: {gr.__version__}")
        print(f"✅ CUDA available: {torch.cuda.is_available()}")
        print(f"✅ MPS available: {torch.backends.mps.is_available() if hasattr(torch.backends, 'mps') else 'N/A'}")
        return True
    except ImportError as e:
        print(f"❌ Import error: {e}")
        return False
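

# A minimal sketch (an addition, not in the original script): pick the best
# available torch device, mirroring the checks printed by check_environment().
# The function name is an assumption added for illustration.
def best_device():
    """Return "cuda", "mps", or "cpu" depending on what torch reports."""
    import torch
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"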


def clean_cache():
    """Clean model and bytecode caches"""
    print("🧹 Cleaning caches...")

    cache_dirs = [
        Path.home() / ".cache" / "huggingface",
        Path("models_cache"),
        Path("__pycache__"),
    ]

    for cache_dir in cache_dirs:
        if cache_dir.exists():
            # Report the directory size before asking for confirmation
            size = sum(f.stat().st_size for f in cache_dir.rglob('*') if f.is_file())
            print(f"📁 {cache_dir}: {size / (1024*1024*1024):.2f} GB")
            response = input(f"Delete {cache_dir}? (y/N): ")
            if response.lower() == 'y':
                shutil.rmtree(cache_dir)
                print(f"✅ {cache_dir} deleted")


def install_dependencies():
    """Install dependencies"""
    print("📦 Installing dependencies...")
    # check=True makes a failed pip run raise instead of passing silently
    subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)


def update_dependencies():
    """Update dependencies"""
    print("⬆️ Updating dependencies...")
    subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "-r", "requirements.txt"], check=True)


def list_models():
    """List locally cached models"""
    print("📋 Available models:")
    cache_dir = Path.home() / ".cache" / "huggingface" / "hub"

    if cache_dir.exists():
        models = [d.name for d in cache_dir.iterdir() if d.is_dir()]
        for model in models:
            print(f"  - {model}")
    else:
        print("  No models in cache")


def download_model(model_name):
    """Download a specific model"""
    print(f"⬇️ Downloading model: {model_name}")
    try:
        from transformers import AutoTokenizer, AutoModelForCausalLM

        # Instantiating the tokenizer and model pulls their files into the
        # local Hugging Face cache; the objects themselves are not kept.
        print("Downloading tokenizer...")
        AutoTokenizer.from_pretrained(model_name)
        print("Downloading model...")
        AutoModelForCausalLM.from_pretrained(model_name)
        print(f"✅ Model {model_name} downloaded successfully")
    except Exception as e:
        print(f"❌ Download error: {e}")


def show_usage():
    """Show usage information"""
    print("""
🤖 GPT Local - Utilities

Available commands:
  check    - Check the environment
  clean    - Clean caches
  install  - Install dependencies
  update   - Update dependencies
  models   - List cached models
  download - Download a specific model

Examples:
  python3 utils.py check
  python3 utils.py download microsoft/DialoGPT-small
  python3 utils.py clean
""")


def main():
    if len(sys.argv) < 2:
        show_usage()
        return

    command = sys.argv[1]

    if command == "check":
        check_environment()
    elif command == "clean":
        clean_cache()
    elif command == "install":
        install_dependencies()
    elif command == "update":
        update_dependencies()
    elif command == "models":
        list_models()
    elif command == "download":
        if len(sys.argv) < 3:
            print("❌ Specify the model name")
            return
        download_model(sys.argv[2])
    else:
        print(f"❌ Unknown command: {command}")
        show_usage()


if __name__ == "__main__":
    main()