#!/usr/bin/env python3
"""
Utility script for GPT Local
Model management, cache cleanup, and dependency updates
"""
import sys
import shutil
import subprocess
from pathlib import Path

def check_environment():
    """Check the environment and dependencies."""
    print("🔍 Checking environment...")
    try:
        import torch
        import transformers
        import gradio as gr
        print(f"✅ PyTorch: {torch.__version__}")
        print(f"✅ Transformers: {transformers.__version__}")
        print(f"✅ Gradio: {gr.__version__}")
        print(f"✅ CUDA available: {torch.cuda.is_available()}")
        print(f"✅ MPS available: {torch.backends.mps.is_available() if hasattr(torch.backends, 'mps') else 'N/A'}")
        return True
    except ImportError as e:
        print(f"❌ Import error: {e}")
        return False
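
# Companion sketch: the checks above imply how a caller would pick a device.
# `get_device` is a hypothetical helper (not part of the original script),
# assuming torch is installed.
def get_device():
    """Return the best available torch device string: cuda, mps, or cpu."""
    import torch
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"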

def clean_cache():
    """Clean model caches."""
    print("🧹 Cleaning cache...")
    cache_dirs = [
        Path.home() / ".cache" / "huggingface",
        Path("models_cache"),
        Path("__pycache__"),
    ]
    for cache_dir in cache_dirs:
        if cache_dir.exists():
            size = sum(f.stat().st_size for f in cache_dir.rglob('*') if f.is_file())
            print(f"📁 {cache_dir}: {size / (1024*1024*1024):.2f} GB")
            response = input(f"Delete {cache_dir}? (y/N): ")
            if response.lower() == 'y':
                shutil.rmtree(cache_dir)
                print(f"✅ {cache_dir} deleted")

def install_dependencies():
    """Install dependencies."""
    print("📦 Installing dependencies...")
    # check=True surfaces pip failures instead of silently ignoring the exit code
    subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)

def update_dependencies():
    """Update dependencies."""
    print("⬆️ Updating dependencies...")
    subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "-r", "requirements.txt"], check=True)

def list_models():
    """List locally cached models."""
    print("📋 Available models:")
    cache_dir = Path.home() / ".cache" / "huggingface" / "hub"
    if cache_dir.exists():
        # Hub cache folders are named like "models--org--name"; filter on the
        # prefix so dataset entries and lock files are not listed as models.
        models = [d.name for d in cache_dir.iterdir()
                  if d.is_dir() and d.name.startswith("models--")]
        for model in models:
            print(f"  - {model}")
    else:
        print("  No models in cache")

def download_model(model_name):
    """Download a specific model."""
    print(f"⬇️ Downloading model: {model_name}")
    try:
        from transformers import AutoTokenizer, AutoModelForCausalLM
        print("Downloading tokenizer...")
        AutoTokenizer.from_pretrained(model_name)
        print("Downloading model...")
        AutoModelForCausalLM.from_pretrained(model_name)
        print(f"✅ Model {model_name} downloaded successfully")
    except Exception as e:
        print(f"❌ Download error: {e}")

def show_usage():
    """Show usage information."""
    print("""
🤖 GPT Local - Utilities

Available commands:
  check     - Check the environment
  clean     - Clean caches
  install   - Install dependencies
  update    - Update dependencies
  models    - List cached models
  download  - Download a specific model

Examples:
  python3 utils.py check
  python3 utils.py download microsoft/DialoGPT-small
  python3 utils.py clean
""")

def main():
    if len(sys.argv) < 2:
        show_usage()
        return
    command = sys.argv[1]
    if command == "check":
        check_environment()
    elif command == "clean":
        clean_cache()
    elif command == "install":
        install_dependencies()
    elif command == "update":
        update_dependencies()
    elif command == "models":
        list_models()
    elif command == "download":
        if len(sys.argv) < 3:
            print("❌ Specify the model name")
            return
        download_model(sys.argv[2])
    else:
        print(f"❌ Unknown command: {command}")
        show_usage()

if __name__ == "__main__":
    main()