""" |
|
Versión simple del GPT Local sin Gradio |
|
""" |
|
|
|
import sys
import logging
from pathlib import Path

# Make the sibling packages (models, config) importable when the script is run directly.
sys.path.append(str(Path(__file__).parent))

from models.model_loader import ModelLoader
from models.text_generator import TextGenerator
from config.settings import DEFAULT_MODEL
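
# The only interface this script relies on from the local packages, inferred
# from the call sites below:
#   ModelLoader()                        -> loader instance
#   ModelLoader.load_model(name)         -> truthy on success
#   TextGenerator(loader)                -> generator bound to the loader
#   TextGenerator.generate_response(text, temperature=..., max_length=...) -> reply text
#   TextGenerator.reset_chat_history()   -> clears the conversation context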
|
|
|
def setup_logging():
    """Configure the logging system."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
|
|
|
def main():
    """Main entry point for the terminal chat."""
    setup_logging()
    logger = logging.getLogger(__name__)

    print("🤖 GPT Local - Terminal Chat")
    print("=" * 50)

    # The generator wraps the loader and produces replies from the loaded model.
    model_loader = ModelLoader()
    text_generator = TextGenerator(model_loader)

    print(f"📦 Loading model: {DEFAULT_MODEL}")
    if not model_loader.load_model(DEFAULT_MODEL):
        print("❌ Failed to load the model")
        return

    print("✅ Model loaded successfully")
    print("💡 Type 'salir' or 'exit' to quit")
    print("💡 Type 'reset' to restart the chat")
    print("-" * 50)

    try:
        while True:
            user_input = input("\n👤 You: ").strip()

            if user_input.lower() in ['salir', 'exit', 'quit']:
                print("👋 Goodbye!")
                break

            if user_input.lower() == 'reset':
                text_generator.reset_chat_history()
                print("🔄 Chat reset")
                continue

            # Ignore empty input.
            if not user_input:
                continue

            print("🤖 GPT: ", end="", flush=True)
            # A failed generation should not end the session.
            try:
                response = text_generator.generate_response(
                    user_input,
                    temperature=0.7,
                    max_length=200
                )
                print(response)
            except Exception as e:
                print(f"❌ Error: {str(e)}")

    except KeyboardInterrupt:
        print("\n👋 Chat interrupted by the user")
    except Exception as e:
        logger.error(f"❌ Error in the chat: {str(e)}")
|
|
|
if __name__ == "__main__":
    main()
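
# The models/ and config/ packages are expected to live next to this file (see the
# sys.path setup above). Change DEFAULT_MODEL in config/settings.py to load a
# different model.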
|
|