# gpt-local/ui/gradio_interface.py
"""
Interfaz web usando Gradio para el GPT local
"""
import gradio as gr
from models.model_loader import ModelLoader
from models.text_generator import TextGenerator
import logging
logger = logging.getLogger(__name__)


class GradioInterface:
    """Gradio web UI wrapping the model loader and text generator."""

    def __init__(self):
        self.model_loader = ModelLoader()
        self.text_generator = TextGenerator(self.model_loader)
        self.available_models = [
            "microsoft/DialoGPT-small",
            "gpt2",
            "distilgpt2"
        ]

    def load_model_ui(self, model_name):
        """Load a model from the interface."""
        if self.model_loader.load_model(model_name):
            self.text_generator.reset_chat_history()
            return f"✅ Modelo '{model_name}' cargado exitosamente"
        else:
            return f"❌ Error al cargar el modelo '{model_name}'"

    def chat_response(self, message, history, temperature, max_length, top_p):
        """Generate a chat response and append it to the history."""
        if not self.model_loader.is_loaded():
            return history + [("Error", "Por favor, carga un modelo primero")]
        try:
            # Generate the response
            response = self.text_generator.generate_response(
                message,
                temperature=temperature,
                max_length=max_length,
                top_p=top_p
            )
            # Update the history
            history.append((message, response))
            return history
        except Exception as e:
            error_msg = f"Error: {str(e)}"
            history.append((message, error_msg))
            return history

    def generate_text_ui(self, prompt, temperature, max_length, top_p):
        """Generate text from a prompt."""
        if not self.model_loader.is_loaded():
            return "Error: Por favor, carga un modelo primero"
        return self.text_generator.generate_text(
            prompt,
            temperature=temperature,
            max_length=max_length,
            top_p=top_p
        )

    def reset_chat(self):
        """Reset the chat history."""
        self.text_generator.reset_chat_history()
        return [], "Chat reiniciado"

    def get_model_status(self):
        """Return a Markdown summary of the model state and chat statistics."""
        info = self.model_loader.get_model_info()
        stats = self.text_generator.get_generation_stats()
        status = f"""
**Estado del Modelo:**
- {info['status']}
- Dispositivo: {info.get('device', 'N/A')}
- Tipo: {info.get('model_type', 'N/A')}
- Vocabulario: {info.get('vocab_size', 'N/A')}

**Estadísticas de Chat:**
- Longitud del historial: {stats['history_length']} tokens
- Dispositivo activo: {stats['device']}
"""
        return status

    def create_interface(self):
        """Build the simplified Gradio interface."""
        with gr.Blocks(title="GPT Local") as demo:
            gr.Markdown("# 🤖 GPT Local con Hugging Face")

            with gr.Tab("💬 Chat"):
                chatbot = gr.Chatbot(height=400)
                msg = gr.Textbox(placeholder="Escribe tu mensaje...", label="Mensaje")
                with gr.Row():
                    send_btn = gr.Button("Enviar", variant="primary")
                    clear_btn = gr.Button("Limpiar")

                # Basic generation parameters
                with gr.Row():
                    temperature = gr.Slider(0.1, 2.0, 0.7, label="Temperatura")
                    max_length = gr.Slider(50, 500, 200, label="Longitud Máxima")

            with gr.Tab("⚙️ Configuración"):
                model_dropdown = gr.Dropdown(
                    choices=self.available_models,
                    value=self.available_models[0],
                    label="Modelo"
                )
                load_btn = gr.Button("Cargar Modelo")
                status_text = gr.Textbox(label="Estado", interactive=False)

            # Simplified event handlers
            def respond(message, history, temp, max_len):
                if not message.strip():
                    return history, ""
                try:
                    response = self.text_generator.generate_response(
                        message, temperature=temp, max_length=max_len
                    )
                    history.append((message, response))
                    return history, ""
                except Exception as e:
                    history.append((message, f"Error: {str(e)}"))
                    return history, ""

            def load_model(model_name):
                try:
                    if self.model_loader.load_model(model_name):
                        self.text_generator.reset_chat_history()
                        return f"✅ Modelo '{model_name}' cargado"
                    else:
                        return f"❌ Error al cargar '{model_name}'"
                except Exception as e:
                    return f"❌ Error: {str(e)}"

            def clear_chat():
                self.text_generator.reset_chat_history()
                return []

            # Wire up the events
            msg.submit(respond, [msg, chatbot, temperature, max_length], [chatbot, msg])
            send_btn.click(respond, [msg, chatbot, temperature, max_length], [chatbot, msg])
            clear_btn.click(clear_chat, outputs=chatbot)
            load_btn.click(load_model, model_dropdown, status_text)

            # Load the default model when the app starts
            demo.load(lambda: load_model(self.available_models[0]), outputs=status_text)

        return demo

    def launch(self, **kwargs):
        """Build and launch the interface."""
        demo = self.create_interface()
        demo.launch(**kwargs)
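

# Usage sketch (not part of the original module): a minimal entry point for running
# this UI directly, assuming the models/ package is importable from the project root.
# The project may instead start the app from a separate script, and the host/port
# values below are illustrative defaults, not confirmed settings.
if __name__ == "__main__":
    interface = GradioInterface()
    interface.launch(server_name="0.0.0.0", server_port=7860)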