File size: 5,846 Bytes
22ca508
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
"""
Interfaz web usando Gradio para el GPT local
"""
import gradio as gr
from models.model_loader import ModelLoader
from models.text_generator import TextGenerator
import logging

logger = logging.getLogger(__name__)

class GradioInterface:
    """Gradio web interface for the local GPT: chat tab, model selection, status."""

    def __init__(self):
        # Shared model-loading / inference helpers used by every UI callback.
        self.model_loader = ModelLoader()
        self.text_generator = TextGenerator(self.model_loader)
        # Small checkpoints that are practical to run locally.
        self.available_models = [
            "microsoft/DialoGPT-small",
            "gpt2",
            "distilgpt2",
        ]

    def load_model_ui(self, model_name):
        """Load *model_name* from the interface and return a status string.

        On success the chat history is reset, since a new model invalidates any
        previously accumulated conversation context.
        """
        try:
            if self.model_loader.load_model(model_name):
                self.text_generator.reset_chat_history()
                return f"✅ Modelo '{model_name}' cargado exitosamente"
            return f"❌ Error al cargar el modelo '{model_name}'"
        except Exception as e:
            # Robustness fix: surface loader exceptions in the status box instead
            # of crashing the callback (same handling as the inner load_model
            # handler in create_interface).
            return f"❌ Error: {str(e)}"

    def chat_response(self, message, history, temperature, max_length, top_p):
        """Generate a chat reply and append the (user, bot) pair to *history*.

        Returns the updated history in the (user, bot) tuple format consumed by
        gr.Chatbot.
        """
        if not self.model_loader.is_loaded():
            # Bug fix: the transcript used to show the literal string "Error" in
            # place of the user's message; keep the real message and put the
            # notice on the bot side, consistent with the except-branch below.
            return history + [(message, "Por favor, carga un modelo primero")]

        try:
            response = self.text_generator.generate_response(
                message,
                temperature=temperature,
                max_length=max_length,
                top_p=top_p,
            )
            history.append((message, response))
            return history
        except Exception as e:
            # Show failures inline in the conversation rather than raising.
            history.append((message, f"Error: {str(e)}"))
            return history

    def generate_text_ui(self, prompt, temperature, max_length, top_p):
        """Generate free-form text from *prompt*; returns the generated string."""
        if not self.model_loader.is_loaded():
            return "Error: Por favor, carga un modelo primero"

        return self.text_generator.generate_text(
            prompt,
            temperature=temperature,
            max_length=max_length,
            top_p=top_p,
        )

    def reset_chat(self):
        """Reset the chat history; returns (empty history, status message)."""
        self.text_generator.reset_chat_history()
        return [], "Chat reiniciado"

    def get_model_status(self):
        """Build a Markdown summary of the loaded model and chat statistics."""
        info = self.model_loader.get_model_info()
        stats = self.text_generator.get_generation_stats()

        status = f"""
**Estado del Modelo:**
- {info['status']}
- Dispositivo: {info.get('device', 'N/A')}
- Tipo: {info.get('model_type', 'N/A')}
- Vocabulario: {info.get('vocab_size', 'N/A')}

**Estadísticas de Chat:**
- Longitud del historial: {stats['history_length']} tokens
- Dispositivo activo: {stats['device']}
        """
        return status

    def create_interface(self):
        """Build and return the simplified Gradio Blocks app."""
        with gr.Blocks(title="GPT Local") as demo:
            gr.Markdown("# 🤖 GPT Local con Hugging Face")

            with gr.Tab("💬 Chat"):
                chatbot = gr.Chatbot(height=400)
                msg = gr.Textbox(placeholder="Escribe tu mensaje...", label="Mensaje")

                with gr.Row():
                    send_btn = gr.Button("Enviar", variant="primary")
                    clear_btn = gr.Button("Limpiar")

                # Basic sampling parameters exposed to the user.
                with gr.Row():
                    temperature = gr.Slider(0.1, 2.0, 0.7, label="Temperatura")
                    max_length = gr.Slider(50, 500, 200, label="Longitud Máxima")

            with gr.Tab("⚙️ Configuración"):
                model_dropdown = gr.Dropdown(
                    choices=self.available_models,
                    value=self.available_models[0],
                    label="Modelo"
                )
                load_btn = gr.Button("Cargar Modelo")
                status_text = gr.Textbox(label="Estado", interactive=False)

            # Simplified event handlers (closures over self).
            def respond(message, history, temp, max_len):
                # Ignore empty submissions but still clear the textbox.
                if not message.strip():
                    return history, ""

                # Consistency fix: check the loader up front (like chat_response
                # and generate_text_ui) so the user sees a clear notice instead
                # of a raw exception message.
                if not self.model_loader.is_loaded():
                    history.append((message, "Por favor, carga un modelo primero"))
                    return history, ""

                try:
                    response = self.text_generator.generate_response(
                        message, temperature=temp, max_length=max_len
                    )
                    history.append((message, response))
                    return history, ""
                except Exception as e:
                    history.append((message, f"Error: {str(e)}"))
                    return history, ""

            def load_model(model_name):
                try:
                    if self.model_loader.load_model(model_name):
                        self.text_generator.reset_chat_history()
                        return f"✅ Modelo '{model_name}' cargado"
                    else:
                        return f"❌ Error al cargar '{model_name}'"
                except Exception as e:
                    return f"❌ Error: {str(e)}"

            def clear_chat():
                self.text_generator.reset_chat_history()
                return []

            # Wire up events.
            msg.submit(respond, [msg, chatbot, temperature, max_length], [chatbot, msg])
            send_btn.click(respond, [msg, chatbot, temperature, max_length], [chatbot, msg])
            clear_btn.click(clear_chat, outputs=chatbot)
            load_btn.click(load_model, model_dropdown, status_text)

            # NOTE(review): demo.load fires on EVERY page load, so the default
            # model is (re)loaded per visit — confirm ModelLoader.load_model is
            # a cheap no-op when that model is already loaded.
            demo.load(lambda: load_model(self.available_models[0]), outputs=status_text)

        return demo

    def launch(self, **kwargs):
        """Create the interface and start the Gradio server (kwargs → demo.launch)."""
        demo = self.create_interface()
        demo.launch(**kwargs)