import gradio as gr
from gradio_client import Client, handle_file
import pandas as pd
import json
import tempfile
import os
from datetime import datetime
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
from smolagents import CodeAgent, tool, InferenceClientModel
import logging

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- GRADIO CLIENT SETUP ---
try:
    biotech_client = Client("C2MV/BiotechU4")
    analysis_client = Client("C2MV/Project-HF-2025")
except Exception as e:
    logger.error(f"No se pudieron inicializar los clientes de Gradio. Verifica las conexiones y los Spaces: {e}")
    biotech_client = None
    analysis_client = None

# Hugging Face inference engine setup (optional)
try:
    hf_engine = InferenceClientModel(model_id="mistralai/Mistral-7B-Instruct-v0.2")
except Exception:
    logger.warning("No se pudo inicializar el modelo de HF. Los agentes usarán lógica simple.")
    hf_engine = None

# ============================================================================
# 🤖 AGENT SYSTEM
# ============================================================================
class BiotechAgentTools:
    @tool
    def analyze_data_characteristics(data_info: str) -> dict:
        """
        Analiza las características de los datos biotecnológicos subidos.
        Args:
            data_info (str): Información sobre el archivo de datos incluyendo nombre, tipo y contenido
        Returns:
            dict: Diccionario con tipo de experimento, modelos recomendados, parámetros sugeridos y calidad de datos
        """
        try:
            characteristics = {"experiment_type": "unknown", "recommended_models": [], "suggested_params": {}, "data_quality": "good"}
            data_lower = data_info.lower()
            models_from_docs = ['logistic', 'gompertz', 'moser', 'baranyi', 'monod', 'contois', 'andrews', 'tessier', 'richards', 'stannard', 'huang']
            growth_models = [m for m in ['logistic', 'gompertz', 'baranyi', 'richards'] if m in models_from_docs]
            fermentation_models = [m for m in ['monod', 'contois', 'andrews', 'moser'] if m in models_from_docs]
            if "biomass" in data_lower or "growth" in data_lower:
                characteristics.update({"experiment_type": "growth_kinetics", "recommended_models": growth_models, "suggested_params": {"component": "biomass", "use_de": True, "maxfev": 75000}})
            elif "ferment" in data_lower or "substrate" in data_lower:
                characteristics.update({"experiment_type": "fermentation", "recommended_models": fermentation_models,"suggested_params": {"component": "all", "use_de": False, "maxfev": 50000}})
            else:
                characteristics.update({"experiment_type": "general_biotech", "recommended_models": growth_models, "suggested_params": {"component": "all", "use_de": False, "maxfev": 50000}})
            logger.info(f"Análisis completado: {characteristics['experiment_type']}")
            return characteristics
        except Exception as e:
            logger.error(f"Error en análisis de datos: {str(e)}")
            return {"experiment_type": "error", "recommended_models": ['logistic', 'gompertz'], "suggested_params": {"component": "all", "use_de": False, "maxfev": 50000}, "data_quality": "unknown"}

    @tool
    def evaluate_analysis_quality(results_info: str) -> dict:
        """
        Evalúa la calidad de los resultados del análisis biotecnológico.
        Args:
            results_info (str): Información sobre los resultados del análisis incluyendo métricas y estado
        Returns:
            dict: Diccionario con puntuación de calidad, estado satisfactorio, recomendaciones y necesidad de reintento
        """
        try:
            evaluation = {"quality_score": 0.8, "is_satisfactory": True, "recommendations": [], "needs_retry": False}
            results_lower = results_info.lower()
            if "error" in results_lower or "failed" in results_lower:
                evaluation.update({"quality_score": 0.2, "is_satisfactory": False, "needs_retry": True, "recommendations": ["Retry with different parameters"]})
            elif "r2" in results_lower or "rmse" in results_lower:
                evaluation.update({"quality_score": 0.9, "is_satisfactory": True, "recommendations": ["Results look good"]})
            logger.info(f"Evaluación de calidad: {evaluation['quality_score']}")
            return evaluation
        except Exception as e:
            logger.error(f"Error en evaluación: {str(e)}")
            return {"quality_score": 0.5, "is_satisfactory": True, "recommendations": ["Continue with analysis"], "needs_retry": False}

    @tool
    def prepare_claude_context(data_summary: str) -> str:
        """
        Prepara el contexto específico para el análisis de Claude.
        Args:
            data_summary (str): Resumen de los datos analizados incluyendo tipo de experimento y resultados
        Returns:
            str: Contexto enriquecido y estructurado para el análisis de Claude
        """
        try:
            enhanced_context = f"""CONTEXTO BIOTECNOLÓGICO ESPECÍFICO:
            Datos analizados: {data_summary}
            Por favor, enfócate en:
            1. Interpretación biológica de los parámetros ajustados
            2. Comparación de la bondad de ajuste entre modelos
            3. Implicaciones prácticas para el proceso biotecnológico
            4. Recomendaciones para optimización del proceso
            5. Identificación de posibles limitaciones del modelo
            Incluye análisis estadístico riguroso y recomendaciones prácticas."""
            logger.info("Contexto preparado para Claude")
            return enhanced_context
        except Exception as e:
            logger.error(f"Error preparando contexto: {str(e)}")
            return data_summary

class CoordinatorAgent:
    def __init__(self):
        self.agent = CodeAgent(tools=[BiotechAgentTools.analyze_data_characteristics, BiotechAgentTools.evaluate_analysis_quality, BiotechAgentTools.prepare_claude_context], model=hf_engine) if hf_engine else None
        self.tools = BiotechAgentTools()
    def analyze_and_optimize(self, file_info: str, current_config: dict) -> dict:
        try:
            logger.info("🤖 Agente Coordinador iniciando análisis...")
            characteristics = self.tools.analyze_data_characteristics(file_info)
            optimized_config = current_config.copy()
            if characteristics["experiment_type"] != "error":
                optimized_config.update({"models": characteristics["recommended_models"], "component": characteristics["suggested_params"]["component"], "use_de": characteristics["suggested_params"]["use_de"], "maxfev": characteristics["suggested_params"]["maxfev"]})
                if characteristics["experiment_type"] == "growth_kinetics":
                    optimized_config["additional_specs"] = "Enfócate en el análisis de cinética de crecimiento: interpretación de μmax, lag time, etc."
                elif characteristics["experiment_type"] == "fermentation":
                    optimized_config["additional_specs"] = "Enfócate en el análisis de fermentación: eficiencia de conversión, productividad, etc."
            logger.info(f"✅ Configuración optimizada para: {characteristics['experiment_type']}")
            return {"config": optimized_config, "analysis": characteristics, "recommendations": f"Configuración optimizada para {characteristics['experiment_type']}"}
        except Exception as e:
            logger.error(f"❌ Error en Agente Coordinador: {str(e)}")
            return {"config": current_config, "analysis": {"experiment_type": "error"}, "recommendations": f"Error en optimización: {str(e)}"}

class RecoveryAgent:
    def __init__(self):
        self.agent = CodeAgent(tools=[BiotechAgentTools.analyze_data_characteristics], model=hf_engine) if hf_engine else None
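        # Fallback strategies, tried in order (the original heuristic): fewer models with a lighter
        # fit, then a single model with a heavy differential-evolution run, then a biomass-only fit.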
        self.retry_strategies = [{"use_de": False, "maxfev": 25000, "models_subset": 2}, {"use_de": True, "maxfev": 100000, "models_subset": 1}, {"component": "biomass", "use_de": False, "maxfev": 50000}]
    def attempt_recovery(self, original_config: dict, error_info: str, attempt: int = 0) -> dict:
        if attempt >= len(self.retry_strategies):
            return {"success": False, "message": "Todas las estrategias fallaron"}
        strategy = self.retry_strategies[attempt]
        recovery_config = original_config.copy()
        if "models_subset" in strategy:
            recovery_config["models"] = recovery_config.get("models", ["logistic"])[:strategy["models_subset"]]
        for key, value in strategy.items():
            if key != "models_subset":
                recovery_config[key] = value
        logger.info(f"🔧 Aplicando estrategia de recuperación {attempt + 1}")
        return {"success": True, "config": recovery_config, "strategy": strategy}

class QualityAgent:
    def __init__(self):
        self.agent = CodeAgent(tools=[BiotechAgentTools.evaluate_analysis_quality], model=hf_engine) if hf_engine else None
        self.tools = BiotechAgentTools()
    def evaluate_results(self, results_summary: str) -> dict:
        try:
            evaluation = self.tools.evaluate_analysis_quality(results_summary)
            quality_feedback = {"quality_score": evaluation["quality_score"], "is_acceptable": evaluation["is_satisfactory"], "feedback": evaluation["recommendations"], "needs_improvement": evaluation["needs_retry"]}
            logger.info(f"✅ Evaluación de calidad: {quality_feedback['quality_score']:.2f}")
            return quality_feedback
        except Exception as e:
            logger.error(f"❌ Error en evaluación de calidad: {str(e)}")
            return {"quality_score": 0.7, "is_acceptable": True, "feedback": ["Evaluación completada con advertencias"], "needs_improvement": False}

class ContextAgent:
    def __init__(self):
        self.agent = CodeAgent(tools=[BiotechAgentTools.prepare_claude_context], model=hf_engine) if hf_engine else None
        self.tools = BiotechAgentTools()
    def enhance_analysis_context(self, data_summary: str, experiment_type: str) -> str:
        try:
            enhanced_context = self.tools.prepare_claude_context(f"Tipo de experimento: {experiment_type}. Datos: {data_summary}")
            logger.info("📊 Contexto mejorado para Claude")
            return enhanced_context
        except Exception as e:
            logger.error(f"❌ Error mejorando contexto: {str(e)}")
            return data_summary

class BiotechAgentSystem:
    def __init__(self):
        self.coordinator = CoordinatorAgent()
        self.recovery = RecoveryAgent()
        self.quality = QualityAgent()
        self.context = ContextAgent()
        logger.info("🚀 Sistema de agentes inicializado")
    def process_with_agents(self, file_info: str, user_config: dict) -> dict:
        try:
            coordination_result = self.coordinator.analyze_and_optimize(file_info, user_config)
            optimized_config = coordination_result["config"]
            experiment_type = coordination_result["analysis"]["experiment_type"]
            quality_result = self.quality.evaluate_results("Initial configuration optimized")
            enhanced_specs = self.context.enhance_analysis_context(file_info, experiment_type)
            optimized_config["additional_specs"] = enhanced_specs
            return {"success": True, "optimized_config": optimized_config, "experiment_type": experiment_type, "recommendations": coordination_result["recommendations"], "quality_score": quality_result["quality_score"]}
        except Exception as e:
            logger.error(f"❌ Error en sistema de agentes: {str(e)}")
            return {"success": False, "optimized_config": user_config, "experiment_type": "error", "recommendations": f"Error: {str(e)}", "quality_score": 0.5}

# ============================================================================
# ⚙️ PIPELINE FUNCTIONS
# ============================================================================
agent_system = BiotechAgentSystem()

def process_biotech_data(file, models, component, use_de, maxfev, exp_names):
    if not biotech_client:
        return None, None, "Error: El cliente de biotecnología no está inicializado."
    try:
        file_path = file.name if hasattr(file, 'name') else file
        # API call as described in the Space's documentation
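        # Expected return shape (an assumption, based on how callers unpack the result):
        #   (plot_info, df_data, status): plot payload, Gradio dataframe dict, status string.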
        return biotech_client.predict(file=handle_file(file_path), models=models, component=component, use_de=use_de, maxfev=maxfev, exp_names=exp_names, theme=False, api_name="/run_analysis_wrapper")
    except Exception as e:
        logger.error(f"Error en proceso biotecnológico: {str(e)}")
        return None, None, f"Error en el análisis: {str(e)}"

def create_dummy_plot():
    fig = go.Figure(go.Scatter(x=[], y=[]))
    fig.update_layout(title="Esperando resultados...", template="plotly_white", height=500, annotations=[dict(text="Sube un archivo y ejecuta el pipeline para ver los resultados", showarrow=False)])
    return fig

# --- CORRECTED FUNCTION ---
def parse_plot_data(plot_info):
    """Parsea la información de la gráfica recibida de la API."""
    if not plot_info:
        return create_dummy_plot()
    try:
        # The API returns a dict {'plot': 'JSON_STRING'}; extract the JSON string.
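        # Illustrative payload (assumed shape): {'plot': '{"data": [...], "layout": {...}}'}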
        if isinstance(plot_info, dict) and 'plot' in plot_info:
            plot_json_string = plot_info['plot']
            return go.Figure(json.loads(plot_json_string))
        # Fallback in case the structure changes or is unexpected
        if isinstance(plot_info, str):
            return go.Figure(json.loads(plot_info))
        if isinstance(plot_info, dict):
             return go.Figure(plot_info)
    except Exception as e:
        logger.error(f"Error parsing plot: {e}")
    return create_dummy_plot()

def download_results_as_csv(df_data):
    if not biotech_client:
        return None
    try:
        # The API expects the Gradio dataframe payload and returns a file path
        return biotech_client.predict(df=df_data, api_name="/download_results_excel")
    except Exception as e:
        logger.warning(f"Error con API de descarga: {str(e)}")
        if df_data and 'data' in df_data and 'headers' in df_data:
            try:
                df = pd.DataFrame(df_data['data'], columns=df_data['headers'])
                with tempfile.NamedTemporaryFile(mode='w+', suffix='.csv', delete=False) as temp_file:
                    df.to_csv(temp_file.name, index=False)
                    return temp_file.name
            except Exception as e_local:
                logger.error(f"Error creando CSV local: {e_local}")
        return None

def generate_claude_report(csv_file_path, model, detail_level, language, additional_specs, use_personal_key, personal_api_key):
    local_analysis_client = None
    if use_personal_key and personal_api_key:
        logger.info("Intentando usar la clave API personal de Claude.")
        try:
            local_analysis_client = Client("C2MV/Project-HF-2025", hf_token=personal_api_key)
            logger.info("Cliente inicializado exitosamente con la clave personal.")
        except Exception as e:
            logger.error(f"Fallo al inicializar el cliente con la clave personal: {e}")
            return f"Error: La clave API personal es inválida o hubo un problema de conexión. Detalles: {e}", ""
    else:
        logger.info("Usando la configuración por defecto (secreto) de la API de Claude.")
        local_analysis_client = analysis_client
    if not local_analysis_client:
        return "Error: El cliente de análisis no está disponible.", ""
    try:
        # API call as described in the Space's documentation
        return local_analysis_client.predict(files=[handle_file(csv_file_path)], model=model, detail=detail_level, language=language, additional_specs=additional_specs, api_name="/process_and_store")
    except Exception as e:
        logger.error(f"Error generando reporte con Claude: {str(e)}")
        return f"Error en el análisis de Claude: {str(e)}", ""

def export_report(format_type, language):
    if not analysis_client:
        return None, "Error: El cliente de análisis no está inicializado para la exportación."
    try:
        # The API returns (status, filepath); the order matters.
        return analysis_client.predict(format=format_type, language=language, api_name="/handle_export")
    except Exception as e:
        logger.error(f"Error en exportación: {str(e)}")
        return f"Error al exportar: {str(e)}", None

# --- MAIN PIPELINE FUNCTION (corrected) ---
def process_complete_pipeline_with_agents(file, models, component, use_de, maxfev, exp_names, claude_model, detail_level, language, additional_specs, export_format, use_personal_key, personal_api_key, progress=gr.Progress()):
    progress(0, desc="🚀 Iniciando Pipeline...")
    if not file:
        return create_dummy_plot(), None, None, None, None, "❌ Por favor, sube un archivo."
    if not models:
        return create_dummy_plot(), None, None, None, None, "❌ Por favor, selecciona al menos un modelo."
    
    progress_updates = []
    progress(0.1, desc="🤖 Activando sistema de agentes...")
    file_info = f"Archivo: {os.path.basename(file.name)}, Modelos: {models}"
    user_config = {"models": models, "component": component, "use_de": use_de, "maxfev": maxfev, "claude_model": claude_model, "detail_level": detail_level, "language": language, "additional_specs": additional_specs, "export_format": export_format}
    agent_result = agent_system.process_with_agents(file_info, user_config)
    
    if agent_result["success"]:
        optimized_config = agent_result["optimized_config"]
        progress_updates.extend([f"✅ Agentes detectaron: {agent_result['experiment_type']}", f"🎯 {agent_result['recommendations']}", f"📊 Calidad esperada: {agent_result['quality_score']:.1%}"])
        models, component, use_de, maxfev, additional_specs = (optimized_config.get("models", models), optimized_config.get("component", component), optimized_config.get("use_de", use_de), optimized_config.get("maxfev", maxfev), optimized_config.get("additional_specs", additional_specs))
    else:
        progress_updates.append("⚠️ Agentes no pudieron optimizar, usando config original.")
    
    progress(0.2, desc="🔄 Procesando datos biotecnológicos...")
    plot_info, df_data, status = process_biotech_data(file, models, component, use_de, maxfev, exp_names)
    
    if not plot_info or not df_data or "Error" in str(status):
        return create_dummy_plot(), None, None, None, None, "\n".join(progress_updates) + f"\n❌ Error en análisis: {status}"
    
    progress_updates.append("✅ Análisis biotecnológico completado")
    
    progress(0.4, desc="📥 Descargando resultados...")
    csv_file_path = download_results_as_csv(df_data)
    
    if not csv_file_path:
        return parse_plot_data(plot_info), df_data, None, None, None, "\n".join(progress_updates) + "\n❌ Error al descargar resultados para análisis."
    
    progress_updates.append("✅ Resultados descargados")
    
    progress(0.5, desc=f"🤖 Generando análisis con {claude_model}...")
    analysis, code = generate_claude_report(csv_file_path, claude_model, detail_level, language, additional_specs, use_personal_key, personal_api_key)
    
    if os.path.exists(csv_file_path):
        os.remove(csv_file_path)
    
    if "Error" in analysis:
        return parse_plot_data(plot_info), df_data, analysis, code, None, "\n".join(progress_updates) + f"\n❌ {analysis}"
    
    progress_updates.append("✅ Análisis con Claude completado")
    
    progress(0.9, desc=f"📄 Exportando informe en {export_format}...")
    # FIX: unpack in the correct order (status, filepath)
    export_status, report_file = export_report(export_format, language)
    
    if report_file:
        progress_updates.append("✅ Informe exportado.")
    else:
        progress_updates.append(f"❌ Error al exportar: {export_status}")
    
    progress(1, desc="🎉 Pipeline Completado")
    
    # FIX: pass the `plot_info` object directly to `parse_plot_data`
    return parse_plot_data(plot_info), df_data, analysis, code, report_file, "\n".join(progress_updates)

def create_example_videos():
    examples_dir = "examples"
    if not os.path.exists(examples_dir):
        os.makedirs(examples_dir)
    video_files = ["video1.mp4", "video2.mp4"]
    for video_file in video_files:
        video_path = os.path.join(examples_dir, video_file)
        if not os.path.exists(video_path):
            with open(video_path, 'w') as f:
                f.write("# Video placeholder")
            logger.info(f"Created placeholder for {video_file}")

# ============================================================================
# 🖼️ GRADIO USER INTERFACE
# ============================================================================
BIOTECH_MODELS = ['logistic', 'gompertz', 'moser', 'baranyi', 'monod', 'contois', 'andrews', 'tessier', 'richards', 'stannard', 'huang']
DEFAULT_BIOTECH_SELECTION = [model for model in ['logistic', 'gompertz', 'moser', 'baranyi'] if model in BIOTECH_MODELS]
CLAUDE_MODELS = ['claude-opus-4-20250514', 'claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-3-7-sonnet-20250219', 'claude-3-5-sonnet-20241022']
DEFAULT_CLAUDE_MODEL = 'claude-3-5-sonnet-20241022'

theme = gr.themes.Soft(primary_hue="blue", secondary_hue="indigo", neutral_hue="slate")
custom_css = ".file-upload { border: 2px dashed #3b82f6; } button.primary { background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%); }"

create_example_videos()

with gr.Blocks(theme=theme, title="BioTech Analysis & Report Generator", css=custom_css) as demo:
    gr.Markdown(
        """
        # 🧬 BioTech Analysis & Report Generator
        ## **Based on the pipeline: [C2MV/Agent-Biotech](https://huggingface.co/spaces/C2MV/Agent-Biotech)**
        *An intelligent pipeline that automates the analysis of bioprocess data, from kinetic modeling to generating detailed reports with AI Agents.*
        """
    )
    with gr.Accordion("🤖 How the AI Agents Work (Click to Expand)", open=True):
        gr.Markdown(
            """
            ```text
            [ 👤 USER INPUT: Data File & Initial Settings ]


            ┌──────────────────────────────────────────────────────────┐
            │ 🤖 Coordinator Agent                                     │
            │  • Analyzes experiment type (e.g., kinetics, ferment.).  │
            │  • Recommends optimal models and parameters.             │
            └──────────────────────────┬───────────────────────────────┘

            (If analysis fails)        │ (If analysis succeeds)
                      ┌────────────────┘


            ┌───────────────────────┐        ┌────────────────────────────┐
            │ 🔧 Recovery Agent     │        │ 🧐 Quality Agent           │
            │  • Applies different  │ ◀───── │  • Evaluates statistical  │
            │    strategies to      │ (Retry)│    quality (R², etc).      │
            │    find a solution.   │        └────────────┬───────────────┘
            └───────────────────────┘                     │


                                             ┌────────────────────────────┐
                                             │ ✍️ Context Agent           │
                                             │  • Prepares a rich,       │
                                             │    detailed prompt for     │
                                             │    the final report.       │
                                             └────────────┬───────────────┘


            [ 📄 FINAL OUTPUT: Report, Visualization & Data ]
            ```
            """
        )
    with gr.Row():
        with gr.Column():
            try:
                video1_path = os.path.join("examples", "video1.mp4")
                if os.path.exists(video1_path) and os.path.getsize(video1_path) > 100:
                    gr.Video(value=video1_path, label="Example 1: Automated Analysis", interactive=False)
                else:
                    gr.Markdown("### 🎥 Example 1: Automated Analysis\n*Video placeholder. Add `video1.mp4` to `examples` folder.*")
            except Exception as e:
                logger.warning(f"Could not load video1: {e}")
                gr.Markdown("### 🎥 Example 1: Automated Analysis\n*Video not available.*")
        with gr.Column():
            try:
                video2_path = os.path.join("examples", "video2.mp4")
                if os.path.exists(video2_path) and os.path.getsize(video2_path) > 100:
                    gr.Video(value=video2_path, label="Example 2: Report Generation", interactive=False)
                else:
                    gr.Markdown("### 🎥 Example 2: Report Generation\n*Video placeholder. Add `video2.mp4` to `examples` folder.*")
            except Exception as e:
                logger.warning(f"Could not load video2: {e}")
                gr.Markdown("### 🎥 Example 2: Report Generation\n*Video not available.*")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## 📊 Configuration")
            file_input = gr.File(label="📁 Data File (CSV/Excel)", file_types=[".csv", ".xlsx", ".xls"], elem_classes=["file-upload"])
            gr.Examples(examples=[os.path.join("examples", "archivo.xlsx")], inputs=[file_input], label="Click an example to run")
            with gr.Accordion("🔬 Analysis Parameters (AI Optimized)", open=False):
                models_input = gr.CheckboxGroup(choices=BIOTECH_MODELS, value=DEFAULT_BIOTECH_SELECTION, label="📊 Models")
                component_input = gr.Dropdown(['all', 'biomass', 'substrate', 'product'], value='all', label="📈 Component")
                exp_names_input = gr.Textbox(label="🏷️ Experiment Names", value="Biotech Analysis")
                use_de_input = gr.Checkbox(label="🧮 Use Differential Evolution", value=False)
                maxfev_input = gr.Number(label="🔄 Max Iterations", value=50000, step=1000)
            with gr.Group():
                claude_model_input = gr.Dropdown(choices=CLAUDE_MODELS, value=DEFAULT_CLAUDE_MODEL, label="🤖 Claude Model")
                with gr.Accordion("🔑 Personal API Key (Optional)", open=False):
                    use_personal_key_input = gr.Checkbox(label="Use Personal Claude API Key", value=False, info="Check this to use your own API key instead of the default one.")
                    personal_api_key_input = gr.Textbox(label="Personal Claude API Key", type="password", placeholder="Enter your key here (e.g., sk-ant-...)", visible=False)
                detail_level_input = gr.Radio(['detailed', 'summarized'], value='detailed', label="📋 Detail Level")
                language_input = gr.Dropdown(['en', 'es', 'fr', 'de', 'pt'], value='es', label="🌐 Language")
                export_format_input = gr.Radio(['PDF', 'DOCX'], value='PDF', label="📄 Format")
                additional_specs_input = gr.Textbox(label="📝 Additional Specifications", placeholder="AI Agents will customize this...", lines=3, value="Detailed analysis of models, metrics, and recommendations.")
            process_btn = gr.Button("🚀 Run Pipeline with AI Agents", variant="primary", size="lg")
        with gr.Column(scale=2):
            gr.Markdown("## 📈 Results")
            status_output = gr.Textbox(label="📊 Process Status (with AI Agents)", lines=5, interactive=False)
            with gr.Tabs():
                with gr.TabItem("📊 Visualization"):
                    plot_output = gr.Plot()
                with gr.TabItem("📋 Table"):
                    table_output = gr.Dataframe()
                with gr.TabItem("📝 Analysis"):
                    analysis_output = gr.Markdown()
                with gr.TabItem("💻 Code"):
                    code_output = gr.Code(language="python")
            report_output = gr.File(label="📥 Download Report", interactive=False)

    def toggle_api_key_visibility(checked):
        return gr.Textbox(visible=checked)

    use_personal_key_input.change(fn=toggle_api_key_visibility, inputs=use_personal_key_input, outputs=personal_api_key_input)
    
    process_btn.click(
        fn=process_complete_pipeline_with_agents,
        inputs=[
            file_input, models_input, component_input, use_de_input, maxfev_input, exp_names_input,
            claude_model_input, detail_level_input, language_input, additional_specs_input,
            export_format_input, use_personal_key_input, personal_api_key_input
        ],
        outputs=[
            plot_output, table_output, analysis_output, code_output, report_output, status_output
        ]
    )
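
# Local run note (assumption): requires gradio, gradio_client, plotly, pandas, numpy, and
# smolagents installed, plus network access to the two Spaces referenced above.
#   $ python app.py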

if __name__ == "__main__":
    if not os.path.exists("examples"):
        os.makedirs("examples")
        print("Carpeta 'examples' creada. Por favor, añade 'video1.mp4', 'video2.mp4', y 'archivo.xlsx' dentro.")
    demo.launch(show_error=True)