"""BioTech Analysis & Report Generator — Gradio pipeline application."""
import gradio as gr | |
from gradio_client import Client, handle_file | |
import pandas as pd | |
import json | |
import tempfile | |
import os | |
from datetime import datetime | |
import plotly.graph_objects as go | |
import plotly.express as px | |
import numpy as np | |
from smolagents import CodeAgent, tool, InferenceClientModel | |
import logging | |
# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# --- CLIENT CONFIGURATION ---
# Two remote Gradio Spaces: one runs the kinetic-model fitting, the other the
# Claude-based report generation. Either may be unreachable at startup; the
# None sentinels are checked before every remote call so the app degrades
# gracefully instead of crashing on import.
try:
    biotech_client = Client("C2MV/BiotechU4")
    analysis_client = Client("C2MV/Project-HF-2025")
except Exception as e:
    logger.error(f"No se pudieron inicializar los clientes de Gradio. Verifica las conexiones y los Spaces: {e}")
    biotech_client = None
    analysis_client = None
# Hugging Face inference engine (optional) — powers the smolagents CodeAgents.
# When unavailable, the agent classes fall back to their plain-Python tools.
try:
    hf_engine = InferenceClientModel(model_id="mistralai/Mistral-7B-Instruct-v0.2")
except Exception:
    logger.warning("No se pudo inicializar el modelo de HF. Los agentes usarán lógica simple.")
    hf_engine = None
# ============================================================================
# 🤖 AGENT SYSTEM
# ============================================================================
class BiotechAgentTools:
    """Stateless analysis tools shared by the agent classes below.

    Every tool is a ``@staticmethod`` because the tools are used two ways:
    passed as plain functions to ``smolagents.CodeAgent`` via the class
    (``BiotechAgentTools.analyze_data_characteristics``) and invoked on an
    instance (``self.tools.analyze_data_characteristics(info)``). Without
    ``@staticmethod`` the instance call binds the instance object to the
    first parameter (``data_info``) and raises ``TypeError``.

    NOTE(review): smolagents normally expects tools wrapped with its ``@tool``
    decorator; ``CodeAgent(tools=[...])`` with raw functions may be rejected
    when ``hf_engine`` is available — confirm against the smolagents version
    in use.
    """

    @staticmethod
    def analyze_data_characteristics(data_info: str) -> dict:
        """Analyze the characteristics of the uploaded biotech data.

        Args:
            data_info (str): Information about the data file, including its
                name, type and content.

        Returns:
            dict: Experiment type, recommended models, suggested fitting
                parameters and an estimated data quality.
        """
        try:
            characteristics = {
                "experiment_type": "unknown",
                "recommended_models": [],
                "suggested_params": {},
                "data_quality": "good",
            }
            data_lower = data_info.lower()
            # Model subsets for each experiment family. (The original filtered
            # these against the full model list, but every entry was already a
            # member, so the filter was a no-op and has been dropped.)
            growth_models = ['logistic', 'gompertz', 'baranyi', 'richards']
            fermentation_models = ['monod', 'contois', 'andrews', 'moser']
            if "biomass" in data_lower or "growth" in data_lower:
                characteristics.update({
                    "experiment_type": "growth_kinetics",
                    "recommended_models": growth_models,
                    "suggested_params": {"component": "biomass", "use_de": True, "maxfev": 75000},
                })
            elif "ferment" in data_lower or "substrate" in data_lower:
                characteristics.update({
                    "experiment_type": "fermentation",
                    "recommended_models": fermentation_models,
                    "suggested_params": {"component": "all", "use_de": False, "maxfev": 50000},
                })
            else:
                characteristics.update({
                    "experiment_type": "general_biotech",
                    "recommended_models": growth_models,
                    "suggested_params": {"component": "all", "use_de": False, "maxfev": 50000},
                })
            logger.info(f"Análisis completado: {characteristics['experiment_type']}")
            return characteristics
        except Exception as e:
            logger.error(f"Error en análisis de datos: {str(e)}")
            # Safe defaults so callers can proceed even after a failure.
            return {
                "experiment_type": "error",
                "recommended_models": ['logistic', 'gompertz'],
                "suggested_params": {"component": "all", "use_de": False, "maxfev": 50000},
                "data_quality": "unknown",
            }

    @staticmethod
    def evaluate_analysis_quality(results_info: str) -> dict:
        """Evaluate the quality of the biotech analysis results.

        Args:
            results_info (str): Information about the analysis results,
                including metrics and status.

        Returns:
            dict: Quality score, satisfactory flag, recommendations and
                whether a retry is needed.
        """
        try:
            evaluation = {"quality_score": 0.8, "is_satisfactory": True, "recommendations": [], "needs_retry": False}
            results_lower = results_info.lower()
            # Keyword heuristics: explicit failures downgrade the score,
            # presence of fit metrics upgrades it.
            if "error" in results_lower or "failed" in results_lower:
                evaluation.update({"quality_score": 0.2, "is_satisfactory": False, "needs_retry": True, "recommendations": ["Retry with different parameters"]})
            elif "r2" in results_lower or "rmse" in results_lower:
                evaluation.update({"quality_score": 0.9, "is_satisfactory": True, "recommendations": ["Results look good"]})
            logger.info(f"Evaluación de calidad: {evaluation['quality_score']}")
            return evaluation
        except Exception as e:
            logger.error(f"Error en evaluación: {str(e)}")
            return {"quality_score": 0.5, "is_satisfactory": True, "recommendations": ["Continue with analysis"], "needs_retry": False}

    @staticmethod
    def prepare_claude_context(data_summary: str) -> str:
        """Prepare the analysis-specific context for Claude.

        Args:
            data_summary (str): Summary of the analyzed data, including the
                experiment type and results.

        Returns:
            str: Enriched, structured context for Claude's analysis.
        """
        try:
            enhanced_context = f"""CONTEXTO BIOTECNOLÓGICO ESPECÍFICO:
Datos analizados: {data_summary}
Por favor, enfócate en:
1. Interpretación biológica de los parámetros ajustados
2. Comparación de la bondad de ajuste entre modelos
3. Implicaciones prácticas para el proceso biotecnológico
4. Recomendaciones para optimización del proceso
5. Identificación de posibles limitaciones del modelo
Incluye análisis estadístico riguroso y recomendaciones prácticas."""
            logger.info("Contexto preparado para Claude")
            return enhanced_context
        except Exception as e:
            logger.error(f"Error preparando contexto: {str(e)}")
            # On failure, pass the raw summary through unchanged.
            return data_summary
class CoordinatorAgent:
    """Inspects the uploaded-data description and tunes the analysis config."""

    def __init__(self):
        # The CodeAgent is only built when an inference engine is available.
        if hf_engine:
            self.agent = CodeAgent(
                tools=[
                    BiotechAgentTools.analyze_data_characteristics,
                    BiotechAgentTools.evaluate_analysis_quality,
                    BiotechAgentTools.prepare_claude_context,
                ],
                model=hf_engine,
            )
        else:
            self.agent = None
        self.tools = BiotechAgentTools()

    def analyze_and_optimize(self, file_info: str, current_config: dict) -> dict:
        """Return an optimized copy of ``current_config`` plus the analysis."""
        try:
            logger.info("🤖 Agente Coordinador iniciando análisis...")
            traits = self.tools.analyze_data_characteristics(file_info)
            tuned = dict(current_config)
            if traits["experiment_type"] != "error":
                suggested = traits["suggested_params"]
                tuned["models"] = traits["recommended_models"]
                tuned["component"] = suggested["component"]
                tuned["use_de"] = suggested["use_de"]
                tuned["maxfev"] = suggested["maxfev"]
                # Steer the downstream Claude prompt toward the detected domain.
                if traits["experiment_type"] == "growth_kinetics":
                    tuned["additional_specs"] = "Enfócate en el análisis de cinética de crecimiento: interpretación de μmax, lag time, etc."
                elif traits["experiment_type"] == "fermentation":
                    tuned["additional_specs"] = "Enfócate en el análisis de fermentación: eficiencia de conversión, productividad, etc."
            logger.info(f"✅ Configuración optimizada para: {traits['experiment_type']}")
            return {
                "config": tuned,
                "analysis": traits,
                "recommendations": f"Configuración optimizada para {traits['experiment_type']}",
            }
        except Exception as e:
            logger.error(f"❌ Error en Agente Coordinador: {str(e)}")
            # Fall back to the caller's original configuration untouched.
            return {
                "config": current_config,
                "analysis": {"experiment_type": "error"},
                "recommendations": f"Error en optimización: {str(e)}",
            }
class RecoveryAgent:
    """Applies progressively different fitting settings after a failed run."""

    def __init__(self):
        if hf_engine:
            self.agent = CodeAgent(tools=[BiotechAgentTools.analyze_data_characteristics], model=hf_engine)
        else:
            self.agent = None
        # Ordered fallback strategies; "models_subset" trims the model list.
        self.retry_strategies = [
            {"use_de": False, "maxfev": 25000, "models_subset": 2},
            {"use_de": True, "maxfev": 100000, "models_subset": 1},
            {"component": "biomass", "use_de": False, "maxfev": 50000},
        ]

    def attempt_recovery(self, original_config: dict, error_info: str, attempt: int = 0) -> dict:
        """Return a modified config for retry ``attempt``, or failure when exhausted."""
        strategies = self.retry_strategies
        if attempt >= len(strategies):
            return {"success": False, "message": "Todas las estrategias fallaron"}
        strategy = strategies[attempt]
        patched = dict(original_config)
        subset = strategy.get("models_subset")
        if subset is not None:
            patched["models"] = patched.get("models", ["logistic"])[:subset]
        # Apply every other key of the strategy on top of the original config.
        patched.update({k: v for k, v in strategy.items() if k != "models_subset"})
        logger.info(f"🔧 Aplicando estrategia de recuperación {attempt + 1}")
        return {"success": True, "config": patched, "strategy": strategy}
class QualityAgent:
    """Scores analysis results and flags whether improvement is needed."""

    def __init__(self):
        self.agent = CodeAgent(tools=[BiotechAgentTools.evaluate_analysis_quality], model=hf_engine) if hf_engine else None
        self.tools = BiotechAgentTools()

    def evaluate_results(self, results_summary: str) -> dict:
        """Translate the raw tool evaluation into the pipeline's feedback shape."""
        try:
            verdict = self.tools.evaluate_analysis_quality(results_summary)
            feedback = {
                "quality_score": verdict["quality_score"],
                "is_acceptable": verdict["is_satisfactory"],
                "feedback": verdict["recommendations"],
                "needs_improvement": verdict["needs_retry"],
            }
            logger.info(f"✅ Evaluación de calidad: {feedback['quality_score']:.2f}")
            return feedback
        except Exception as e:
            logger.error(f"❌ Error en evaluación de calidad: {str(e)}")
            # Permissive fallback so the pipeline keeps going.
            return {
                "quality_score": 0.7,
                "is_acceptable": True,
                "feedback": ["Evaluación completada con advertencias"],
                "needs_improvement": False,
            }
class ContextAgent:
    """Builds the enriched prompt handed to the Claude analysis step."""

    def __init__(self):
        self.agent = CodeAgent(tools=[BiotechAgentTools.prepare_claude_context], model=hf_engine) if hf_engine else None
        self.tools = BiotechAgentTools()

    def enhance_analysis_context(self, data_summary: str, experiment_type: str) -> str:
        """Return a Claude-ready context string; falls back to the raw summary."""
        try:
            combined = f"Tipo de experimento: {experiment_type}. Datos: {data_summary}"
            enriched = self.tools.prepare_claude_context(combined)
            logger.info("📊 Contexto mejorado para Claude")
            return enriched
        except Exception as e:
            logger.error(f"❌ Error mejorando contexto: {str(e)}")
            return data_summary
class BiotechAgentSystem:
    """Facade wiring together the coordinator, recovery, quality and context agents."""

    def __init__(self):
        self.coordinator = CoordinatorAgent()
        self.recovery = RecoveryAgent()
        self.quality = QualityAgent()
        self.context = ContextAgent()
        logger.info("🚀 Sistema de agentes inicializado")

    def process_with_agents(self, file_info: str, user_config: dict) -> dict:
        """Run the full agent pass and return the optimized configuration bundle."""
        try:
            coordination = self.coordinator.analyze_and_optimize(file_info, user_config)
            config = coordination["config"]
            experiment_type = coordination["analysis"]["experiment_type"]
            quality = self.quality.evaluate_results("Initial configuration optimized")
            # Replace the user's free-text specs with the enriched prompt.
            config["additional_specs"] = self.context.enhance_analysis_context(file_info, experiment_type)
            return {
                "success": True,
                "optimized_config": config,
                "experiment_type": experiment_type,
                "recommendations": coordination["recommendations"],
                "quality_score": quality["quality_score"],
            }
        except Exception as e:
            logger.error(f"❌ Error en sistema de agentes: {str(e)}")
            # On any failure, hand back the user's config unchanged.
            return {
                "success": False,
                "optimized_config": user_config,
                "experiment_type": "error",
                "recommendations": f"Error: {str(e)}",
                "quality_score": 0.5,
            }
# ============================================================================
# ⚙️ PIPELINE FUNCTIONS
# ============================================================================
# Single shared agent system used by the Gradio callbacks below.
agent_system = BiotechAgentSystem()
def process_biotech_data(file, models, component, use_de, maxfev, exp_names):
    """Run the kinetic-model fitting on the remote biotech Space.

    Returns whatever the remote endpoint returns (plot info, dataframe
    payload, status), or a ``(None, None, error-message)`` triple on failure.
    """
    if not biotech_client:
        return None, None, "Error: El cliente de biotecnología no está inicializado."
    try:
        # `file` may be an upload object (has .name) or a plain path string.
        path = getattr(file, 'name', file)
        # API call per the remote Space's documented signature.
        return biotech_client.predict(
            file=handle_file(path),
            models=models,
            component=component,
            use_de=use_de,
            maxfev=maxfev,
            exp_names=exp_names,
            theme=False,
            api_name="/run_analysis_wrapper",
        )
    except Exception as e:
        logger.error(f"Error en proceso biotecnológico: {str(e)}")
        return None, None, f"Error en el análisis: {str(e)}"
def create_dummy_plot():
    """Return an empty placeholder figure shown before any results exist."""
    placeholder = go.Figure(go.Scatter(x=[], y=[]))
    placeholder.update_layout(
        title="Esperando resultados...",
        template="plotly_white",
        height=500,
        annotations=[dict(text="Sube un archivo y ejecuta el pipeline para ver los resultados", showarrow=False)],
    )
    return placeholder
# --- FIXED FUNCTION ---
def parse_plot_data(plot_info):
    """Parse the plot payload returned by the remote API into a Plotly figure.

    The API normally returns ``{'plot': '<JSON string>'}``; raw JSON strings
    and plain figure dicts are accepted as fallbacks. Any unparseable or
    unrecognized payload now yields the placeholder figure — previously an
    unexpected payload type could fall through every branch and return
    ``None``, which breaks the ``gr.Plot`` output.
    """
    if not plot_info:
        return create_dummy_plot()
    try:
        # Preferred shape: {'plot': 'JSON_STRING'}.
        if isinstance(plot_info, dict) and 'plot' in plot_info:
            return go.Figure(json.loads(plot_info['plot']))
        # Fallbacks in case the remote structure changes.
        if isinstance(plot_info, str):
            return go.Figure(json.loads(plot_info))
        if isinstance(plot_info, dict):
            return go.Figure(plot_info)
    except Exception as e:
        logger.error(f"Error parsing plot: {e}")
    # Unknown payload type or parse failure: show the placeholder, never None.
    return create_dummy_plot()
def download_results_as_csv(df_data):
    """Materialize the results table as a file on disk and return its path.

    Tries the remote Space's export endpoint first; on failure, rebuilds the
    table locally with pandas and writes a CSV. Returns ``None`` when neither
    route works.
    """
    if not biotech_client:
        return None
    try:
        # The API takes the Gradio DataFrame payload and returns a file path.
        return biotech_client.predict(df=df_data, api_name="/download_results_excel")
    except Exception as e:
        logger.warning(f"Error con API de descarga: {str(e)}")
    # Local fallback: rebuild the table from the Gradio payload.
    if df_data and 'data' in df_data and 'headers' in df_data:
        try:
            df = pd.DataFrame(df_data['data'], columns=df_data['headers'])
            with tempfile.NamedTemporaryFile(mode='w+', suffix='.csv', delete=False) as temp_file:
                # Write through the open handle rather than reopening by path:
                # reopening an open NamedTemporaryFile fails on Windows.
                df.to_csv(temp_file, index=False)
                return temp_file.name
        except Exception as e_local:
            logger.error(f"Error creando CSV local: {e_local}")
    return None
def generate_claude_report(csv_file_path, model, detail_level, language, additional_specs, use_personal_key, personal_api_key):
    """Send the results CSV to the Claude analysis Space.

    Returns the remote ``(analysis, code)`` pair, or an ``(error-message, "")``
    pair on failure. A personal API key, when supplied, builds a dedicated
    client; otherwise the module-level default client is used.
    """
    if use_personal_key and personal_api_key:
        logger.info("Intentando usar la clave API personal de Claude.")
        try:
            client = Client("C2MV/Project-HF-2025", hf_token=personal_api_key)
            logger.info("Cliente inicializado exitosamente con la clave personal.")
        except Exception as e:
            logger.error(f"Fallo al inicializar el cliente con la clave personal: {e}")
            return f"Error: La clave API personal es inválida o hubo un problema de conexión. Detalles: {e}", ""
    else:
        logger.info("Usando la configuración por defecto (secreto) de la API de Claude.")
        client = analysis_client
    if not client:
        return "Error: El cliente de análisis no está disponible.", ""
    try:
        # API call per the remote Space's documented signature.
        return client.predict(
            files=[handle_file(csv_file_path)],
            model=model,
            detail=detail_level,
            language=language,
            additional_specs=additional_specs,
            api_name="/process_and_store",
        )
    except Exception as e:
        logger.error(f"Error generando reporte con Claude: {str(e)}")
        return f"Error en el análisis de Claude: {str(e)}", ""
def export_report(format_type, language):
    """Ask the analysis Space to export the stored report.

    Returns the remote ``(status, filepath)`` pair — the order matters to the
    caller — or ``(error-message, None)`` on failure.
    """
    if not analysis_client:
        return None, "Error: El cliente de análisis no está inicializado para la exportación."
    try:
        result = analysis_client.predict(format=format_type, language=language, api_name="/handle_export")
        return result
    except Exception as e:
        logger.error(f"Error en exportación: {str(e)}")
        return f"Error al exportar: {str(e)}", None
# --- MAIN PIPELINE FUNCTION ---
def process_complete_pipeline_with_agents(file, models, component, use_de, maxfev, exp_names, claude_model, detail_level, language, additional_specs, export_format, use_personal_key, personal_api_key, progress=gr.Progress()):
    """End-to-end pipeline: agent optimization → fitting → Claude report → export.

    Returns a 6-tuple matching the Gradio outputs:
    (plot figure, dataframe payload, analysis markdown, generated code,
    report file path, status log).
    """
    progress(0, desc="🚀 Iniciando Pipeline...")
    # Input validation guards.
    if not file:
        return create_dummy_plot(), None, None, None, None, "❌ Por favor, sube un archivo."
    if not models:
        return create_dummy_plot(), None, None, None, None, "❌ Por favor, selecciona al menos un modelo."
    progress_updates = []
    progress(0.1, desc="🤖 Activando sistema de agentes...")
    # `file` can be an upload object (has .name) or a plain path string;
    # handle both, mirroring process_biotech_data (previously `file.name`
    # crashed on string paths, e.g. from gr.Examples).
    file_path = getattr(file, 'name', file)
    file_info = f"Archivo: {os.path.basename(file_path)}, Modelos: {models}"
    user_config = {"models": models, "component": component, "use_de": use_de, "maxfev": maxfev, "claude_model": claude_model, "detail_level": detail_level, "language": language, "additional_specs": additional_specs, "export_format": export_format}
    agent_result = agent_system.process_with_agents(file_info, user_config)
    if agent_result["success"]:
        # Adopt the agent-optimized parameters over the user's raw inputs.
        optimized_config = agent_result["optimized_config"]
        progress_updates.extend([f"✅ Agentes detectaron: {agent_result['experiment_type']}", f"🎯 {agent_result['recommendations']}", f"📊 Calidad esperada: {agent_result['quality_score']:.1%}"])
        models = optimized_config.get("models", models)
        component = optimized_config.get("component", component)
        use_de = optimized_config.get("use_de", use_de)
        maxfev = optimized_config.get("maxfev", maxfev)
        additional_specs = optimized_config.get("additional_specs", additional_specs)
    else:
        progress_updates.append("⚠️ Agentes no pudieron optimizar, usando config original.")
    progress(0.2, desc="🔄 Procesando datos biotecnológicos...")
    plot_info, df_data, status = process_biotech_data(file, models, component, use_de, maxfev, exp_names)
    if not plot_info or not df_data or "Error" in str(status):
        return create_dummy_plot(), None, None, None, None, "\n".join(progress_updates) + f"\n❌ Error en análisis: {status}"
    progress_updates.append("✅ Análisis biotecnológico completado")
    progress(0.4, desc="📥 Descargando resultados...")
    csv_file_path = download_results_as_csv(df_data)
    if not csv_file_path:
        return parse_plot_data(plot_info), df_data, None, None, None, "\n".join(progress_updates) + "\n❌ Error al descargar resultados para análisis."
    progress_updates.append("✅ Resultados descargados")
    progress(0.5, desc=f"🤖 Generando análisis con {claude_model}...")
    analysis, code = generate_claude_report(csv_file_path, claude_model, detail_level, language, additional_specs, use_personal_key, personal_api_key)
    # The temporary CSV is only needed for the Claude call; clean it up.
    if os.path.exists(csv_file_path):
        os.remove(csv_file_path)
    # str() guard in case the API returns a non-string payload.
    if "Error" in str(analysis):
        return parse_plot_data(plot_info), df_data, analysis, code, None, "\n".join(progress_updates) + f"\n❌ {analysis}"
    progress_updates.append("✅ Análisis con Claude completado")
    progress(0.9, desc=f"📄 Exportando informe en {export_format}...")
    # Unpack in the order documented by the API: (status, filepath).
    export_status, report_file = export_report(export_format, language)
    if report_file:
        progress_updates.append("✅ Informe exportado.")
    else:
        progress_updates.append(f"❌ Error al exportar: {export_status}")
    progress(1, desc="🎉 Pipeline Completado")
    # Pass the raw `plot_info` payload to `parse_plot_data` for rendering.
    return parse_plot_data(plot_info), df_data, analysis, code, report_file, "\n".join(progress_updates)
def create_example_videos():
    """Ensure the examples directory exists and holds placeholder video files."""
    examples_dir = "examples"
    if not os.path.exists(examples_dir):
        os.makedirs(examples_dir)
    for filename in ("video1.mp4", "video2.mp4"):
        target = os.path.join(examples_dir, filename)
        if os.path.exists(target):
            continue
        # Tiny text placeholder; the UI treats files under ~100 bytes as missing.
        with open(target, 'w') as fh:
            fh.write("# Video placeholder")
        logger.info(f"Created placeholder for {filename}")
# ============================================================================
# 🖼️ GRADIO USER INTERFACE
# ============================================================================
# Kinetic models exposed by the remote fitting Space.
BIOTECH_MODELS = ['logistic', 'gompertz', 'moser', 'baranyi', 'monod', 'contois', 'andrews', 'tessier', 'richards', 'stannard', 'huang']
# Default selection, filtered defensively against the master list.
DEFAULT_BIOTECH_SELECTION = [model for model in ['logistic', 'gompertz', 'moser', 'baranyi'] if model in BIOTECH_MODELS]
# Claude model identifiers accepted by the analysis Space.
CLAUDE_MODELS = ['claude-opus-4-20250514', 'claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-3-7-sonnet-20250219', 'claude-3-5-sonnet-20241022']
DEFAULT_CLAUDE_MODEL = 'claude-3-5-sonnet-20241022'
# Visual theme and lightweight CSS tweaks for the upload box and buttons.
theme = gr.themes.Soft(primary_hue="blue", secondary_hue="indigo", neutral_hue="slate")
custom_css = ".file-upload { border: 2px dashed #3b82f6; } button.primary { background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%); }"
# Make sure the examples folder and video placeholders exist before the UI loads.
create_example_videos()
# ----------------------------------------------------------------------------
# Gradio UI definition. The resulting `demo` is launched from the
# `__main__` guard at the bottom of the file.
# ----------------------------------------------------------------------------
with gr.Blocks(theme=theme, title="BioTech Analysis & Report Generator", css=custom_css) as demo:
    # Page header.
    gr.Markdown(
        """
        # 🧬 BioTech Analysis & Report Generator
        ## **Based on the pipeline: [C2MV/Agent-Biotech](https://huggingface.co/spaces/C2MV/Agent-Biotech)**
        *An intelligent pipeline that automates the analysis of bioprocess data, from kinetic modeling to generating detailed reports with AI Agents.*
        """
    )
    # Collapsible ASCII diagram of the agent architecture.
    with gr.Accordion("🤖 How the AI Agents Work (Click to Expand)", open=True):
        gr.Markdown(
            """
            ```text
            [ 👤 USER INPUT: Data File & Initial Settings ]
                                       │
                                       ▼
            ┌──────────────────────────────────────────────────────────┐
            │                  🤖 Coordinator Agent                    │
            │  • Analyzes experiment type (e.g., kinetics, ferment.).  │
            │  • Recommends optimal models and parameters.             │
            └──────────────────────────┬───────────────────────────────┘
                                       │
               (If analysis fails)     │     (If analysis succeeds)
                  ┌────────────────────┘
                  │
                  ▼
            ┌───────────────────────┐          ┌────────────────────────────┐
            │   🔧 Recovery Agent   │          │      🧐 Quality Agent      │
            │ • Applies different   │ ◀─────   │ • Evaluates statistical    │
            │   strategies to       │ (Retry)  │   quality (R², etc).       │
            │   find a solution.    │          └────────────┬───────────────┘
            └───────────────────────┘                       │
                                                            │
                                                            ▼
                                               ┌────────────────────────────┐
                                               │      ✍️ Context Agent      │
                                               │ • Prepares a rich,         │
                                               │   detailed prompt for      │
                                               │   the final report.        │
                                               └────────────┬───────────────┘
                                                            │
                                                            ▼
                         [ 📄 FINAL OUTPUT: Report, Visualization & Data ]
            ```
            """
        )
    # Example videos (placeholder files are created by create_example_videos()).
    with gr.Row():
        with gr.Column():
            try:
                video1_path = os.path.join("examples", "video1.mp4")
                # The size check distinguishes a real video from the tiny text placeholder.
                if os.path.exists(video1_path) and os.path.getsize(video1_path) > 100:
                    gr.Video(value=video1_path, label="Example 1: Automated Analysis", interactive=False)
                else:
                    gr.Markdown("### 🎥 Example 1: Automated Analysis\n*Video placeholder. Add `video1.mp4` to `examples` folder.*")
            except Exception as e:
                logger.warning(f"Could not load video1: {e}")
                gr.Markdown("### 🎥 Example 1: Automated Analysis\n*Video not available.*")
        with gr.Column():
            try:
                video2_path = os.path.join("examples", "video2.mp4")
                if os.path.exists(video2_path) and os.path.getsize(video2_path) > 100:
                    gr.Video(value=video2_path, label="Example 2: Report Generation", interactive=False)
                else:
                    gr.Markdown("### 🎥 Example 2: Report Generation\n*Video placeholder. Add `video2.mp4` to `examples` folder.*")
            except Exception as e:
                logger.warning(f"Could not load video2: {e}")
                gr.Markdown("### 🎥 Example 2: Report Generation\n*Video not available.*")
    with gr.Row():
        # Left column: all pipeline inputs.
        with gr.Column(scale=1):
            gr.Markdown("## 📊 Configuration")
            file_input = gr.File(label="📁 Data File (CSV/Excel)", file_types=[".csv", ".xlsx", ".xls"], elem_classes=["file-upload"])
            gr.Examples(examples=[os.path.join("examples", "archivo.xlsx")], inputs=[file_input], label="Click an example to run")
            # Fitting parameters; the agent system may override these at run time.
            with gr.Accordion("🔬 Analysis Parameters (AI Optimized)", open=False):
                models_input = gr.CheckboxGroup(choices=BIOTECH_MODELS, value=DEFAULT_BIOTECH_SELECTION, label="📊 Models")
                component_input = gr.Dropdown(['all', 'biomass', 'substrate', 'product'], value='all', label="📈 Component")
                exp_names_input = gr.Textbox(label="🏷️ Experiment Names", value="Biotech Analysis")
                use_de_input = gr.Checkbox(label="🧮 Use Differential Evolution", value=False)
                maxfev_input = gr.Number(label="🔄 Max Iterations", value=50000, step=1000)
            # Report-generation settings (Claude model, key, language, format).
            with gr.Group():
                claude_model_input = gr.Dropdown(choices=CLAUDE_MODELS, value=DEFAULT_CLAUDE_MODEL, label="🤖 Claude Model")
                with gr.Accordion("🔑 Personal API Key (Optional)", open=False):
                    use_personal_key_input = gr.Checkbox(label="Use Personal Claude API Key", value=False, info="Check this to use your own API key instead of the default one.")
                    # Hidden until the checkbox above is ticked (see the .change handler below).
                    personal_api_key_input = gr.Textbox(label="Personal Claude API Key", type="password", placeholder="Enter your key here (e.g., sk-ant-...)", visible=False)
                detail_level_input = gr.Radio(['detailed', 'summarized'], value='detailed', label="📋 Detail Level")
                language_input = gr.Dropdown(['en', 'es', 'fr', 'de', 'pt'], value='es', label="🌐 Language")
                export_format_input = gr.Radio(['PDF', 'DOCX'], value='PDF', label="📄 Format")
                additional_specs_input = gr.Textbox(label="📝 Additional Specifications", placeholder="AI Agents will customize this...", lines=3, value="Detailed analysis of models, metrics, and recommendations.")
            process_btn = gr.Button("🚀 Run Pipeline with AI Agents", variant="primary", size="lg")
        # Right column: pipeline outputs.
        with gr.Column(scale=2):
            gr.Markdown("## 📈 Results")
            status_output = gr.Textbox(label="📊 Process Status (with AI Agents)", lines=5, interactive=False)
            with gr.Tabs():
                with gr.TabItem("📊 Visualization"):
                    plot_output = gr.Plot()
                with gr.TabItem("📋 Table"):
                    table_output = gr.Dataframe()
                with gr.TabItem("📝 Analysis"):
                    analysis_output = gr.Markdown()
                with gr.TabItem("💻 Code"):
                    code_output = gr.Code(language="python")
            report_output = gr.File(label="📥 Download Report", interactive=False)

    def toggle_api_key_visibility(checked):
        # Reveal the API-key textbox only when the personal-key option is enabled.
        return gr.Textbox(visible=checked)

    use_personal_key_input.change(fn=toggle_api_key_visibility, inputs=use_personal_key_input, outputs=personal_api_key_input)
    # Wire the main button to the pipeline; the `outputs` order must match the
    # 6-tuple returned by process_complete_pipeline_with_agents.
    process_btn.click(
        fn=process_complete_pipeline_with_agents,
        inputs=[
            file_input, models_input, component_input, use_de_input, maxfev_input, exp_names_input,
            claude_model_input, detail_level_input, language_input, additional_specs_input,
            export_format_input, use_personal_key_input, personal_api_key_input
        ],
        outputs=[
            plot_output, table_output, analysis_output, code_output, report_output, status_output
        ]
    )
if __name__ == "__main__":
    # Ensure the examples folder exists before launching the UI.
    if not os.path.exists("examples"):
        os.makedirs("examples")
        print("Carpeta 'examples' creada. Por favor, añade 'video1.mp4', 'video2.mp4', y 'archivo.xlsx' dentro.")
    # show_error surfaces Python exceptions in the browser for easier debugging.
    demo.launch(show_error=True)