Update app.py

app.py CHANGED
@@ -4,7 +4,7 @@ from typing import List, Dict, Tuple
 
 import gradio as gr
 
-# Lazy loading of models (initialized on first use)
+# --- Lazy loading of models (initialized on first use) ---
 _llm = None
 _llm_tok = None
 _gen = None
@@ -23,38 +23,40 @@ TEXTO:
 """
 
 HYP: Dict[str, List[str]] = {
+    "4.4.5.1": [
+        "Tomó la iniciativa sin que se lo pidieran.",
+        "Ofreció ayuda o asesoría no solicitada.",
+        "Pensó con orientación al futuro.",
+        "Equilibró iniciativa y riesgo."
+    ],
+    "4.4.5.2": [
+        "Mostró compromiso personal con los objetivos.",
+        "Promovió el proyecto y generó entusiasmo.",
+        "Definió o monitoreó indicadores de desempeño.",
+        "Buscó mejoras en procesos."
+    ],
+    "4.4.5.3": [
+        "Proporcionó dirección clara al equipo.",
+        "Realizó coaching o mentoring para mejorar capacidades.",
+        "Estableció y comunicó visión y valores.",
+        "Alineó objetivos individuales con los comunes."
+    ],
+    "4.4.5.4": [
+        "Usó apropiadamente poder e influencia.",
+        "Seleccionó el canal de comunicación adecuado para influir.",
+        "Fue percibido como líder por los stakeholders."
+    ],
+    "4.4.5.5": [
+        "Tomó decisiones bajo incertidumbre considerando pros y contras.",
+        "Explicó el razonamiento de las decisiones.",
+        "Revisó decisiones con nueva evidencia.",
+        "Comunicó claramente la decisión e influyó su adopción."
+    ]
 }
 
+# --------- Models ---------
 def lazy_load_llm():
+    """Builds the generation pipeline without flags that trigger warnings on CPU."""
     global _llm, _llm_tok, _gen
     if _gen is not None:
         return _gen
@@ -62,8 +64,7 @@ def lazy_load_llm():
     _llm_tok = AutoTokenizer.from_pretrained(LLM_ID)
     _llm = AutoModelForCausalLM.from_pretrained(
         LLM_ID,
-        device_map="auto"
+        device_map="auto"  # dtype auto on CPU
     )
     _gen = pipeline(
         "text-generation",
@@ -71,96 +72,128 @@ def lazy_load_llm():
         tokenizer=_llm_tok,
         max_new_tokens=512,
         do_sample=False,
-        temperature=0.0,
         repetition_penalty=1.1,
     )
     return _gen
 
+
 def lazy_load_nli():
+    """Modern config: top_k=None returns all labels; softmax for stable probabilities."""
     global _nli
     if _nli is not None:
         return _nli
     from transformers import pipeline
-    # top_k=None -> returns all labels; avoids return_all_scores (deprecated)
     _nli = pipeline(
         "text-classification",
         model=NLI_ID,
        tokenizer=NLI_ID,
-        top_k=None,
+        top_k=None,               # instead of return_all_scores=True (deprecated)
         function_to_apply="softmax"
     )
     return _nli
 
+# --------- Utilities ---------
 def extract_json_block(text: str) -> str:
     """Tries to extract the first valid JSON block from a string."""
     start = text.find("{")
     end = text.rfind("}")
     if start != -1 and end != -1 and end > start:
         return text[start:end+1]
-    # minimal fallback if the model returned plain text
+    # minimal fallback if the model returned noisy text
     return '{"situation":"","task":"","action":[],"result":[]}'
 
+def quick_parse_star(txt: str):
+    """Shortcut: if the text already comes in STAR format, parse it locally to save time."""
+    t = (txt or "").strip()
+    if not t:
+        return None
+    keys = ("SITUATION", "TASK", "ACTION", "RESULT", "S:", "T:", "A:", "R:")
+    if not any(k in t for k in keys):
+        return None
+    sections = {"situation": "", "task": "", "action": [], "result": []}
+    blocks = re.split(r'(?im)^(SITUATION|TASK|ACTION|RESULT|S:|T:|A:|R:)\s*:?', t)
+    # blocks = ["", "SITUATION", "...", "TASK", "...", ...]
+    for i in range(1, len(blocks), 2):
+        key = blocks[i].lower()[0]  # s/t/a/r
+        val = blocks[i+1].strip()
+        if key == "s":
+            sections["situation"] = val
+        elif key == "t":
+            sections["task"] = val
+        elif key == "a":
+            sections["action"] = [x.strip("•- ") for x in val.splitlines() if x.strip()]
+        elif key == "r":
+            sections["result"] = [x.strip("•- ") for x in val.splitlines() if x.strip()]
+    return sections
+
 def extract_star(user_text: str) -> Dict:
+    """If the text is already in STAR format, parse it locally; otherwise use the LLM to extract STAR as JSON."""
+    parsed = quick_parse_star(user_text)
+    if parsed:
+        return parsed
+
     gen = lazy_load_llm()
-    prompt = STAR_PROMPT.format(texto=user_text.strip())
+    prompt = STAR_PROMPT.format(texto=(user_text or "").strip())
     out = gen(prompt)[0]["generated_text"]
     raw = extract_json_block(out)
     try:
         data = json.loads(raw)
     except Exception:
+        # Rescue heuristic if a bulleted, non-JSON format comes back
         data = {"situation": "", "task": "", "action": [], "result": []}
+        m = re.search(r'Situation[::]\s*(.*)', user_text or "", flags=re.I)
+        if m:
+            data["situation"] = m.group(1).strip()
+    # Normalize types
     data["action"] = data.get("action", [])
     data["result"] = data.get("result", [])
-    if isinstance(data["action"], str):
+    if isinstance(data["action"], str):
+        data["action"] = [data["action"]]
+    if isinstance(data["result"], str):
+        data["result"] = [data["result"]]
     return {
-        "situation": data.get("situation", "").strip(),
-        "task": data.get("task", "").strip(),
-        "action": [a.strip(" •-") for a in data["action"] if str(a).strip()],
-        "result": [r.strip(" •-") for r in data["result"] if str(r).strip()],
+        "situation": (data.get("situation", "") or "").strip(),
+        "task": (data.get("task", "") or "").strip(),
+        "action": [str(a).strip(" •-") for a in data["action"] if str(a).strip()],
+        "result": [str(r).strip(" •-") for r in data["result"] if str(r).strip()],
     }
 
 def nli_entails(premise: str, hypothesis: str) -> float:
+    """Returns the entailment probability (0..1), robust to pipeline output formats and errors."""
     nli = lazy_load_nli()
-    # An empty premise can happen if STAR wasn't extracted; avoid pointless calls
     text_a = (premise or "").strip()
     text_b = (hypothesis or "").strip()
     if not text_a or not text_b:
         return 0.0
 
+    try:
+        res = nli({"text": text_a, "text_pair": text_b})
+    except Exception:
+        return 0.0
 
     # Normalization of possible formats:
+    # - {label, score}
     # - [{label, score}, ...]
     # - [[{label, score}, ...]]
-    # - {label, score}
     if isinstance(res, dict):
         candidates = [res]
     elif isinstance(res, list):
-            candidates = res[0]
-        else:
-            candidates = res
+        candidates = res[0] if (res and isinstance(res[0], list)) else res
     else:
         return 0.0
 
-    for c in candidates:
+    # Keep only dicts for safety and look for 'entail'
+    for c in (d for d in candidates if isinstance(d, dict)):
         lab = str(c.get("label", "")).lower()
+        if "entail" in lab:  # covers entail/entails/entailment
+            try:
+                return float(c.get("score", 0.0))
+            except Exception:
+                return 0.0
+    return 0.0
 
 def map_prob_to_score(p: float) -> int:
+    """Initial mapping of the average probability to a 0–4 scale (tune after internal validation)."""
     if p >= 0.80: return 4
     if p >= 0.60: return 3
     if p >= 0.40: return 2
@@ -175,50 +208,54 @@ def score_indicator(premise: str, hyps: List[str]) -> Tuple[int, List[Tuple[str,
     return score, probs_sorted, avg
 
 def evaluate(texto: str) -> Tuple[str, Dict, Dict]:
-        "star": star,
-        "indicators": {},
-        "overall": {}
-    }
+    """Full orchestration with error handling (the message is shown in 'Estado')."""
+    try:
+        if not texto or not texto.strip():
+            return "Introduce un caso en formato STAR (o texto libre).", {}, {}
+
+        star = extract_star(texto)
+        premise = " ".join(star.get("action", [])) + " " + " ".join(star.get("result", []))
 
-        report["indicators"][ind] = {
-            "score": s,
-            "evidence": [ev[0][0], ev[1][0]] if len(ev) > 1 else [ev[0][0]],
-            "rationale": f"Promedio de entailment NLI ≈ {avg:.2f}"
-        }
-        table_rows.append([ind, s, f"{avg:.2f}", " / ".join([h for h,_ in ev])])
+        report = {
+            "competence": "ICB4 4.4.5 Leadership",
+            "star": star,
+            "indicators": {},
+            "overall": {}
+        }
 
+        scores = []
+        table_rows = []
+        for ind, hyps in HYP.items():
+            s, ev, avg = score_indicator(premise, hyps)
+            scores.append(s)
+            report["indicators"][ind] = {
+                "score": s,
+                "evidence": [e[0] for e in ev],
+                "rationale": f"Promedio de entailment NLI ≈ {avg:.2f}"
+            }
+            table_rows.append([ind, s, f"{avg:.2f}", " / ".join([h for h, _ in ev])])
 
+        overall = round(sum(scores) / max(1, len(scores)), 2)
+        report["overall"] = {
+            "score_weighted": overall,
+            "notes": "Ajusta umbrales tras validar con 10–20 ejemplos reales."
+        }
 
+        msg = (
+            f"Evaluación completada. Score global (0–4): {overall}\n"
+            f"Sugerencia: revisa la evidencia y ajusta umbrales según tu rúbrica."
+        )
+
+        table = {
+            "columns": ["Indicador", "Score (0–4)", "Entailment medio", "Mejores evidencias (hipótesis)"],
+            "data": table_rows
+        }
+        return msg, report, table
 
+    except Exception as e:
+        return f"⚠️ Error en evaluate(): {type(e).__name__}: {e}", {}, {"columns": [], "data": []}
+
+# --------- Gradio UI ---------
 with gr.Blocks(title="ICB4 4.4.5 Leadership — Evaluación STAR (FRAQX)") as demo:
     gr.Markdown(
         """
@@ -236,29 +273,38 @@ with gr.Blocks(title="ICB4 4.4.5 Leadership — Evaluación STAR (FRAQX)") as de
     with gr.Row():
         json_out = gr.JSON(label="Reporte JSON")
     with gr.Row():
-        table = gr.Dataframe(
+        table = gr.Dataframe(
+            headers=["Indicador", "Score (0–4)", "Entailment medio", "Mejores evidencias (hipótesis)"],
+            datatype=["str", "number", "str", "str"],
+            interactive=False
+        )
 
     ejemplos = gr.Examples(
-        examples=[
-        ],
+        examples=[[
+            "S: El proyecto CRM estaba retrasado 6 semanas y el equipo estaba desmotivado. "
+            "T: Recuperar el plan y mejorar la colaboración en 2 sprints. "
+            "A: Organicé una sesión de visión y valores con el Product Owner; definí métricas de entrega y calidad; "
+            "implementé dailies focalizadas; ofrecí mentoring a dos líderes junior; negocié con stakeholders para "
+            "priorizar un backlog mínimo; comuniqué riesgos y comprometí fechas realistas. "
+            "R: Recuperamos 4 semanas en dos sprints; NPS interno +22; se redujo retrabajo 18%; el equipo pidió "
+            "mantener las prácticas y dos líderes asumieron más responsabilidad."
+        ]],
         inputs=[texto],
     )
 
     def run_eval(t):
         msg, rep, tbl = evaluate(t)
-        return msg, rep, gr.update(value=tbl
+        return msg, rep, gr.update(value=tbl.get("data", []), headers=tbl.get("columns", []))
 
     btn.click(run_eval, inputs=[texto], outputs=[status, json_out, table])
 
+# --- Launch with a queue and without SSR (stabilizes CPU Basic) ---
 if __name__ == "__main__":
-    demo.
+    demo.queue(
+        max_size=16,          # maximum queue size
+        concurrency_count=1   # 1 simultaneous run on CPU Basic
+    ).launch(
+        ssr_mode=False,       # disables SSR to avoid glitches
+        show_error=True
+    )
 
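A quick way to sanity-check the pure-Python pieces this commit adds, without downloading any model: the sketch below is a hypothetical smoke_test.py placed next to app.py, and it only exercises quick_parse_star and map_prob_to_score. It assumes the parts of app.py not shown in this diff (header imports such as re, and the UI components texto/btn/status) are intact and that gradio and transformers are installed, since importing the module builds the Blocks UI; the LLM and NLI pipelines stay unloaded because they are lazy.

# smoke_test.py — hypothetical local check; not part of the commit.
from app import quick_parse_star, map_prob_to_score

star_text = (
    "S: El proyecto CRM estaba retrasado 6 semanas.\n"
    "T: Recuperar el plan en 2 sprints.\n"
    "A: Definí métricas de entrega y calidad.\n"
    "R: Recuperamos 4 semanas de retraso."
)

# The STAR shortcut should parse labelled text without touching the LLM.
star = quick_parse_star(star_text)
assert star is not None
assert star["situation"].startswith("El proyecto CRM")
assert star["action"] == ["Definí métricas de entrega y calidad."]
assert star["result"] == ["Recuperamos 4 semanas de retraso."]

# Threshold mapping from the diff: >=0.80 -> 4, >=0.60 -> 3, >=0.40 -> 2.
assert map_prob_to_score(0.85) == 4
assert map_prob_to_score(0.65) == 3
assert map_prob_to_score(0.45) == 2

print("quick_parse_star and map_prob_to_score look consistent with the diff.")

If the assertions pass, the STAR shortcut and the 0–4 threshold mapping behave as the diff describes; the model-backed path (extract_star via the LLM, nli_entails) still needs a check on the Space itself. One thing worth verifying there: on Gradio versions recent enough to accept ssr_mode in launch(), queue() may no longer accept concurrency_count (newer releases renamed it to default_concurrency_limit), so the launch block could need adjusting to the installed version.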