Update app.py
app.py CHANGED
@@ -300,19 +300,19 @@ QWEN_MODELS = {
     "Qwen/Qwen3-14B": {
         "name": "Qwen 3 14B",
         "description": "Modelo potente multilingüe de Alibaba",
-        "max_tokens":
+        "max_tokens": 10000,
         "best_for": "Análisis complejos y detallados"
     },
     "Qwen/Qwen3-7B": {
         "name": "Qwen 3 7B",
         "description": "Modelo equilibrado para uso general",
-        "max_tokens":
+        "max_tokens": 10000,
         "best_for": "Análisis rápidos y precisos"
     },
     "Qwen/Qwen1.5-14B": {
         "name": "Qwen 1.5 14B",
         "description": "Modelo avanzado para tareas complejas",
-        "max_tokens":
+        "max_tokens": 10000,
         "best_for": "Análisis técnicos detallados"
     }
 }
@@ -553,7 +553,7 @@ class AIAnalyzer:
         try:
             response = self.client.chat.completions.create(
                 model="Qwen/Qwen3-14B",
-                max_tokens=
+                max_tokens=10000,
                 temperature=0.0,
                 messages=[{"role": "user", "content": f"{prompt}\n\n{content[:1000]}"}]
             )
@@ -767,7 +767,7 @@ class AIAnalyzer:
             # Análisis principal
             response = self.client.chat.completions.create(
                 model=qwen_model,
-                max_tokens=
+                max_tokens=10000,
                 temperature=0.3,
                 messages=[{
                     "role": "user",
@@ -811,7 +811,7 @@ class AIAnalyzer:
 
             code_response = self.client.chat.completions.create(
                 model=qwen_model,
-                max_tokens=
+                max_tokens=10000,
                 temperature=0.1,
                 messages=[{
                     "role": "user",