Caikejs committed
Commit fcd3ba8 · verified · 1 Parent(s): 5b14e76

Update app.py

Files changed (1): app.py (+4 -1)
app.py CHANGED
@@ -6,7 +6,7 @@ model_a = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1-0528", trus
 model_b = pipeline("text-generation", model="botbot-ai/CabraLlama3-8b")
 
 # Judge model
-arbiter = pipeline("text-classification", model="neuralmind/bert-base-portuguese-cased", trust_remote_code=True)
+arbiter = pipeline("text-classification", model="neuralmind/bert-base-portuguese-cased")
 
 # Batch judging
 def judge_response(response_a, response_b):
@@ -17,8 +17,11 @@ def judge_response(response_a, response_b):
 
 # Main function
 def chatbot(prompt):
+    # Generate responses from both models
     response_a = model_a(prompt, max_new_tokens=40)[0]['generated_text'].strip()
     response_b = model_b(prompt, max_new_tokens=40)[0]['generated_text'].strip()
+
+    # Judge the responses
     winner, final_response = judge_response(response_a, response_b)
     return prompt, response_a, response_b, winner, final_response
 
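
Note: the body of judge_response appears only as unchanged context in this diff, so its implementation is not visible here. Purely as an illustration (an assumption, not the committed code), a minimal judge could score each candidate with the text-classification arbiter and keep the higher-scoring one. Dropping trust_remote_code=True, as this commit does, is safe for this model: neuralmind/bert-base-portuguese-cased is a standard BERT checkpoint that runs on the built-in transformers classes.

from transformers import pipeline

# Same arbiter as in the commit: a Portuguese BERT used as a text classifier.
# The base checkpoint has no fine-tuned classification head, so the scores
# below are only illustrative.
arbiter = pipeline("text-classification", model="neuralmind/bert-base-portuguese-cased")

def judge_response(response_a, response_b):
    # Hypothetical ranking: keep the response whose top label receives the
    # higher confidence score from the arbiter.
    score_a = arbiter(response_a)[0]["score"]
    score_b = arbiter(response_b)[0]["score"]
    if score_a >= score_b:
        return "model_a", response_a
    return "model_b", response_b

Called from chatbot(), such a judge would return, for example, ("model_a", response_a), which the app then exposes as winner and final_response.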