Update app.py
app.py
CHANGED
@@ -1,46 +1,28 @@
 import gradio as gr
 from transformers import pipeline
 
-# Modelos
-model_a = pipeline("text-generation", model="gpt2")
-model_b = pipeline("text-generation", model="EleutherAI/gpt-neo-125M")
+# Modelos geradores otimizados
+model_a = pipeline("text-generation", model="gpt2", return_full_text=False)
+model_b = pipeline("text-generation", model="EleutherAI/gpt-neo-125M", return_full_text=False)
 
-# Modelo juiz
+# Modelo juiz
 arbiter = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
 
-#
-def judge_response(prompt, response_a, response_b):
-
-
+# Julgamento por batch
+def judge_response(response_a, response_b):
+    results = arbiter([response_a, response_b])
+    score_a = results[0]['score']
+    score_b = results[1]['score']
+    return ("Model A", response_a) if score_a > score_b else ("Model B", response_b)
 
-    score_a = arbiter(combined_a)[0]['score']
-    score_b = arbiter(combined_b)[0]['score']
-
-    if score_a > score_b:
-        return "Model A", response_a
-    else:
-        return "Model B", response_b
-
-# Função principal do chatbot
+# Função principal
 def chatbot(prompt):
-
-
-
-
-    response_a = raw_response_a[len(prompt):].strip()
-    response_b = raw_response_b[len(prompt):].strip()
-
-    winner, final_response = judge_response(prompt, response_a, response_b)
+    response_a = model_a(prompt, max_new_tokens=40)[0]['generated_text'].strip()
+    response_b = model_b(prompt, max_new_tokens=40)[0]['generated_text'].strip()
+    winner, final_response = judge_response(response_a, response_b)
+    return prompt, response_a, response_b, winner, final_response
 
-    return (
-        prompt,
-        response_a,
-        response_b,
-        winner,
-        final_response
-    )
-
-# Interface Gradio
+# Interface
 iface = gr.Interface(
     fn=chatbot,
     inputs=gr.Textbox(label="Digite sua pergunta:"),
@@ -51,18 +33,8 @@ iface = gr.Interface(
         gr.Textbox(label="Modelo Vencedor"),
         gr.Textbox(label="Resposta Escolhida"),
     ],
-    title="Chatbot
-    description="""
-    Este chatbot utiliza dois modelos diferentes para responder à pergunta de um usuário.
-    Um terceiro modelo avalia qual resposta possui maior sentimento positivo e a apresenta como a melhor.
-
-    🧠 Modelos usados:
-    - Modelo A: GPT-2 (`gpt2`)
-    - Modelo B: GPT-Neo 125M (`EleutherAI/gpt-neo-125M`)
-    - Árbitro: DistilBERT SST-2 (`distilbert-base-uncased-finetuned-sst-2-english`)
-
-    Critério de julgamento: Resposta com maior sentimento positivo vence.
-    """
+    title="Chatbot com Julgamento",
+    description="Compara respostas de dois modelos e usa um árbitro para escolher a melhor com base no sentimento positivo."
 )
 
 iface.launch()
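For reference, below is a minimal, runnable sketch of the updated app.py assembled from the added lines above. The diff collapses four unchanged lines between the two hunks (the opening of the outputs list), so the first three Textbox labels in the sketch ("Pergunta", "Resposta do Modelo A", "Resposta do Modelo B") are assumptions for illustration only; everything else follows the diff.

import gradio as gr
from transformers import pipeline

# Generator models; return_full_text=False makes each pipeline return only the continuation
model_a = pipeline("text-generation", model="gpt2", return_full_text=False)
model_b = pipeline("text-generation", model="EleutherAI/gpt-neo-125M", return_full_text=False)

# Judge model: binary sentiment classifier
arbiter = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")

# Score both candidate answers in a single batched call
def judge_response(response_a, response_b):
    results = arbiter([response_a, response_b])
    # 'score' is the confidence of whichever label the classifier predicts for each text
    score_a = results[0]['score']
    score_b = results[1]['score']
    return ("Model A", response_a) if score_a > score_b else ("Model B", response_b)

# Generate with both models, then let the arbiter pick a winner
def chatbot(prompt):
    response_a = model_a(prompt, max_new_tokens=40)[0]['generated_text'].strip()
    response_b = model_b(prompt, max_new_tokens=40)[0]['generated_text'].strip()
    winner, final_response = judge_response(response_a, response_b)
    return prompt, response_a, response_b, winner, final_response

# Gradio interface: five outputs, one per value returned by chatbot()
iface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(label="Digite sua pergunta:"),
    outputs=[
        gr.Textbox(label="Pergunta"),               # assumed label (not visible in the diff)
        gr.Textbox(label="Resposta do Modelo A"),   # assumed label (not visible in the diff)
        gr.Textbox(label="Resposta do Modelo B"),   # assumed label (not visible in the diff)
        gr.Textbox(label="Modelo Vencedor"),
        gr.Textbox(label="Resposta Escolhida"),
    ],
    title="Chatbot com Julgamento",
    description="Compara respostas de dois modelos e usa um árbitro para escolher a melhor com base no sentimento positivo."
)

iface.launch()

Compared with the previous version, batching both responses into one arbiter call halves the number of classifier invocations, and return_full_text=False removes the need for the raw_response[len(prompt):] slicing that the old code used to strip the prompt from each generation.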