SlickSlick committed
Commit 02f22f2 · verified · 1 parent: 2eacb77

Create app.py

Files changed (1)
app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification
+ import torch
+
+ # === LOAD THE GENERATOR MODELS ===
+ generator_1_name = "pierreguillou/gpt2-small-portuguese"
+ generator_2_name = "pierreguillou/gpt2-small-portuguese"  # using the same model for simplicity/testing
+
+ tokenizer_1 = AutoTokenizer.from_pretrained(generator_1_name)
+ model_1 = AutoModelForCausalLM.from_pretrained(generator_1_name)
+
+ tokenizer_2 = AutoTokenizer.from_pretrained(generator_2_name)
+ model_2 = AutoModelForCausalLM.from_pretrained(generator_2_name)
+
+ # === LOAD THE JUDGE MODEL (BERT) ===
+ judge_model_name = "neuralmind/bert-base-portuguese-cased"
+ judge_tokenizer = AutoTokenizer.from_pretrained(judge_model_name)
+ judge_model = AutoModelForSequenceClassification.from_pretrained(judge_model_name, num_labels=2)
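+ # Note: num_labels=2 attaches a randomly initialized classification head to
+ # this base BERT checkpoint, so the scores produced below are effectively
+ # arbitrary until the head is fine-tuned on a relevance task.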
+
+ # Similarity classifier (based on relevance to the prompt)
+ def score_response(prompt, response):
+     inputs = judge_tokenizer(prompt, response, return_tensors="pt", truncation=True, padding=True)
+     with torch.no_grad():
+         outputs = judge_model(**inputs)
+     score = torch.softmax(outputs.logits, dim=1)[0][1].item()  # probability of the "good" class (label index 1)
+     return score
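+
+ # Optional zero-shot alternative (a sketch, not part of the original app):
+ # because the judge head is untrained, a more defensible score is the cosine
+ # similarity between mean-pooled BERT embeddings of the prompt and the
+ # response, reusing the same checkpoint through AutoModel.
+ from transformers import AutoModel
+ embedder = AutoModel.from_pretrained(judge_model_name)
+
+ def embedding_score(prompt, response):
+     def embed(text):
+         enc = judge_tokenizer(text, return_tensors="pt", truncation=True)
+         with torch.no_grad():
+             hidden = embedder(**enc).last_hidden_state  # (1, seq_len, hidden)
+         return hidden.mean(dim=1)  # mean-pool over tokens -> (1, hidden)
+     # cosine_similarity over the hidden dimension yields a 1-element tensor
+     return torch.nn.functional.cosine_similarity(embed(prompt), embed(response)).item()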
+
+ # Generate a response with the given model
+ def generate_response(model, tokenizer, prompt):
+     input_ids = tokenizer.encode(prompt, return_tensors="pt")
+     output_ids = model.generate(
+         input_ids,
+         max_new_tokens=60,
+         num_return_sequences=1,
+         do_sample=True,
+         pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; silences the generate() warning
+     )
+     return tokenizer.decode(output_ids[0], skip_special_tokens=True)
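+
+ # Decoding note: temperature and top_p are standard generate() arguments and
+ # could be passed above (e.g. temperature=0.7, top_p=0.9) to trade diversity
+ # against coherence; those values are illustrative, not from the original.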
+
+ # Main function: generate two candidate responses and let the judge pick one
+ def chatbot(prompt):
+     response_1 = generate_response(model_1, tokenizer_1, prompt)
+     response_2 = generate_response(model_2, tokenizer_2, prompt)
+
+     score_1 = score_response(prompt, response_1)
+     score_2 = score_response(prompt, response_2)
+
+     if score_1 > score_2:
+         final = response_1
+         chosen = "Resposta 1"
+     else:
+         final = response_2
+         chosen = "Resposta 2"
+
+     return (
+         prompt,
+         response_1,
+         response_2,
+         chosen,
+         final,
+     )
+
+ # === GRADIO INTERFACE ===
+ iface = gr.Interface(
+     fn=chatbot,
+     inputs=gr.Textbox(label="Digite sua pergunta"),
+     outputs=[
+         gr.Textbox(label="Prompt"),
+         gr.Textbox(label="Resposta 1"),
+         gr.Textbox(label="Resposta 2"),
+         gr.Textbox(label="Resposta escolhida pelo árbitro"),
+         gr.Textbox(label="Resposta final exibida"),
+     ],
+     title="Chatbot em Cascata (Português)",
+     description="Dois modelos geram respostas e um árbitro (BERT) escolhe a melhor.",
+ )
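+
+ # Launch note: launch(share=True) would also expose a temporary public link
+ # when running locally; on Hugging Face Spaces the plain launch() below is
+ # enough.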
+
+ if __name__ == "__main__":
+     iface.launch()