import gradio as gr
from transformers import pipeline

# Candidate generator models
# NOTE: "Canarim-Instruct-PTBR-Dataset" appears to be a Hub *dataset* id rather than
# a model id; replace it with a full text-generation checkpoint id before running.
model_a = pipeline(
    "text-generation",
    model="Canarim-Instruct-PTBR-Dataset",
    return_full_text=False,
)
model_b = pipeline(
    "text-generation",
    model="EleutherAI/gpt-neo-125M",
    return_full_text=False,
)

# Judge model
# NOTE: BERTimbau base has no fine-tuned classification head, so transformers will
# attach a randomly initialized one; for meaningful scores use a sentiment-fine-tuned
# checkpoint (see the label-aware sketch after the interface code below).
arbiter = pipeline(
    "text-classification",
    model="neuralmind/bert-base-portuguese-cased",
)

# Batched judgment: score both responses in one call and keep the higher-scored one.
def judge_response(response_a, response_b):
    results = arbiter([response_a, response_b])
    score_a = results[0]["score"]  # confidence of the top label, whichever label that is
    score_b = results[1]["score"]
    return ("Model A", response_a) if score_a > score_b else ("Model B", response_b)

# Main function: generate a response with each model, then let the arbiter pick a winner.
def chatbot(prompt):
    response_a = model_a(prompt, max_new_tokens=40)[0]["generated_text"].strip()
    response_b = model_b(prompt, max_new_tokens=40)[0]["generated_text"].strip()
    winner, final_response = judge_response(response_a, response_b)
    return prompt, response_a, response_b, winner, final_response

# Interface
iface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(label="Type your question:"),
    outputs=[
        gr.Textbox(label="Question"),
        gr.Textbox(label="Model A response (Canarim-Instruct-PTBR-Dataset)"),
        gr.Textbox(label="Model B response (GPT-Neo-125M)"),
        gr.Textbox(label="Winning model"),
        gr.Textbox(label="Chosen response"),
    ],
    title="Chatbot with a Judge",
    description="Compares the responses of two models and uses an arbiter to pick the better one based on positive sentiment.",
)

iface.launch()
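
# --------------------------------------------------------------------------------
# Optional: a label-aware sentiment judge (a minimal, self-contained sketch, not part
# of the app above). Because the base BERTimbau arbiter has no trained classification
# head, its scores carry little signal; this variant assumes a Portuguese
# sentiment-fine-tuned checkpoint is available. The model id
# "pysentimiento/bertweet-pt-sentiment" and its POS/NEU/NEG label names are
# assumptions, not part of the original code. It compares the probability assigned to
# the positive label instead of raw top-label confidences, and could be swapped in for
# judge_response above (define it before gr.Interface and pass it to chatbot).
from transformers import pipeline

sentiment_arbiter = pipeline(
    "text-classification",
    model="pysentimiento/bertweet-pt-sentiment",  # assumption: any PT sentiment checkpoint works
    top_k=None,  # return the score of every label, not only the top one
)

def positive_score(label_scores):
    # Probability of the positive class, tolerant of label spellings like "POS"/"positive".
    return max(
        (s["score"] for s in label_scores if s["label"].upper().startswith("POS")),
        default=0.0,
    )

def judge_by_sentiment(response_a, response_b):
    # Score both candidate responses in one batched call, then keep the more positive one.
    scores_a, scores_b = sentiment_arbiter([response_a, response_b])
    if positive_score(scores_a) >= positive_score(scores_b):
        return "Model A", response_a
    return "Model B", response_b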