import gradio as gr
from typing import Tuple, List
import requests
import os

url = "http://138.4.22.130/arena"


def get_styling(values):
    # Paint a horizontal bar inside each table cell: the gradient is filled
    # with the row colour up to the percentage value and transparent after it,
    # e.g. "background: linear-gradient(90deg, rgb(0,190,0) 62.5%, transparent 62.5%)".
    colors = ["rgb(0,190,0)", "rgb(240,165,0)"]
    return [
        ["", f"background: linear-gradient(90deg, {colors[ind % 2]} {row[1]}%, transparent {row[1]}%)"]
        for ind, row in enumerate(values)
    ]


headers = ["Modelo", "Porcentaje %"]


def get_display(values):
    # Render the percentage column with two decimals for display.
    display_values = []
    for val in values:
        display_values.append([val[0], '{:.2f}'.format(val[1])])
    return display_values


def fetch_gpt():
    rAns = requests.get(url + "/v2/stats/gpt")
    ansJSON = rAns.json()
    gpt4omin, gpt4o = 0, 0
    backdown = ansJSON["resWithAwareness"]
    ties = ansJSON["ties"]
    for model in ansJSON["resByModel"]:
        if model["_id"] == "gpt-4o-mini":
            gpt4omin = model["count"]
        elif model["_id"] == "gpt-4o":
            gpt4o = model["count"]
    total = gpt4o + gpt4omin + ties
    data = [
        ["GPT4 mini", (gpt4omin + ties) / total * 100],
        ["GPT4", gpt4o / total * 100],
        ["GPT4 mini con concienciación", (gpt4omin + backdown + ties) / total * 100],
        ["GPT4 con concienciación", (gpt4o - backdown) / total * 100],
    ]
    styling = get_styling(data)
    display_value = get_display(data)
    return {
        "data": data,
        "headers": headers,
        "metadata": {"styling": styling, "display_value": display_value},
    }


def fetch_llama():
    rAns = requests.get(url + "/v2/stats/llama")
    ansJSON = rAns.json()
    llama70, llama8 = 0, 0
    backdown = ansJSON["resWithAwareness"]
    ties = ansJSON["ties"]
    for model in ansJSON["resByModel"]:
        if model["_id"] == "llama-3.3-70b-versatile":
            llama70 = model["count"]
        elif model["_id"] == "llama3-8b-8192":
            llama8 = model["count"]
    total = llama70 + llama8 + ties
    data = [
        ["Llama 8", (llama8 + ties) / total * 100],
        ["Llama 70", llama70 / total * 100],
        ["Llama 8 con concienciación", (llama8 + backdown + ties) / total * 100],
        ["Llama 70 con concienciación", (llama70 - backdown) / total * 100],
    ]
    styling = get_styling(data)
    display_value = get_display(data)
    return {
        "data": data,
        "headers": headers,
        "metadata": {"styling": styling, "display_value": display_value},
    }


def fetch_claude():
    rAns = requests.get(url + "/v2/stats/claude")
    ansJSON = rAns.json()
    haiku, sonet = 0, 0
    backdown = ansJSON["resWithAwareness"]
    ties = ansJSON["ties"]
    for model in ansJSON["resByModel"]:
        # Backend ids kept verbatim ("clause-...") to match the stored data.
        if model["_id"] == "clause-haiku-3.5":
            haiku = model["count"]
        elif model["_id"] == "clause-sonet-3.5":
            sonet = model["count"]
    total = sonet + haiku + ties
    data = [
        ["Claude", (haiku + ties) / total * 100],
        ["Sonet", sonet / total * 100],
        ["Claude con concienciación", (haiku + backdown + ties) / total * 100],
        ["Sonet con concienciación", (sonet - backdown) / total * 100],
    ]
    styling = get_styling(data)
    display_value = get_display(data)
    return {
        "data": data,
        "headers": headers,
        "metadata": {"styling": styling, "display_value": display_value},
    }


def fetch_gpt_2():
    rAns = requests.get(url + "/v2/stats/gpt1")
    ansJSON = rAns.json()
    gpt41, gpt41mini = 0, 0
    backdown = ansJSON["resWithAwareness"]
    ties = ansJSON["ties"]
    for model in ansJSON["resByModel"]:
        if model["_id"] == "gpt-4.1":
            gpt41 = model["count"]
        elif model["_id"] == "gpt-4.1-mini":
            gpt41mini = model["count"]
    total = gpt41 + gpt41mini + ties
    data = [
        ["GPT4.1 mini", (gpt41mini + ties) / total * 100],
        ["GPT4.1", gpt41 / total * 100],
        ["GPT4.1 mini con concienciación", (backdown + gpt41mini + ties) / total * 100],
        ["GPT4.1 con concienciación", (gpt41 - backdown) / total * 100],
    ]
    styling = get_styling(data)
    display_value = get_display(data)
    return {
        "data": data,
        "headers": headers,
        "metadata": {"styling": styling, "display_value": display_value},
    }
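
# The four fetch_* functions above share the same shape and differ only in the
# endpoint and the model ids/labels. A minimal consolidation sketch follows; it
# is a hypothetical helper, not called by the app, and the parameter names are
# assumptions rather than part of the original code.
def fetch_stats(endpoint: str, small_id: str, big_id: str,
                small_label: str, big_label: str):
    ansJSON = requests.get(url + "/v2/stats/" + endpoint).json()
    counts = {m["_id"]: m["count"] for m in ansJSON["resByModel"]}
    small, big = counts.get(small_id, 0), counts.get(big_id, 0)
    backdown = ansJSON["resWithAwareness"]
    ties = ansJSON["ties"]
    total = small + big + ties
    data = [
        [small_label, (small + ties) / total * 100],
        [big_label, big / total * 100],
        [small_label + " con concienciación", (small + backdown + ties) / total * 100],
        [big_label + " con concienciación", (big - backdown) / total * 100],
    ]
    return {
        "data": data,
        "headers": headers,
        "metadata": {"styling": get_styling(data), "display_value": get_display(data)},
    }
# e.g. fetch_stats("gpt", "gpt-4o-mini", "gpt-4o", "GPT4 mini", "GPT4")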


def submit_prompt(prompt: str):
    # Assumes a local `backend` module exposing router(); it is not imported
    # in this file.
    return backend.router(prompt)


def start_app() -> Tuple[bool, bool, bool]:
    return (
        gr.update(visible=False),  # hide the landing column
        gr.update(visible=True),   # show the app column
        gr.update(visible=False),  # hide the start button
    )


# Don't ask: three thin wrappers are the simplest way to fix vote_type while
# still letting Gradio inject the gr.Request parameter.
def record_vote_0(prompt: str, left_chat: List, right_chat: List,
                  left_model: str, right_model: str, energy, moreConsuming,
                  request: gr.Request) -> Tuple[str, bool, bool, bool, bool, bool]:
    return record_vote(prompt, left_chat, right_chat, left_model, right_model,
                       energy, moreConsuming, request, 0)


def record_vote_1(prompt: str, left_chat: List, right_chat: List,
                  left_model: str, right_model: str, energy, moreConsuming,
                  request: gr.Request) -> Tuple[str, bool, bool, bool, bool, bool]:
    return record_vote(prompt, left_chat, right_chat, left_model, right_model,
                       energy, moreConsuming, request, 1)


def record_vote_2(prompt: str, left_chat: List, right_chat: List,
                  left_model: str, right_model: str, energy, moreConsuming,
                  request: gr.Request) -> Tuple[str, bool, bool, bool, bool, bool]:
    return record_vote(prompt, left_chat, right_chat, left_model, right_model,
                       energy, moreConsuming, request, 2)


def change_vote(_id: str, backdown: bool) -> Tuple[bool, bool]:
    requests.post(url + "/v2/backdownvote", json={"backdown": backdown, "_id": _id})
    return (
        gr.update(visible=False),
        gr.update(visible=False),
    )


def record_vote(prompt: str, left_chat: List, right_chat: List,
                left_model: str, right_model: str, energy, moreConsuming,
                request: gr.Request, vote_type: int) -> Tuple[str, bool, bool, bool, bool, bool, bool]:
    """Record a vote for the left model, the right model, or a tie."""
    vote_message = "Is a tie!"
    if vote_type == 0:
        vote_message = "Right model wins!"
    elif vote_type == 1:
        vote_message = "Left model wins!"
    result_msg = f"Vote recorded: {vote_message}"
    response = requests.post(url + "/v2/vote", json={
        "vote": vote_type,
        "prompt": prompt,
        "left_chat": left_chat,
        "right_chat": right_chat,
        "left_model": left_model,
        "right_model": right_model,
        "ip": request.client.host,
    })
    jsonResponse = response.json()
    _id = jsonResponse["id"]
    # Decide whether to show the backdown (reconsider-your-vote) row, based on
    # which side the backend flagged as more energy consuming.
    changeVisible = ((moreConsuming == "izquierda" and vote_type == 0)
                     or (moreConsuming == "derecha" and vote_type == 1))
    return (
        result_msg,                        # result
        gr.update(interactive=False),      # left_vote_btn
        gr.update(interactive=False),      # right_vote_btn
        gr.update(interactive=False),      # tievote_btn
        gr.update(visible=True),           # model_names_row
        gr.update(visible=changeVisible),  # backdown_row
        _id,
        gr.update(interactive=True),
    )
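
# A wiring sketch for the handlers above (hypothetical: the component names
# below, such as left_vote_btn and prompt_box, are assumptions, and the real
# bindings are declared further down in this file). Gradio injects the trailing
# gr.Request argument automatically because the handler declares it:
#
#   left_vote_btn.click(
#       record_vote_1,
#       inputs=[prompt_box, left_output, right_output,
#               left_model, right_model, energy, moreConsuming],
#       outputs=[result, left_vote_btn, right_vote_btn, tievote_btn,
#                model_names_row, backdown_row, _id, send_btn],
#   )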
result_msg = f"Vote recorded: {vote_message}" response = requests.post(url + "/v2/vote", json={"vote": vote_type, "prompt": prompt, "left_chat": left_chat, "right_chat": right_chat, "left_model": left_model, "right_model": right_model, "ip": request.client.host }) changeVisible = False jsonResponse = response.json() _id = jsonResponse["id"] if((moreConsuming == "izquierda" and vote_type == 0) or (moreConsuming == "derecha" and vote_type == 1)): changeVisible = True #result, left_model, buttons[0], buttons[1], tievote_btn, model_names_row, return ( result_msg, # result gr.update(interactive=False), # left_vote_btn interactive gr.update(interactive=False), # right_vote_btn interactive gr.update(interactive=False), # tie_btn interactive gr.update(visible=True), # model_names_row visible gr.update(visible=changeVisible), # backdown_row visible _id, gr.update(interactive=True) ) def send_prompt(prompt: str) -> Tuple[List, List, str, str, bool, bool, bool, bool, str, bool]: response = requests.post(url + "/v2/query", json={"prompt": prompt}) jsonResponse = response.json() if(jsonResponse["status"] == 200): moreConsuming = jsonResponse["message"]["moreConsumption"] return ( [{"role":"assistant", "content": jsonResponse["answers"][0]}], # left_output [{"role": "assistant", "content": jsonResponse["answers"][1]}], # right_output jsonResponse["models"][0], # left_model, jsonResponse["models"][1], # right_model, gr.update(interactive=True, visible=True), gr.update(interactive=True, visible=True), gr.update(interactive=True, visible=True), gr.update(visible=False), moreConsuming, gr.update(interactive=False) ) css = """ .logo {max-width: 200px !important; widht: 300px;} .myElemento { background-color:#E8E9E3; border-color:#E8E9E3; } """ js = """ document.getElementById("start").onclick = function() { window.scrollTo(0, 0); } """ buttons_toggle = [None] * 2 # Initialize the list with None elements # Initialize Gradio Blocks with gr.Blocks(css=css, js=js) as mainapp: _id = gr.State("") moreConsuming = gr.State("") with gr.Column(visible=True) as landing: gr.set_static_paths(paths=["static"]) with gr.Group(elem_classes="container"): gr.HTML("""
Este espacio es parte del proyecto "Sostenibilidad Generativa" 🌍, desarrollado en la Escuela Técnica Superior de Ingenieros de Telecomunicación de la Universidad Politécnica de Madrid y financiado por la Fundación Cotec. Nuestro objetivo es evaluar cómo la conciencia energética ⚡ impacta la evaluación de los usuarios sobre los Modelos de Lenguaje de Gran Escala (LLMs).
🌿 ¡Hagamos la IA más sostenible juntos! 🚀♻️