Arri98 committed on
Commit
ddfab79
verified
1 Parent(s): e71699e

Manually upload app because reasons

Browse files
Files changed (1) hide show
  1. app.py +162 -9
app.py CHANGED
@@ -6,6 +6,154 @@ import os
6
  url = "http://138.4.22.130/arena"
7
 
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
def submit_prompt(prompt: str):
    # Thin wrapper: delegates routing of the user's prompt to the backend.
    # NOTE(review): `backend` is defined elsewhere in the project — presumably
    # `router` picks the model pair to answer with; confirm against backend module.
    return backend.router(prompt)
11
 
@@ -16,7 +164,6 @@ def start_app()-> Tuple[bool, bool, bool]:
16
  gr.update(visible=False), # start_button visible
17
  )
18
 
19
-
20
  #Dont ask, best way to get the request param
21
  def record_vote_0(prompt: str, left_chat: List, right_chat: List,
22
  left_model: str, right_model: str, energy, moreConsuming, request: gr.Request) -> Tuple[str, bool, bool, bool, bool, bool]:
@@ -31,7 +178,6 @@ def record_vote_2(prompt: str, left_chat: List, right_chat: List,
31
  return record_vote(prompt, left_chat, right_chat, left_model, right_model, energy, moreConsuming, request, 2)
32
 
33
  def change_vote( _id:str, backdown: bool,) -> Tuple[bool, bool]:
34
- print(backdown, _id)
35
  response = requests.post(url + "/v2/backdownvote", json={"backdown": backdown, "_id": _id})
36
  return (
37
  gr.update(visible=False),
@@ -52,10 +198,6 @@ def record_vote(prompt: str, left_chat: List, right_chat: List,
52
  "left_model": left_model, "right_model": right_model,
53
  "ip": request.client.host
54
  })
55
- print(request.client.host)
56
- print(request.client)
57
- print(request.headers)
58
- print(request.client.host)
59
  changeVisible = False
60
  jsonResponse = response.json()
61
  _id = jsonResponse["id"]
@@ -79,7 +221,6 @@ def record_vote(prompt: str, left_chat: List, right_chat: List,
79
  def send_prompt(prompt: str) -> Tuple[List, List, str, str, bool, bool, bool, bool, str, bool]:
80
  response = requests.post(url + "/v2/query", json={"prompt": prompt})
81
  jsonResponse = response.json()
82
- print(jsonResponse)
83
  if(jsonResponse["status"] == 200):
84
  moreConsuming = jsonResponse["message"]["moreConsumption"]
85
  return (
@@ -106,8 +247,11 @@ js = """
106
 
107
  """
108
 
 
 
 
109
  # Initialize Gradio Blocks
110
- with gr.Blocks(css=css, js=js) as demo:
111
  _id = gr.State("")
112
  moreConsuming = gr.State("")
113
  with gr.Column(visible=True) as landing:
@@ -303,7 +447,6 @@ with gr.Blocks(css=css, js=js) as demo:
303
  </div>
304
  </div>
305
  """, )
306
-
307
  start_button = gr.Button(value="Start", visible=True, interactive=True, size= "lg",elem_id="start", variant="primary")
308
  start_button.click(
309
  lambda *args: start_app(),
@@ -315,8 +458,18 @@ with gr.Blocks(css=css, js=js) as demo:
315
Este space es parte de un proyecto de investigación para estudiar cómo el conocimiento sobre el consumo de energía influye en las preferencias de los usuarios en los sistemas de IA. Debe usarse solo para ese propósito y no para actividades ilegales, dañinas u ofensivas. Por favor, no subas información personal o privada. Este espacio recopila y almacena las preguntas y respuestas y se reserva el derecho de distribuirlas bajo una licencia Creative Commons Attribution (CC-BY). """
316
  )
317
 
 
 
 
 
 
 
 
 
 
318
 
319
  if __name__ == "__main__":
320
  gr.set_static_paths(paths=[os.path.join(os.path.dirname(__file__), "static")])
 
321
  demo.launch(show_api=False)
322
 
 
6
  url = "http://138.4.22.130/arena"
7
 
8
 
9
def get_styling(values):
    """Build per-row CSS metadata so the percentage column renders as a bar.

    Rows alternate between green and orange; each bar's gradient stops at the
    row's percentage value (row[1]), leaving the rest transparent.
    """
    colors = ["rgba(0,190,0)", "rgba(240,165,0)"]
    styles = []
    for ind, row in enumerate(values):
        color = colors[ind % 2]
        styles.append(["", f"background: linear-gradient(90deg, {color} {row[1]}%, transparent {row[1]}%)"])
    return styles
12
+
13
# Column headers shared by every stats DataFrame ("Model", "Percentage %").
headers = ["Modelo", "Porcentaje %"]
14
+
15
def get_display(values):
    """Return display rows: the label unchanged, the percentage as a
    two-decimal string."""
    return [[row[0], '{:.2f}'.format(row[1])] for row in values]
20
+
21
def fetch_gpt():
    """Fetch GPT-4o vs GPT-4o-mini vote stats and build a DataFrame payload.

    Returns a dict with raw data rows, column headers, and per-cell
    styling / display metadata in the shape gr.DataFrame expects.
    """
    response = requests.get(url + "/v2/stats/gpt")
    stats = response.json()
    gpt4omin, gpt4o = 0, 0
    # Votes that switched after the energy-awareness prompt, and ties.
    backdown = stats["resWithAwareness"]
    ties = stats["ties"]

    for model in stats["resByModel"]:
        if model["_id"] == "gpt-4o-mini":
            gpt4omin = model["count"]
        elif model["_id"] == "gpt-4o":
            gpt4o = model["count"]

    # Hoist the shared denominator; guard against ZeroDivisionError when no
    # votes have been recorded yet (all percentages become 0 in that case).
    total = gpt4o + gpt4omin + ties
    if total == 0:
        total = 1

    data = [
        ["GPT4 mini", (gpt4omin + ties) / total * 100],
        ["GPT4", gpt4o / total * 100],
        ["GPT4 mini con concienciación", (gpt4omin + backdown + ties) / total * 100],
        ["GPT4 con concienciación", (gpt4o - backdown) / total * 100],
    ]
    return {
        "data": data,
        "headers": headers,
        "metadata": {
            "styling": get_styling(data),
            "display_value": get_display(data),
        },
    }
53
+
54
+
55
def fetch_llama():
    """Fetch Llama-70B vs Llama-8B vote stats and build a DataFrame payload.

    Returns a dict with raw data rows, column headers, and per-cell
    styling / display metadata in the shape gr.DataFrame expects.
    """
    response = requests.get(url + "/v2/stats/llama")
    stats = response.json()
    llama70, llama8 = 0, 0
    # Votes that switched after the energy-awareness prompt, and ties.
    backdown = stats["resWithAwareness"]
    ties = stats["ties"]

    for model in stats["resByModel"]:
        if model["_id"] == "llama-3.3-70b-versatile":
            llama70 = model["count"]
        elif model["_id"] == "llama3-8b-8192":
            llama8 = model["count"]

    # Hoist the shared denominator; guard against ZeroDivisionError when no
    # votes have been recorded yet (all percentages become 0 in that case).
    total = llama70 + llama8 + ties
    if total == 0:
        total = 1

    data = [
        ["Llama 8", (llama8 + ties) / total * 100],
        ["Llama 70", llama70 / total * 100],
        ["Llama 8 con concienciación", (llama8 + backdown + ties) / total * 100],
        ["Llama 70 con concienciación", (llama70 - backdown) / total * 100],
    ]
    return {
        "data": data,
        "headers": headers,
        "metadata": {
            "styling": get_styling(data),
            "display_value": get_display(data),
        },
    }
87
+
88
+
89
+
90
+
91
def fetch_claude():
    """Fetch Claude Haiku vs Sonnet vote stats and build a DataFrame payload.

    Returns a dict with raw data rows, column headers, and per-cell
    styling / display metadata in the shape gr.DataFrame expects.
    """
    response = requests.get(url + "/v2/stats/claude")
    stats = response.json()
    cloude, sonet = 0, 0
    # Votes that switched after the energy-awareness prompt, and ties.
    backdown = stats["resWithAwareness"]
    ties = stats["ties"]

    for model in stats["resByModel"]:
        # NOTE(review): "clause-*" looks like a typo for "claude-*", but these
        # ids must match what the backend stores — confirm before renaming.
        if model["_id"] == "clause-haiku-3.5":
            cloude = model["count"]
        elif model["_id"] == "clause-sonet-3.5":
            sonet = model["count"]

    # Hoist the shared denominator; guard against ZeroDivisionError when no
    # votes have been recorded yet (all percentages become 0 in that case).
    total = sonet + cloude + ties
    if total == 0:
        total = 1

    data = [
        ["Claude", (cloude + ties) / total * 100],
        ["Sonet", sonet / total * 100],
        ["Claude con concienciación", (cloude + backdown + ties) / total * 100],
        # NOTE(review): this row adds `ties` where the other fetchers use
        # (winner - backdown) only, and the label says "Sonet 70" — both look
        # like copy/paste slips; kept as-is to preserve behavior, verify.
        ["Sonet 70 con concienciación", (sonet - backdown + ties) / total * 100],
    ]
    return {
        "data": data,
        "headers": headers,
        "metadata": {
            "styling": get_styling(data),
            "display_value": get_display(data),
        },
    }
123
+
124
def fetch_gpt_2():
    """Fetch GPT-4.1 vs GPT-4.1-mini vote stats and build a DataFrame payload.

    Returns a dict with raw data rows, column headers, and per-cell
    styling / display metadata in the shape gr.DataFrame expects.
    """
    response = requests.get(url + "/v2/stats/gpt1")
    stats = response.json()
    # Bug fix: the original initialized `gpt1` (never used) instead of
    # `gpt41`, so a response without a "gpt-4.1" entry raised NameError below.
    gpt41, gpt41mini = 0, 0
    # Votes that switched after the energy-awareness prompt, and ties.
    backdown = stats["resWithAwareness"]
    ties = stats["ties"]

    for model in stats["resByModel"]:
        if model["_id"] == "gpt-4.1":
            gpt41 = model["count"]
        elif model["_id"] == "gpt-4.1-mini":
            gpt41mini = model["count"]

    # Hoist the shared denominator; guard against ZeroDivisionError when no
    # votes have been recorded yet (all percentages become 0 in that case).
    total = gpt41 + gpt41mini + ties
    if total == 0:
        total = 1

    data = [
        ["GPT4.1 mini", (gpt41mini + ties) / total * 100],
        ["GPT4.1 ", gpt41 / total * 100],
        ["GPT4.1 mini con concienciación", (backdown + gpt41mini + ties) / total * 100],
        ["GPT4.1 con concienciación", (gpt41 - backdown) / total * 100],
    ]
    return {
        "data": data,
        "headers": headers,
        "metadata": {
            "styling": get_styling(data),
            "display_value": get_display(data),
        },
    }
155
+
156
+
157
  def submit_prompt(prompt: str):
158
  return backend.router(prompt)
159
 
 
164
  gr.update(visible=False), # start_button visible
165
  )
166
 
 
167
  #Dont ask, best way to get the request param
168
  def record_vote_0(prompt: str, left_chat: List, right_chat: List,
169
  left_model: str, right_model: str, energy, moreConsuming, request: gr.Request) -> Tuple[str, bool, bool, bool, bool, bool]:
 
178
  return record_vote(prompt, left_chat, right_chat, left_model, right_model, energy, moreConsuming, request, 2)
179
 
180
  def change_vote( _id:str, backdown: bool,) -> Tuple[bool, bool]:
 
181
  response = requests.post(url + "/v2/backdownvote", json={"backdown": backdown, "_id": _id})
182
  return (
183
  gr.update(visible=False),
 
198
  "left_model": left_model, "right_model": right_model,
199
  "ip": request.client.host
200
  })
 
 
 
 
201
  changeVisible = False
202
  jsonResponse = response.json()
203
  _id = jsonResponse["id"]
 
221
  def send_prompt(prompt: str) -> Tuple[List, List, str, str, bool, bool, bool, bool, str, bool]:
222
  response = requests.post(url + "/v2/query", json={"prompt": prompt})
223
  jsonResponse = response.json()
 
224
  if(jsonResponse["status"] == 200):
225
  moreConsuming = jsonResponse["message"]["moreConsumption"]
226
  return (
 
247
 
248
  """
249
 
250
+ buttons_toggle = [None] * 2 # Initialize the list with None elements
251
+
252
+
253
  # Initialize Gradio Blocks
254
+ with gr.Blocks(css=css, js=js) as mainapp:
255
  _id = gr.State("")
256
  moreConsuming = gr.State("")
257
  with gr.Column(visible=True) as landing:
 
447
  </div>
448
  </div>
449
  """, )
 
450
  start_button = gr.Button(value="Start", visible=True, interactive=True, size= "lg",elem_id="start", variant="primary")
451
  start_button.click(
452
  lambda *args: start_app(),
 
458
Este space es parte de un proyecto de investigación para estudiar cómo el conocimiento sobre el consumo de energía influye en las preferencias de los usuarios en los sistemas de IA. Debe usarse solo para ese propósito y no para actividades ilegales, dañinas u ofensivas. Por favor, no subas información personal o privada. Este espacio recopila y almacena las preguntas y respuestas y se reserva el derecho de distribuirlas bajo una licencia Creative Commons Attribution (CC-BY). """
459
  )
460
 
461
# Live stats tab: each DataFrame calls its fetch_* function on load and then
# re-fetches every 60 seconds via gr.Timer.
with gr.Blocks(fill_height=False) as data:
    gr.Markdown("## GPT4")
    gr.DataFrame(fetch_gpt, every=gr.Timer(60))
    gr.Markdown("## Llama 3")
    gr.DataFrame(fetch_llama, every=gr.Timer(60))
    gr.Markdown("## GPT4.1")
    gr.DataFrame(fetch_gpt_2, every=gr.Timer(60))
    gr.Markdown("## Claude")
    gr.DataFrame(fetch_claude, every=gr.Timer(60))
470
 
471
if __name__ == "__main__":
    # Expose the ./static directory (next to this script) for asset serving.
    gr.set_static_paths(paths=[os.path.join(os.path.dirname(__file__), "static")])
    # Combine the arena UI and the live-stats tab; css/js apply app-wide.
    demo = gr.TabbedInterface([mainapp, data], ["Arena", "Data"], css=css, js=js)
    demo.launch(show_api=False)
475