import csv
import os
import random
import uuid
from datetime import datetime
from os.path import isfile

import gradio as gr
import requests
from huggingface_hub import HfApi

# Token used both to authenticate against the Inference Endpoints and to
# upload the CSV logs to the dataset repo.
HF_TOKEN = os.getenv("HF_Key")

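# Each entry maps a study condition to its own Inference Endpoint. A participant
# is assigned one model at random per conversation (see chat_multi_llm), so the
# three endpoints can be compared blind. URLs and model ids are overridable via
# environment variables; the literals below are the deployed defaults.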
MODEL_INFOS = [
    {
        "name": "Base",
        "endpoint_url": os.getenv("BASE_ENDPOINT_URL", "https://o3pz2i9x2k6otr2a.eu-west-1.aws.endpoints.huggingface.cloud/v1/"),
        "model": os.getenv("BASE_ENDPOINT_MODEL", "qwen3-4b-instruct-2507-pxe"),
    },
    {
        "name": "Math",
        "endpoint_url": os.getenv("MATH_ENDPOINT_URL", "https://jockj5ko30gpg5lg.eu-west-1.aws.endpoints.huggingface.cloud/v1/"),
        "model": os.getenv("MATH_ENDPOINT_MODEL", "teach-math-qwen3-4b-2507-r1--uab"),
    },
    {
        "name": "General",
        "endpoint_url": os.getenv("GENERAL_ENDPOINT_URL", "https://ucewop5x3jsguqwq.eu-west-1.aws.endpoints.huggingface.cloud/v1/"),
        "model": os.getenv("GENERAL_ENDPOINT_MODEL", "CanisAI/teach-generalist-qwen3-4b-2507-r1-merged"),
    },
]

# Dataset repository that receives the conversation and feedback CSV logs.
DATASET_REPO_ID = "CanisAI/mvlg-data"

api = HfApi()

FEEDBACK_POINTS = [
    "How clear was the explanation?",
    "How helpful were the steps in guiding you to the solution?",
    "How well did the assistant adapt to your learning style?",
    "How motivating and encouraging was the response?",
    "How accurate and reliable was the information provided?",
    "How relevant was the information to your question?",
    "How natural and conversational was the interaction?",
    "How much do you trust the assistant?",
]

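# The endpoints expose an OpenAI-compatible chat API, so the request body follows
# the /chat/completions schema: a model id, a list of {"role", "content"} messages,
# and sampling parameters.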
def query_chat_endpoint(endpoint_url, model, messages, max_tokens=150, temperature=0.7):
    url = endpoint_url.rstrip("/") + "/chat/completions"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": f"Bearer {HF_TOKEN}",
    }
    payload = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False,
    }
    try:
        # A timeout keeps a stalled endpoint from hanging the UI indefinitely.
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error: {str(e)}"

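# Illustrative call (hypothetical values), showing the expected message format:
#   query_chat_endpoint(MODEL_INFOS[0]["endpoint_url"], MODEL_INFOS[0]["model"],
#                       [{"role": "user", "content": "Hi"}])
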
def chat_multi_llm(message, history, current_model_state, conversation_id_state):
    if history is None:
        history = []
    if current_model_state is None:
        # First message of a conversation: assign a model at random and mint an id.
        current_model_state = random.choice(MODEL_INFOS)
        conversation_id_state = str(uuid.uuid4())
    messages = [{"role": msg["role"], "content": msg["content"]} for msg in history]
    messages.append({"role": "user", "content": message})
    model_name = current_model_state["name"]
    endpoint_url = current_model_state["endpoint_url"]
    model = current_model_state["model"]
    answer = query_chat_endpoint(endpoint_url, model, messages)
    log_chat_to_csv(message, history, {model_name: answer}, model_name, conversation_id_state)
    new_history = history + [{"role": "user", "content": message}, {"role": "assistant", "content": answer}]
    return new_history, current_model_state, conversation_id_state

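# Logging design: every turn is appended to a local CSV, then the whole file is
# re-uploaded, overwriting the remote copy. Caveat: if the Space restarts, the
# local CSV starts over and the next upload replaces the remote file, so earlier
# rows can be lost unless persistent storage is enabled.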
def log_chat_to_csv(message, history, results, used_model, conversation_id, filename="chat_conversations_log.csv"):
    file_exists = isfile(filename)
    with open(filename, mode="a", encoding="utf-8", newline="") as csvfile:
        writer = csv.writer(csvfile)
        if not file_exists:
            header = ["timestamp", "conversation_id", "history", "user_message", "used_model", "response"]
            writer.writerow(header)
        row = [datetime.now().isoformat(), conversation_id, str(history), message, used_model, list(results.values())[0]]
        writer.writerow(row)

    try:
        api.upload_file(
            path_or_fileobj=filename,
            path_in_repo=filename,
            repo_id=DATASET_REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN,
        )
    except Exception as e:
        print(f"Error uploading to HF dataset: {e}")

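# The return tuple below must line up positionally with the `outputs` list of
# feedback_btn.click(...) further down: it hides the feedback column, re-shows the
# chat, clears all per-conversation state, and re-enables the input controls.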
def submit_feedback(current_model, conversation_id, *slider_values):
    filename = "feedback_log.csv"
    file_exists = isfile(filename)

    with open(filename, "a", encoding="utf-8", newline="") as f:
        writer = csv.writer(f)
        if not file_exists:
            writer.writerow(["timestamp", "conversation_id", "used_model"] + FEEDBACK_POINTS)
        writer.writerow([
            datetime.now().isoformat(),
            conversation_id,
            current_model["name"] if current_model else "None",
        ] + list(slider_values))

    try:
        api.upload_file(
            path_or_fileobj=filename,
            path_in_repo=filename,
            repo_id=DATASET_REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN,
        )
    except Exception as e:
        print(f"Error uploading feedback to HF dataset: {e}")

    return (
        gr.update(visible=False),      # hide feedback column
        gr.update(visible=True),       # show chat column
        gr.update(value="Thank you! You can start a new conversation.", visible=True),
        [],                            # reset history_state
        None,                          # reset current_model_state
        None,                          # reset conversation_id_state
        gr.update(interactive=True),   # re-enable message box
        gr.update(interactive=True),   # re-enable send button
        [],                            # clear chatbot display
        gr.update(visible=False),      # hide end-of-conversation notice
    )

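# UI. BrowserState keeps history, model assignment, and conversation id in the
# visitor's browser, so a page reload restores the ongoing conversation (via
# demo.load below) instead of silently reassigning a model mid-study.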
with gr.Blocks() as demo:
    gr.Markdown("""
# LLM Case Study: Multi-Model Chat Comparison

Start a conversation. After finishing, you can provide feedback and start a new conversation. By using the app you accept that your interactions and feedback will be logged and used for research purposes. Please don't share any personal, sensitive, or confidential information.
""")

    history_state = gr.BrowserState([])
    current_model_state = gr.BrowserState(None)
    conversation_id_state = gr.BrowserState(None)

    with gr.Column(visible=True) as chat_col:
        chatbot = gr.Chatbot(type="messages", value=[])
        msg = gr.Textbox(placeholder="Enter your message...", show_label=False)
        submit_btn = gr.Button("Send")
        end_btn = gr.Button("End conversation and give feedback")
        end_info = gr.Markdown("", visible=False)

    with gr.Column(visible=False) as feedback_col:
        sliders = [gr.Slider(1, 10, value=5, step=1, label=label) for label in FEEDBACK_POINTS]
        feedback_btn = gr.Button("Submit feedback and start new conversation")
        feedback_info = gr.Markdown("", visible=False)

    def user_message(message, history, current_model, conversation_id):
        if message is None or message.strip() == "":
            return history, "", history, current_model, conversation_id
        new_history, updated_model, updated_conv_id = chat_multi_llm(message, history, current_model, conversation_id)
        return new_history, "", new_history, updated_model, updated_conv_id

    def load_chat_history(history):
        """Load the chat history into the chatbot on page load."""
        if history is None:
            return []
        return history

    def end_conversation():
        return (
            gr.update(visible=True),       # show feedback column
            gr.update(visible=False),      # hide chat column
            gr.update(value="Please provide feedback on the last conversation.", visible=True),
            gr.update(interactive=False),  # lock message box
            gr.update(interactive=False),  # lock send button
        )

    msg.submit(
        user_message,
        inputs=[msg, history_state, current_model_state, conversation_id_state],
        outputs=[chatbot, msg, history_state, current_model_state, conversation_id_state],
        queue=False,
    )

    submit_btn.click(
        user_message,
        inputs=[msg, history_state, current_model_state, conversation_id_state],
        outputs=[chatbot, msg, history_state, current_model_state, conversation_id_state],
        queue=False,
    )

    end_btn.click(
        end_conversation,
        inputs=None,
        outputs=[feedback_col, chat_col, end_info, msg, submit_btn],
    )

    feedback_btn.click(
        submit_feedback,
        inputs=[current_model_state, conversation_id_state] + sliders,
        outputs=[feedback_col, chat_col, feedback_info, history_state, current_model_state, conversation_id_state, msg, submit_btn, chatbot, end_info],
    )

    # Restore any persisted conversation when the page (re)loads.
    demo.load(
        load_chat_history,
        inputs=[history_state],
        outputs=[chatbot],
    )

if __name__ == "__main__":
    demo.launch()