# quiz/app.py — Gradio chat app for reviewing Canvas quiz questions.
import hmac
import json
import os

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
def login(username, password):
    """Authenticate the single admin user for the Gradio auth dialog.

    Args:
        username: Login name entered by the user.
        password: Password entered by the user.

    Returns:
        True only for the hard-coded admin credentials, else False.
    """
    # hmac.compare_digest is constant-time, closing the timing side channel
    # of a plain ``==`` password comparison.
    # NOTE(review): credentials are hard-coded; consider moving them to
    # environment variables alongside the API key.
    return username == "admin" and hmac.compare_digest(
        password.encode("utf-8"), b"NRSG4604"
    )
def build_system_prompt(params: dict) -> str:
    """Build the tutoring system prompt from quiz data in URL query params.

    Args:
        params: Query-parameter dict; recognized keys are ``question``,
            ``choices`` (a JSON-encoded list), ``student_answer`` and
            ``correct_answer``. All keys are optional.

    Returns:
        A newline-joined prompt containing the question, its choices
        labelled A, B, C, ..., the student's and correct answers, and
        the tutoring instructions.
    """
    question = params.get("question", "")
    raw_choices = params.get("choices", "[]")
    try:
        choices = json.loads(raw_choices)
    except json.JSONDecodeError:
        choices = []
    # Guard against valid JSON that is not a list (e.g. a bare string),
    # which would otherwise be enumerated character by character below.
    if not isinstance(choices, list):
        choices = []
    student_answer = params.get("student_answer", "")
    correct_answer = params.get("correct_answer", "")
    # You can tune this prompt however you like
    lines = []
    lines.append("You are a tutoring assistant helping a student review a Canvas quiz question.")
    if question:
        lines.append(f"\nQuestion:\n{question}")
    if choices:
        lines.append("\nChoices:")
        for i, c in enumerate(choices):
            label = chr(ord("A") + i)
            lines.append(f"{label}. {c}")
    if student_answer:
        lines.append(f"\nStudent's answer: {student_answer}")
    if correct_answer:
        lines.append(f"Correct answer: {correct_answer}")
    lines.append("\n\nWhen the student asks something, explain step-by-step why the correct answer is correct and, if relevant, why the student's answer is incorrect. Be supportive and focus on reasoning, not just telling them the answer.")
    return "\n".join(lines)
def predict(message, messages, request: gr.Request):
    """Stream an OpenAI chat completion for the quiz-review chat.

    Generator consumed by gr.ChatInterface: yields the partial assistant
    reply as chunks arrive so the UI renders the response incrementally.

    Args:
        message: The user's latest message text.
        messages: Chat history in OpenAI "messages" format; mutated in place
            (system prompt, user turn and assistant reply are appended).
        request: Gradio request object carrying the URL query params that
            hold the quiz data.
    """
    if request is not None and hasattr(request, "query_params"):
        query_params = dict(request.query_params)
    else:
        # NOTE(review): a `return` value inside a generator is discarded
        # (it only ends iteration) — with no request the UI simply gets
        # no streamed output.
        return messages
    # 2. On the first turn, inject a system prompt built from the quiz data
    if len(messages) == 0:
        system_prompt = build_system_prompt(query_params)
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": message})
    params = {
        "model": "gpt-5",
        "messages": messages,
        "stream": True,
        # Ask the API to include token-usage stats in the final stream event.
        "stream_options": {"include_usage": True},
    }
    response = client.chat.completions.create(**params)
    content = ""
    # Accumulate deltas and yield the growing reply so the UI streams it.
    for event in response:
        if event.choices and event.choices[0].delta.content:
            chunk = event.choices[0].delta.content
            content += chunk
            yield content
    # Persist the finished assistant reply into the shared history list.
    messages.append(
        {
            "role": "assistant",
            "content": content,
        }
    )
    return messages
def vote(data: gr.LikeData):
    """Log a like/dislike event on a chatbot response to stdout."""
    prefix = (
        "You upvoted this response: "
        if data.liked
        else "You downvoted this response: "
    )
    print(prefix + data.value["value"])
def show_question(request: gr.Request):
    """Render the quiz question from the URL query params as Markdown."""
    q = dict(request.query_params).get("question", "")
    if not q:
        return "No question data found in URL."
    return f"### Question\n\n{q}"
# OpenAI client configured from the `api` environment variable
# (populated from .env by load_dotenv above).
api_key=os.getenv('api')
client = OpenAI(api_key=api_key)
# HTML placeholder shown in the empty chatbot before the first message.
placeholder = """
<center><h1>Hello there!</h1><br>
How can I help you?
</center>
"""
# Clickable example prompts shown under the chat input.
examples=["Can you explain why my answer is wrong?", "Can you explain this concept?"]
# --- Gradio UI assembly ----------------------------------------------------
with gr.Blocks(title="Chat") as demo:
    # Markdown panel showing the quiz question parsed from the URL.
    question_md = gr.Markdown()
    chatbot=gr.Chatbot(
        placeholder=placeholder,
        type='messages',
    )
    #chatbot.like(vote, None, None)
    chat = gr.ChatInterface(
        predict,
        chatbot=chatbot,
        type="messages",
        examples=examples,
        cache_examples=False,
        flagging_mode="manual"
    )
    # Populate the question panel from the URL query params on page load.
    demo.load(show_question, inputs=None, outputs=question_md)
# Basic auth via login(); server-side rendering disabled.
demo.launch(auth=login, ssr_mode=False)