Update app.py
app.py
CHANGED
@@ -3,6 +3,8 @@ import gradio as gr
 import requests
 import pandas as pd
 import json
+import io
+import re
 from langchain_core.messages import HumanMessage
 from langgraph_agent import build_graph

@@ -21,40 +23,38 @@ class BasicAgent:
         messages = [HumanMessage(content=question)]
         messages = self.graph.invoke({"messages": messages})

-        # Check if messages or content is missing
         if not messages or not messages.get('messages') or messages['messages'][-1].content is None:
             return "I am unable to determine the information using the available tools."

         answer = messages['messages'][-1].content

-        # Handle empty list content
         if isinstance(answer, list) and not answer:
             return "I am unable to determine the information using the available tools."

-        # Convert non-string answers to string
         if not isinstance(answer, str):
             answer = str(answer)

-        # Normalize and strip common answer prefixes
         answer = answer.strip()
-        [8 deleted lines, content truncated in the diff view]
+
+        # Extract final answer after "FINAL ANSWER:"
+        match = re.search(r'FINAL ANSWER:\s*(.*)', answer, re.IGNORECASE | re.DOTALL)
+        if match:
+            final_answer = match.group(1).strip()
+            # Remove surrounding quotes if present
+            if (final_answer.startswith('"') and final_answer.endswith('"')) or \
+               (final_answer.startswith("'") and final_answer.endswith("'")):
+                final_answer = final_answer[1:-1].strip()
+            answer = final_answer
+        else:
+            print("Warning: 'FINAL ANSWER:' not found in agent output; submitting full answer.")
+
+        if not answer:
             answer = "I am unable to determine the information using the available tools."

         return answer


 def run_and_submit_all(profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the BasicAgent on them, submits all answers,
-    and displays the results.
-    """
     space_id = os.getenv("SPACE_ID")

     if profile:
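The new extraction step assumes the agent is prompted to end its reply with a line of the form "FINAL ANSWER: <answer>" (a GAIA-style convention). A minimal standalone sketch of how this post-processing behaves on a made-up reply (the reply text and the expected "Paris" output are illustrative only):

import re

# Hypothetical agent reply, used only to illustrate the extraction in the hunk above.
raw = 'The capital of France is Paris.\nFINAL ANSWER: "Paris"'

match = re.search(r'FINAL ANSWER:\s*(.*)', raw, re.IGNORECASE | re.DOTALL)
answer = match.group(1).strip() if match else raw.strip()

# Mirror the quote stripping from the diff.
if (answer.startswith('"') and answer.endswith('"')) or \
   (answer.startswith("'") and answer.endswith("'")):
    answer = answer[1:-1].strip()

print(answer)  # -> Paris

Because the pattern uses re.DOTALL, everything after the marker is captured, including any later lines, so the prompt should keep "FINAL ANSWER:" on the last line of the reply.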
@@ -68,7 +68,6 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"

-    # 1. Instantiate Agent
     try:
         agent = BasicAgent()
     except Exception as e:
@@ -78,7 +77,6 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     print(agent_code)

-    # 2. Fetch Questions
     print(f"Fetching questions from: {questions_url}")
     try:
         response = requests.get(questions_url, timeout=15)
@@ -88,18 +86,10 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             print("Fetched questions list is empty.")
             return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
-    except
+    except Exception as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
-    except Exception as e:
-        print(f"An unexpected error occurred fetching questions: {e}")
-        return f"An unexpected error occurred fetching questions: {e}", None

-    # 3. Run your Agent
     results_log = []
     answers_payload = []

@@ -111,23 +101,23 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             print(f"Skipping item with missing task_id or question: {item}")
             continue
         try:
-            [deleted line, content truncated in the diff view]
-            print(f"Answer for task {task_id}: '{
+            model_answer = agent(question_text, task_id=task_id)
+            print(f"Answer for task {task_id}: '{model_answer}'")
             answers_payload.append({
                 "task_id": task_id,
-                "
-                # "reasoning_trace": None #
+                "model_answer": model_answer,
+                # "reasoning_trace": None # Add reasoning trace here if available
             })
             results_log.append({
                 "Task ID": task_id,
                 "Question": question_text,
-                "Submitted Answer":
+                "Submitted Answer": model_answer
             })
         except Exception as e:
             print(f"Error running agent on task {task_id}: {e}")
             answers_payload.append({
                 "task_id": task_id,
-                "
+                "model_answer": f"AGENT ERROR: {e}",
                 # "reasoning_trace": None
             })
             results_log.append({
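The loop now calls the agent as agent(question_text, task_id=task_id), which only works if BasicAgent.__call__ accepts a task_id keyword; the signature itself sits outside the hunks shown here, so this is an assumption. A hypothetical compatible signature, for illustration only:

class BasicAgent:
    def __call__(self, question: str, task_id: str | None = None) -> str:
        # task_id is assumed to be threaded through to tools that need a
        # task's attached file; the body shown in the diff only uses the question.
        ...

Presumably the id is passed so downstream tools can fetch whatever file is attached to the task before answering.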
@@ -140,20 +130,27 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

-    #
-    [11 deleted lines, content truncated in the diff view]
+    # Serialize answers to JSON Lines format
+    json_lines_str = "\n".join(json.dumps(ans, ensure_ascii=False) for ans in answers_payload)
+    file_like = io.BytesIO(json_lines_str.encode("utf-8"))
+    file_like.name = "submission.jsonl"
+
+    data = {
+        "username": username.strip(),
+        "agent_code": agent_code
+    }
+    files = {
+        "file": (file_like.name, file_like, "application/jsonl")
+    }
+
+    print(f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'...")
+    print("Submission payload preview (first 3 answers):")
+    for ans in answers_payload[:3]:
+        print(json.dumps(ans, ensure_ascii=False))
+
+    print(f"Submitting answers to: {submit_url}")
     try:
-        response = requests.post(submit_url,
+        response = requests.post(submit_url, data=data, files=files, timeout=60)
         response.raise_for_status()
         result_data = response.json()
         final_status = (
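The rewritten submission step serializes the answers to JSON Lines and sends them as a multipart file upload next to the username and agent_code form fields; this assumes the scoring endpoint accepts a file field named "file" (the deleted lines it replaces are truncated in the view above, so the previous request shape is not visible). A small sketch of what gets sent, with made-up task IDs and answers:

import io
import json

# Hypothetical answers, shaped like the payload built in the loop above.
answers_payload = [
    {"task_id": "task-001", "model_answer": "Paris"},
    {"task_id": "task-002", "model_answer": "42"},
]

# One JSON object per line, as in the diff.
json_lines_str = "\n".join(json.dumps(ans, ensure_ascii=False) for ans in answers_payload)
print(json_lines_str)
# {"task_id": "task-001", "model_answer": "Paris"}
# {"task_id": "task-002", "model_answer": "42"}

# Wrapping the string in an in-memory file lets requests.post(..., files=...)
# send it as a multipart upload named submission.jsonl.
file_like = io.BytesIO(json_lines_str.encode("utf-8"))
file_like.name = "submission.jsonl"
files = {"file": (file_like.name, file_like, "application/jsonl")}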