Update app.py
app.py CHANGED
@@ -19,8 +19,10 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 import os
 
-
-
+
+cache= {}
+
+
 web_search = DuckDuckGoSearchTool()
 python_interpreter = PythonInterpreterTool()
 visit_webpage_tool = VisitWebpageTool()
@@ -33,24 +35,14 @@ visit_webpage_tool = VisitWebpageTool()
 
 
 
-def 
-
-
-
-
-    path = f"cache/{key}.json"
-    if os.path.exists(path):
-        with open(path, "r") as f:
-            data = json.load(f)
-        return data.get("answer")
-    return None
-
-def cache_answer(question: str, answer: str):
-    key = get_cache_key(question)
-    path = f"cache/{key}.json"
-    with open(path, "w") as f:
-        json.dump({"question": question, "answer": answer}, f)
+def load_cached_answer(question_id: str) -> str:
+    if question_id in cache.keys():
+        return cache[question_id]
+    else:
+        return None
+
+def cache_answer(question_id: str, answer: str):
+    cache[question_id] = answer
 
 
 
 # --- Model Setup ---
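The rewrite above replaces the file-backed cache (one JSON file per question under cache/) with a plain module-level dict, so a lookup is just key access; the trade-off is that answers now survive only for the lifetime of the process, while the deleted version persisted across restarts. A minimal standalone sketch of the new pattern, equivalent to the diff but folding the membership test into dict.get; the names match the commit, and the str | None annotation is a tightening of the diff's str, since the function can return None:

cache = {}

def load_cached_answer(question_id: str) -> str | None:
    # dict.get returns None on a miss, replacing the `in cache.keys()` check.
    return cache.get(question_id)

def cache_answer(question_id: str, answer: str):
    # Overwrites any previous answer stored under the same task id.
    cache[question_id] = answer

cache_answer("task-1", "42")
print(load_cached_answer("task-1"))  # 42
print(load_cached_answer("task-2"))  # None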
@@ -84,16 +76,16 @@ def load_model(model_name):
 
 # Load the model and tokenizer locally
 #model, tokenizer = load_model()
-
+model = HfApiModel()#model_id=MODEL_NAME, max_tokens=512)
 
 from smolagents import TransformersModel
 
 
 
 model_id ="meta-llama/Llama-3.2-3B-Instruct"#"microsoft/phi-2"# not working out of the box"google/gemma-2-2b-it" #toobig"Qwen/Qwen1.5-7B-Chat"#working but stupid: "meta-llama/Llama-3.2-3B-Instruct"
-model = TransformersModel(
-    model_id=model_id,
-    max_new_tokens=256)
+#model = TransformersModel(
+#    model_id=model_id,
+#    max_new_tokens=256)
 
 #model = HfApiModel()
 
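The model swap above moves inference off the Space: TransformersModel downloads the Llama-3.2-3B checkpoint and runs it locally (hence the max_new_tokens budget now commented out), while HfApiModel sends requests to Hugging Face's hosted inference endpoints and needs only an HF token. A hedged sketch of the two configurations, assuming a smolagents version that still exposes HfApiModel (newer releases renamed it InferenceClientModel); the CodeAgent wiring is illustrative, not copied from this file:

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel

# Remote inference (what this commit enables): no local weights or GPU needed.
model = HfApiModel()

# Local inference (what this commit comments out): loads ~3B params into the Space.
# from smolagents import TransformersModel
# model = TransformersModel(model_id="meta-llama/Llama-3.2-3B-Instruct", max_new_tokens=256)

# Illustrative agent wiring, reusing one of the tools instantiated earlier in app.py.
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
print(agent.run("Which library provides DuckDuckGoSearchTool?"))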
@@ -183,13 +175,13 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             print(f"Skipping item with missing task_id or question: {item}")
             continue
         try:
-            cached = load_cached_answer(
+            cached = load_cached_answer(task_id)
             if cached:
                 submitted_answer = cached
                 print(f"Loaded cached answer for task {task_id}")
             else:
                 submitted_answer = agent(question_text)
-                cache_answer(
+                cache_answer(task_id, submitted_answer)
                 print(f"Generated and cached answer for task {task_id}")
 
         answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
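Together the two halves of this commit turn the submission loop into a look-aside cache keyed on task_id: consult the dict first, call the agent only on a miss, then store the result so a re-run within the same process skips finished tasks. One caveat: keying on task_id assumes a task's question never changes, whereas the deleted file cache derived its key from the question text itself. A compressed, self-contained sketch of the loop; questions_data and agent are hypothetical stand-ins for the values the real function fetches and builds:

cache = {}  # in-memory answer cache, as introduced above

questions_data = [{"task_id": "t1", "question": "What is 2 + 2?"}]  # hypothetical sample
agent = lambda question: "4"  # hypothetical stand-in for the real agent call

answers_payload = []
for item in questions_data:
    task_id = item.get("task_id")
    question_text = item.get("question")
    if not task_id or question_text is None:
        continue  # skip malformed items, as the full file does
    submitted_answer = cache.get(task_id)
    if submitted_answer is None:
        submitted_answer = agent(question_text)  # expensive call runs only on a miss
        cache[task_id] = submitted_answer
    answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
print(answers_payload)

The sketch tests `is None` rather than truthiness, so an empty-string answer still counts as cached; the file's `if cached:` would regenerate it.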