Update app.py
app.py
CHANGED
@@ -35,6 +35,15 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 # Initialize HF client
 client = InferenceClient(token=HF_TOKEN)
 
+# -------------------------
+# Constants
+# -------------------------
+
+SYSTEM = (
+    "You are a parser-safe assistant.\n"
+    "Output **ONLY** the JSON object requested—no extra words."
+)
+
 # -------------------------
 # Utility helpers
 # -------------------------
@@ -190,9 +199,9 @@ class BasicAgent:
     def _call_llm(self, prompt: str, max_tokens: int = 256) -> str:
         try:
             resp = self.llm.chat.completions.create(
-                model="gpt-4.1
+                model="gpt-4.1",
                 messages=[
-                    {"role": "system", "content":
+                    {"role": "system", "content": SYSTEM},
                     {"role": "user", "content": prompt},
                 ],
                 temperature=0.3,