Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Update app.py
Browse files
app.py
CHANGED
@@ -58,6 +58,35 @@ def clean_json_string(json_str):
|
|
58 |
return re.sub(r'[ ,}\s]+$', '', json_str) + '}'
|
59 |
|
60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
def completion(history, model, system_prompt: str, tools=None):
|
62 |
messages = [{"role": "system", "content": system_prompt.format(date=today_date())}]
|
63 |
for msg in history:
|
@@ -75,7 +104,7 @@ def completion(history, model, system_prompt: str, tools=None):
|
|
75 |
request_params = {
|
76 |
"model": model,
|
77 |
"messages": messages,
|
78 |
-
"stream":
|
79 |
"max_tokens": 1000,
|
80 |
"temperature": 0.2,
|
81 |
"frequency_penalty": 0.2,
|
@@ -132,8 +161,11 @@ def llm_in_loop(history, system_prompt, recursive):
|
|
132 |
if appended:
|
133 |
recursive -= 1
|
134 |
if name:
|
135 |
-
|
136 |
-
|
|
|
|
|
|
|
137 |
# msg = ChatMessage(
|
138 |
# role="assistant",
|
139 |
# content="",
|
|
|
58 |
return re.sub(r'[ ,}\s]+$', '', json_str) + '}'
|
59 |
|
60 |
|
61 |
+
def get_summary(model, text, tools=None):
    """Request a detailed, faithful summary of *text* from the chat-completions API.

    Parameters:
        model: Model identifier forwarded unchanged to the API.
        text: User-provided text to summarize; sent as the user message.
        tools: Optional tool schemas. When provided, tool choice is delegated
            to the model ("tool_choice": "auto"). The original body referenced
            an undefined global `tools`; it is now an explicit parameter.

    Returns:
        The non-streamed chat-completion response object, so callers can read
        ``response.choices[0].message.content`` directly.
    """
    messages = [{"role": "system", "content": """You are an AI assistant that generates **detailed and complete summaries** of user-provided text. Your task is to produce a **faithful summary** that preserves **all key information**, facts, and relevant points from the original content.

### Summary Guidelines:

- **No Detail Skipping**: Do **not** omit or simplify important content. Every critical fact, event, name, number, and nuance must be included.
- **Structured Clarity**: Organize the summary clearly and logically. If the original has sections or topics, reflect that structure.
- **No Personal Input**: Do **not** add opinions, interpretations, or external knowledge. Stay 100% faithful to the source text.
- **Conciseness with Completeness**: Be as concise as possible **without losing any important detail**.

Only produce the summary after fully reading and understanding the input text.
"""}]
    # Fixed: original used `msg.role`, but `msg` is undefined here (NameError);
    # the text being summarized is the user turn.
    messages.append({"role": "user", "content": f"**TEXT**:\n\n{text}"})

    request_params = {
        "model": model,
        "messages": messages,
        # Fixed: was True, but the visible caller reads
        # .choices[0].message.content, which requires a non-streamed response
        # (matches the "stream": False used by `completion` in this file).
        "stream": False,
        "max_tokens": 1000,
        "temperature": 0.2,
        "frequency_penalty": 0.2,
        "extra_body": {"repetition_penalty": 1.2},
    }
    if tools:
        request_params.update({"tool_choice": "auto", "tools": tools})
    # NOTE(review): `client` is a module-level OpenAI-style client defined
    # elsewhere in this file.
    return client.chat.completions.create(**request_params)
|
89 |
+
|
90 |
def completion(history, model, system_prompt: str, tools=None):
|
91 |
messages = [{"role": "system", "content": system_prompt.format(date=today_date())}]
|
92 |
for msg in history:
|
|
|
104 |
request_params = {
|
105 |
"model": model,
|
106 |
"messages": messages,
|
107 |
+
"stream": False,
|
108 |
"max_tokens": 1000,
|
109 |
"temperature": 0.2,
|
110 |
"frequency_penalty": 0.2,
|
|
|
161 |
if appended:
|
162 |
recursive -= 1
|
163 |
if name:
|
164 |
+
try:
|
165 |
+
result = str(tools[name].invoke(input=arguments))
|
166 |
+
result = get_summary(result).choices[0].message.content
|
167 |
+
except Exception as err:
|
168 |
+
result = f"💥 Error: {err}"}
|
169 |
# msg = ChatMessage(
|
170 |
# role="assistant",
|
171 |
# content="",
|