Commit 0576ce3
Parent(s): 330b156
fixed treatment input code

app.py CHANGED

@@ -65,15 +65,22 @@ print(
 # === Role Agent with instruction/input/output format ===
 class RoleAgent:
     def __init__(self, role_instruction, tokenizer, model):
+        format_instr = (
+            "When you respond, ALWAYS use exactly two sections:\n"
+            "THINKING: <your detailed reasoning>\n"
+            "ANSWER: <your concise final output>\n"
+            "Do NOT output anything else.\n\n"
+        )
+
         self.tokenizer = tokenizer
         self.model = model
-        self.role_instruction = role_instruction
+        self.role_instruction = role_instruction + format_instr
 
     def act(self, input_text):
         prompt = (
-            f"
-            f"
-            f"
+            f"instruction: {self.role_instruction}\n"
+            f"input: {input_text}\n"
+            f"output:"
         )
         encoding = self.tokenizer(prompt, return_tensors="pt")
         inputs = {k: v.to(self.model.device) for k, v in encoding.items()}
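
For context, a minimal, illustrative sketch of how the new instruction/input/output prompt composes and how the mandated THINKING/ANSWER format might be split afterwards. The example role instruction and the parse_sections helper are assumptions for illustration; app.py's actual handling of the model output is not shown in this diff.

# Illustrative sketch only: mirrors the prompt layout introduced in the hunk above.
role_instruction = (
    "You are a diagnostician. Propose the most likely diagnosis.\n"
    "When you respond, ALWAYS use exactly two sections:\n"
    "THINKING: <your detailed reasoning>\n"
    "ANSWER: <your concise final output>\n"
    "Do NOT output anything else.\n\n"
)
input_text = "Vignette: 34-year-old with a dry cough for two weeks, no fever."

# Same composition as the new RoleAgent.act prompt:
prompt = (
    f"instruction: {role_instruction}\n"
    f"input: {input_text}\n"
    f"output:"
)

def parse_sections(generated: str) -> dict:
    # Hypothetical helper: split a THINKING/ANSWER completion into its two parts.
    thinking, _, answer = generated.partition("ANSWER:")
    return {
        "thinking": thinking.replace("THINKING:", "", 1).strip(),
        "output": answer.strip(),
    }

print(prompt)
print(parse_sections("THINKING: chronic dry cough, afebrile\nANSWER: post-viral cough"))
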
@@ -172,20 +179,14 @@ def simulate_interaction(user_input, iterations=1):
         diagnosis = diag_out["output"]
 
         # Question
-        q_in = (
-            f"Vignette: {summary}\n"
-            f"Current Estimated Diagnosis:\n"
-            f"ANSWER: {diagnosis}"
-        )
+        q_in = f"Vignette: {summary}\n" f"Current Estimated Diagnosis:\n" f"{diagnosis}"
         q_out = questioner.act(q_in)
         history.append(f"Doctor: {q_out['output']}")
         # Append actual user input to keep the conversation realistic
         history.append(f"Patient: {user_input}")
 
         # Treatment
-        treatment_out = treatment_agent.act(
-            f"Diagnosis: {diagnosis}\nVignette: {summary}"
-        )
+        treatment_out = treatment_agent.act(f"Diagnosis: {diagnosis}\n")
 
     return {
         "summary": sum_out,
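
For the second hunk, a minimal sketch of how the question and treatment inputs chain together after the fix, with stub agents standing in for the model-backed RoleAgents. The StubAgent class and the summarizer/diagnoser inputs are placeholders; only the q_in and treatment_agent.act lines mirror the diff above.

# Illustrative sketch only: the agent call sequence in simulate_interaction after
# this commit, with stub act() methods in place of real model calls.
class StubAgent:
    def __init__(self, label):
        self.label = label

    def act(self, input_text):
        # A real RoleAgent would run the model here; the stub just echoes.
        return {"output": f"<{self.label} reply to: {input_text[:40]}...>"}

summarizer = StubAgent("summarizer")
diagnoser = StubAgent("diagnoser")
questioner = StubAgent("questioner")
treatment_agent = StubAgent("treatment")

history = ["Patient: I've had a dry cough for two weeks."]
summary = summarizer.act("\n".join(history))["output"]
diagnosis = diagnoser.act(f"Vignette: {summary}")["output"]

# Question input: vignette plus current estimated diagnosis, as in the hunk above.
q_in = f"Vignette: {summary}\n" f"Current Estimated Diagnosis:\n" f"{diagnosis}"
q_out = questioner.act(q_in)
history.append(f"Doctor: {q_out['output']}")

# Treatment input: after the fix, the treatment agent sees only the diagnosis.
treatment_out = treatment_agent.act(f"Diagnosis: {diagnosis}\n")
print(treatment_out["output"])
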