CodCodingCode committed on
Commit
d25cc99
·
1 Parent(s): f702238

added more debugging code and fixed up summarizer's input

Browse files
Files changed (1) hide show
  1. app.py +25 -7
app.py CHANGED
@@ -75,7 +75,6 @@ class RoleAgent:
75
  f"Input: {input_text}\n"
76
  f"Output:"
77
  )
78
- print("[DEBUG] prompt:", prompt)
79
  encoding = self.tokenizer(prompt, return_tensors="pt")
80
  inputs = {k: v.to(self.model.device) for k, v in encoding.items()}
81
 
@@ -89,13 +88,19 @@ class RoleAgent:
89
  response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
90
 
91
  thinking = ""
92
- print(response)
93
  answer = response
94
- if all(tag in response for tag in ("THINKING:", "ANSWER:", "END")):
95
- print("[FIX] response:", response)
96
- block = response.split("THINKING:")[1].split("END")[0]
97
- thinking = block.split("ANSWER:")[0].strip()
98
- answer = block.split("ANSWER:")[1].strip()
 
 
 
 
 
 
99
 
100
  return {"thinking": thinking, "output": answer}
101
 
@@ -122,6 +127,19 @@ treatment_agent = RoleAgent(
122
  model=model,
123
  )
124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
  # === Inference State ===
127
  conversation_history = []
 
75
  f"Input: {input_text}\n"
76
  f"Output:"
77
  )
 
78
  encoding = self.tokenizer(prompt, return_tensors="pt")
79
  inputs = {k: v.to(self.model.device) for k, v in encoding.items()}
80
 
 
88
  response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
89
 
90
  thinking = ""
91
+ print(f"[RESPONSE]: {response}")
92
  answer = response
93
+ if "Output:" in response:
94
+ # Split on the last occurrence of 'Output:' in case it's repeated
95
+ answer = response.rsplit("Output:", 1)[-1].strip()
96
+ else:
97
+ # Fallback: if thinking/answer/end tags exist, use previous logic
98
+ tags = ("THINKING:", "ANSWER:", "END")
99
+ if all(tag in response for tag in tags):
100
+ print("[FIX] tagged response detected:", response)
101
+ block = response.split("THINKING:", 1)[1].split("END", 1)[0]
102
+ thinking = block.split("ANSWER:", 1)[0].strip()
103
+ answer = block.split("ANSWER:", 1)[1].strip()
104
 
105
  return {"thinking": thinking, "output": answer}
106
 
 
127
  model=model,
128
  )
129
 
130
+ """[DEBUG] prompt: Instruction: You are a clinical summarizer trained to extract structured vignettes from doctor–patient dialogues.
131
+ Input: Doctor: What brings you in today?
132
+ Patient: I am a male. I am 15. My knee hurts. What may be the issue with my knee?
133
+
134
+ Previous Vignette:
135
+ Output:
136
+ Instruction: You are a clinical summarizer trained to extract structured vignettes from doctor–patient dialogues.
137
+ Input: Doctor: What brings you in today?
138
+ Patient: I am a male. I am 15. My knee hurts. What may be the issue with my knee?
139
+
140
+ Previous Vignette:
141
+ Output: The patient is a 15-year-old male presenting with knee pain."""
142
+
143
 
144
  # === Inference State ===
145
  conversation_history = []