gpaasch committed on
Commit
ed04336
·
1 Parent(s): 0ef172c

interactive questioning enhancement

Browse files
Files changed (1) hide show
  1. src/app.py +13 -10
src/app.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  import gradio as gr
3
  from llama_index.llms.huggingface import HuggingFaceLLM
4
  from parse_tabular import symptom_index
 
5
 
6
  # --- System prompt ---
7
  SYSTEM_PROMPT = """
@@ -16,19 +17,21 @@ def process_speech(new_transcript, history):
16
  if not new_transcript:
17
  return history
18
 
19
- # Build LLM predictor
20
- llm_predictor = HuggingFaceLLM(model_name=os.getenv("HF_MODEL", "models/gpt2-medium"))
21
 
22
- # Query index with conversation
23
- prompt = "\n".join([f"{role}: {msg}" for role, msg in history])
24
- prompt += f"\nuser: {new_transcript}"
25
 
26
- response = symptom_index.as_query_engine(
27
- llm_predictor=llm_predictor
28
- ).query(prompt)
 
 
 
29
 
30
- # Append the new exchange to history
31
- history.append((new_transcript, str(response)))
32
  return history
33
 
34
  # Build Gradio interface
 
2
  import gradio as gr
3
  from llama_index.llms.huggingface import HuggingFaceLLM
4
  from parse_tabular import symptom_index
5
+ import json
6
 
7
  # --- System prompt ---
8
  SYSTEM_PROMPT = """
 
17
  if not new_transcript:
18
  return history
19
 
20
+ # Build conversation context
21
+ context = "\n".join([f"{role}: {msg}" for role, msg in history])
22
 
23
+ # Query symptom index for relevant codes
24
+ response = symptom_index.as_query_engine().query(new_transcript)
 
25
 
26
+ # Format response as structured JSON
27
+ formatted_response = {
28
+ "diagnoses": [str(response).split(":")[0]], # Extract ICD code
29
+ "confidences": [0.8], # Add confidence scoring
30
+ "follow_up": "Is the cough productive or dry?" # Add interactive questioning
31
+ }
32
 
33
+ # Append exchange to history
34
+ history.append((new_transcript, json.dumps(formatted_response, indent=2)))
35
  return history
36
 
37
  # Build Gradio interface