gpaasch committed
Commit 0ef172c · 1 Parent(s): 3f1fdcf

getting local to work to prevent burning up my credits

Files changed (4):
  1. .gitignore +3 -3
  2. requirements.txt +4 -3
  3. src/app.py +5 -10
  4. src/parse_tabular.py +18 -0
.gitignore CHANGED
@@ -1,4 +1,4 @@
-venv
-.venv
+venv/
+.venv/
 __pycache__
-gpt2-medium
+models/
requirements.txt CHANGED
@@ -3,12 +3,13 @@ gradio[mcp]
 gradio
 
 # core Llama-Index + HF model support
-llama-index>=0.9.0 # Specify minimum version
-llama-index-embeddings-huggingface
 openai
-transformers
 torch
+transformers[torch]
 accelerate
+llama-index>=0.9.0 # Specify minimum version
+llama-index-embeddings-huggingface
+llama-index-llms-huggingface
 
 # optional extras
 langchain
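
Note: the reshuffled pins put the Hugging Face stack (torch, transformers[torch], accelerate) ahead of the Llama-Index packages and add llama-index-llms-huggingface, which provides the HuggingFaceLLM class imported in src/app.py below. A quick way to confirm the environment resolves (a minimal sketch; it only exercises the imports this commit depends on, nothing model-specific):

    # sanity_check.py -- minimal sketch: verify the new pins import cleanly.
    # These module paths are the ones src/app.py relies on after this commit.
    import torch
    import transformers
    from llama_index.llms.huggingface import HuggingFaceLLM
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    print("torch", torch.__version__, "| transformers", transformers.__version__)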
src/app.py CHANGED
@@ -1,12 +1,7 @@
 import os
 import gradio as gr
-from llama_index.core import VectorStoreIndex
-from llama_index.llms import HuggingFaceLLMPredictor
-from llama_index.readers import SimpleDirectoryReader
-
-# Relative imports should be explicit
-from parse_tabular import symptom_index # Changed from relative import
-from ..utils.llama_index_utils import get_llm_predictor, build_index, query_symptoms
+from llama_index.llms.huggingface import HuggingFaceLLM
+from parse_tabular import symptom_index
 
 # --- System prompt ---
 SYSTEM_PROMPT = """
@@ -22,7 +17,7 @@ def process_speech(new_transcript, history):
         return history
 
     # Build LLM predictor
-    llm_predictor = HuggingFaceLLMPredictor(model_name_or_path=os.getenv("HF_MODEL", "gpt2-medium"))
+    llm_predictor = HuggingFaceLLM(model_name=os.getenv("HF_MODEL", "models/gpt2-medium"))
 
     # Query index with conversation
     prompt = "\n".join([f"{role}: {msg}" for role, msg in history])
@@ -33,7 +28,7 @@ def process_speech(new_transcript, history):
     ).query(prompt)
 
     # Append the new exchange to history
-    history.append((new_transcript, response.response))
+    history.append((new_transcript, str(response)))
     return history
 
 # Build Gradio interface
@@ -41,7 +36,7 @@ demo = gr.Blocks()
 with demo:
     gr.Markdown("# Symptom to ICD-10 Code Lookup (Audio Input)")
     chatbot = gr.Chatbot(label="Conversation")
-    audio = gr.Audio(source="microphone", type="text", streaming=True)
+    audio = gr.Audio(type="filepath", streaming=True)
 
     audio.stream(
         process_speech,
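
Note: swapping the removed HuggingFaceLLMPredictor for HuggingFaceLLM, with HF_MODEL defaulting to the local models/gpt2-medium directory, is what keeps inference off hosted APIs. A minimal sketch of the same pattern in isolation, assuming the weights have already been downloaded into models/gpt2-medium:

    import os
    from llama_index.llms.huggingface import HuggingFaceLLM

    # Loads tokenizer + weights from a local directory, so no API credits
    # are consumed; HF_MODEL can still point at a hub id if desired.
    model_path = os.getenv("HF_MODEL", "models/gpt2-medium")
    llm = HuggingFaceLLM(model_name=model_path, tokenizer_name=model_path)

    print(llm.complete("Patient reports persistent cough with fever."))

The gr.Audio fix matters too: "text" is not a valid Audio type (only "numpy" or "filepath"), so with type="filepath" the streaming callback now receives a path to each recorded chunk rather than a transcript, and process_speech is responsible for transcription.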
src/parse_tabular.py CHANGED
@@ -84,3 +84,21 @@ if __name__ == "__main__":
     else:
         main() # Use default path
     symptom_index = create_symptom_index()
+
+    # Test multiple queries
+    test_queries = [
+        "persistent cough with fever",
+        "severe headache with nausea",
+        "lower back pain",
+        "difficulty breathing"
+    ]
+
+    print("\nTesting symptom matching:")
+    print("-" * 50)
+
+    for query in test_queries:
+        response = symptom_index.as_query_engine().query(query)
+        print(f"\nQuery: {query}")
+        print(f"Relevant ICD-10 codes:")
+        print(str(response))
+        print("-" * 50)