Sbnos commited on
Commit
47f40b7
·
verified ·
1 Parent(s): 8a2c4ea

Check check

Browse files
Files changed (1) hide show
  1. app.py +31 -25
app.py CHANGED
@@ -3,7 +3,6 @@ import streamlit as st
3
  from together import Together
4
  from langchain.vectorstores import Chroma
5
  from langchain.embeddings import HuggingFaceBgeEmbeddings
6
- from langchain.chains import ConversationalRetrievalChain
7
 
8
  # --- Configuration ---
9
  TOGETHER_API_KEY = os.environ.get("pilotikval")
@@ -43,8 +42,8 @@ cols = {
43
  'OldMedicine': 'mrcppassmednotes'
44
  }
45
 
46
- persist_directory = dirs.get(collection)
47
- collection_name = cols.get(collection)
48
 
49
  # Load Chroma vector store
50
  vectorstore = Chroma(
@@ -55,47 +54,51 @@ vectorstore = Chroma(
55
  retriever = vectorstore.as_retriever(search_kwargs={"k":20})
56
 
57
  # System prompt template
58
- SYSTEM_PROMPT = (
59
- "You are a helpful assistant for medical professionals. "
60
- "Use the following context from medical documents to answer the question. "
61
- "If you don't know, say you don't know.\n\nContext:\n{context}\n"
62
- )
 
63
 
64
  st.title("🩺 DocChatter RAG (Streaming)")
65
 
66
  # Initialize chat history
67
  if 'chat_history' not in st.session_state:
68
- st.session_state.chat_history = [] # list of dicts {role, content}
 
 
 
69
 
70
- # Tabs
71
  chat_tab, clear_tab = st.tabs(["Chat", "Clear History"])
 
72
  with chat_tab:
73
- # Display history
74
  for msg in st.session_state.chat_history:
75
  st.chat_message(msg['role']).write(msg['content'])
76
 
77
- # User input
78
- if prompt := st.chat_input("Ask anything about your docs..."):
79
- # User message
80
  st.chat_message("user").write(prompt)
81
  st.session_state.chat_history.append({"role": "user", "content": prompt})
82
 
83
- # Retrieve relevant docs
84
  docs = retriever.get_relevant_documents(prompt)
85
  context = "\n---\n".join([d.page_content for d in docs])
86
 
87
- # Build messages for TogetherAI
88
- system_msg = {"role": "system", "content": SYSTEM_PROMPT.format(context=context)}
89
- messages = [system_msg]
90
- # include prior conversation
91
- for msg in st.session_state.chat_history:
92
- if msg['role'] in ('user', 'assistant'):
93
- messages.append(msg)
94
- # Prepare streaming response
95
  response_container = st.chat_message("assistant")
96
  placeholder = response_container.empty()
97
  answer = ""
98
- # Stream tokens
99
  for token in client.chat.completions.create(
100
  model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
101
  messages=messages,
@@ -105,10 +108,13 @@ with chat_tab:
105
  delta = token.choices[0].delta.content
106
  answer += delta
107
  placeholder.write(answer)
108
- # Save assistant message
109
  st.session_state.chat_history.append({"role": "assistant", "content": answer})
110
 
111
  with clear_tab:
112
  if st.button("🗑️ Clear chat history"):
113
  st.session_state.chat_history = []
114
  st.experimental_rerun()
 
 
 
 
3
  from together import Together
4
  from langchain.vectorstores import Chroma
5
  from langchain.embeddings import HuggingFaceBgeEmbeddings
 
6
 
7
  # --- Configuration ---
8
  TOGETHER_API_KEY = os.environ.get("pilotikval")
 
42
  'OldMedicine': 'mrcppassmednotes'
43
  }
44
 
45
+ persist_directory = dirs[collection]
46
+ collection_name = cols[collection]
47
 
48
  # Load Chroma vector store
49
  vectorstore = Chroma(
 
54
  retriever = vectorstore.as_retriever(search_kwargs={"k":20})
55
 
56
  # System prompt template
57
def build_system(context: str) -> dict:
    """Build the system message for a chat-completion request.

    The retrieved document *context* is appended to a fixed instruction
    prompt so the model answers only from the supplied material (and
    admits when it does not know).
    """
    instructions = (
        "You are a helpful assistant for medical professionals. "
        "Use the following context from medical documents to answer the question. "
        "If you don't know, say you don't know.\n\nContext:\n"
    )
    return {"role": "system", "content": instructions + context}
63
 
64
# --- Page chrome and shared state ---
st.title("🩺 DocChatter RAG (Streaming)")

# The transcript must survive Streamlit's script reruns, so it lives in
# session state; each entry is a {"role": ..., "content": ...} dict.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

# st.chat_input has to be called at the top level of the script — it is
# not allowed inside a tab container.
prompt = st.chat_input("Ask anything about your docs...")

# Two views: the conversation itself, and a control to wipe it.
chat_tab, clear_tab = st.tabs(["Chat", "Clear History"])
75
+
76
  with chat_tab:
77
+ # Display conversation so far
78
  for msg in st.session_state.chat_history:
79
  st.chat_message(msg['role']).write(msg['content'])
80
 
81
+ # If user has just submitted
82
+ if prompt:
83
+ # Show user message
84
  st.chat_message("user").write(prompt)
85
  st.session_state.chat_history.append({"role": "user", "content": prompt})
86
 
87
+ # Retrieve context
88
  docs = retriever.get_relevant_documents(prompt)
89
  context = "\n---\n".join([d.page_content for d in docs])
90
 
91
+ # Assemble messages for streaming
92
+ messages = [build_system(context)]
93
+ # Include full history
94
+ for m in st.session_state.chat_history:
95
+ if m['role'] in ('user','assistant'):
96
+ messages.append(m)
97
+
98
+ # Stream assistant response
99
  response_container = st.chat_message("assistant")
100
  placeholder = response_container.empty()
101
  answer = ""
 
102
  for token in client.chat.completions.create(
103
  model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
104
  messages=messages,
 
108
  delta = token.choices[0].delta.content
109
  answer += delta
110
  placeholder.write(answer)
111
+ # Save response
112
  st.session_state.chat_history.append({"role": "assistant", "content": answer})
113
 
114
with clear_tab:
    # Wipe the stored conversation and rerun the script immediately so
    # the chat pane clears on this same interaction.
    if st.button("🗑️ Clear chat history"):
        st.session_state.chat_history = []
        # Fix: st.experimental_rerun() was deprecated in Streamlit 1.27
        # and later removed; prefer st.rerun() when the installed
        # version provides it, falling back for older releases.
        if hasattr(st, "rerun"):
            st.rerun()
        else:
            st.experimental_rerun()
118
+
119
+ # Optionally persist new embeddings
120
+ # vectorstore.persist()