Spaces:
Sleeping
Sleeping
File size: 4,605 Bytes
1be66cb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
import streamlit as st
import openai
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Qdrant
from langchain_core.documents import Document
from operator import itemgetter
# Load API key from .env file into the process environment; this must run
# before the clients below are constructed so they can pick up the key.
load_dotenv()
# Chat model used by the RAG chain in analyze_survey_document().
base_llm = ChatOpenAI(model="gpt-4o")
# The raw openai client (used by chat_with_ai) needs the key set explicitly.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Embedding model used to index survey chunks into the Qdrant vector store.
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
def analyze_survey_document(survey_text, question="Summarize the key findings of this survey."):
    """Answer `question` about a pulse survey using a small RAG pipeline.

    The survey text is chunked on token counts, embedded, and indexed into an
    in-memory Qdrant collection; the chunks most relevant to `question` are
    retrieved and passed to the chat model as context.

    Args:
        survey_text: Full survey document (questions plus employee answers).
        question: Analyst question to answer. Defaults to a generic summary
            request so callers may pass only the survey text (the Streamlit
            page calls this function with a single argument).

    Returns:
        The model's answer as a plain string.
    """
    prompt = """
You are an HR analyst answering questions about employee pulse survey responses. Below is the entire survey with questions and corresponding employee answers.
**context:**
{context}
**question:**
{question}
Answer all questions based on the context. If you don't know say I don't know.
"""
    rag_prompt = ChatPromptTemplate.from_template(prompt)

    # Chunk by token count so every chunk fits comfortably in the context window.
    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=500,
        chunk_overlap=50,
    )
    chunks = text_splitter.split_text(survey_text)

    # In-memory vector store, rebuilt on every call — acceptable for one-shot
    # analysis, wasteful if the same survey is queried repeatedly.
    qdrant_vectorstore = Qdrant.from_texts(
        chunks,
        embedding_model,
        location=":memory:",
        collection_name="formatted_text",
    )
    retriever = qdrant_vectorstore.as_retriever()

    # Retrieval + generation chain. The original included a redundant
    # RunnablePassthrough.assign(context=itemgetter("context")) step that
    # reassigned "context" to itself; it has been removed.
    lcel_rag_chain = (
        {
            "context": itemgetter("question") | retriever,
            "question": itemgetter("question"),
        }
        | {"response": rag_prompt | base_llm, "context": itemgetter("context")}
    )
    # NOTE: the "context" key passed to invoke() is ignored — the chain derives
    # its context from the retriever. Kept for backward compatibility.
    result = lcel_rag_chain.invoke({"context": survey_text, "question": question})
    return result["response"].content
def chat_with_ai(chat_history):
    """Send the accumulated chat history to the gpt-4o chat endpoint.

    Args:
        chat_history: List of {"role": ..., "content": ...} message dicts.

    Returns:
        The assistant's reply text, or an "Error: ..." string if the API
        call fails (best-effort: the UI displays the error instead of crashing).
    """
    try:
        completion = openai.chat.completions.create(
            model="gpt-4o",
            messages=chat_history,
        )
    except Exception as exc:
        return f"Error: {str(exc)}"
    return completion.choices[0].message.content
def survey_agent(prompt, uploaded_file):
    """Render the pulse-survey analysis page plus a follow-up chat UI.

    Args:
        prompt: Unused here; kept for interface compatibility with the caller.
        uploaded_file: Streamlit UploadedFile with the survey (.txt), or a
            falsy value to fall back to the bundled sample survey.

    Returns:
        The assistant's latest chat answer, or None when no question was asked.
    """
    st.subheader("π Employee Pulse Survey Analysis & AI Chat")

    # Bundled sample survey used as a fallback when nothing is uploaded.
    with open('./data/employee_pulse_survey.txt', 'r') as file:
        survey_text_base = file.read()

    if uploaded_file:
        survey_text = uploaded_file.read().decode("utf-8").strip()
    else:
        # BUG FIX: the original only defined the fallback variable on this
        # branch and relied on boolean short-circuiting to avoid a NameError;
        # the fallback is now assigned unconditionally.
        survey_text = survey_text_base

    # NOTE(review): reconstructed as a single literal — the original string
    # was split across two source lines (invalid Python), likely a paste
    # artifact of a garbled emoji prefix.
    st.write("File uploaded successfully! Analyzing responses...")

    with st.spinner("π Analyzing entire survey..."):
        # BUG FIX: the original call passed only survey_text, but
        # analyze_survey_document requires a `question` argument (TypeError).
        analysis = analyze_survey_document(
            survey_text, "Summarize the key findings of this survey."
        )
    st.session_state["survey_summary"] = analysis
    st.markdown(analysis)

    # AI chat seeded with the generated summary.
    st.header("π¬ Chat with AI about the Survey")
    st.write("Ask questions about the pulse survey insights.")
    if "messages" not in st.session_state:
        st.session_state["messages"] = [
            {"role": "system", "content": "You are an HR expert analyzing a pulse survey. Provide insights based on the summary below."},
            {"role": "user", "content": st.session_state.get("survey_summary", "No survey uploaded yet.")},
        ]

    user_input = st.text_input("π Ask a question about the survey results:")
    # BUG FIX: ai_response was unbound when the button was not pressed (or the
    # input was empty), so the final `return` raised NameError.
    ai_response = None
    if st.button("Ask AI"):
        if user_input:
            st.session_state["messages"].append({"role": "user", "content": user_input})
            with st.spinner("π Thinking..."):
                ai_response = chat_with_ai(st.session_state["messages"])
            st.session_state["messages"].append({"role": "assistant", "content": ai_response})
            st.markdown(f"**AI:** {ai_response}")
        else:
            st.warning("β οΈ Please enter a question.")
    return ai_response
|