# ByteX-Ai / app.py
# Version 2: added a custom prompt template.
import os
import json
import sqlite3
from datetime import datetime
import streamlit as st
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_groq import ChatGroq
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from vectorize_documents import embeddings  # the embeddings Vector_db was built with (see note in setup_vectorstore)
# Load config
working_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(working_dir, "config.json")) as config_file:
    config_data = json.load(config_file)
GROQ_API_KEY = config_data["GROQ_API_KEY"]
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
# Set up the database
def setup_db():
    conn = sqlite3.connect("chat_history.db", check_same_thread=False)
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS chat_histories (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT,
            timestamp TEXT,
            day TEXT,
            user_message TEXT,
            assistant_response TEXT
        )
    """)
    conn.commit()
    return conn
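
# The chat_histories table above is never written to elsewhere in this file; a
# minimal sketch that completes it, one row per exchange (save_chat is a
# hypothetical helper, not part of the original code):
def save_chat(conn, username, user_message, assistant_response):
    now = datetime.now()
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO chat_histories (username, timestamp, day, user_message, assistant_response) "
        "VALUES (?, ?, ?, ?, ?)",
        (
            username,
            now.isoformat(),
            now.strftime("%A"),  # assumption: "day" means the weekday name
            user_message,
            assistant_response,
        ),
    )
    conn.commit()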
# Set up vectorstore
def setup_vectorstore():
    embeddings = HuggingFaceEmbeddings()
    vectorstore = Chroma(persist_directory="Vector_db", embedding_function=embeddings)
    return vectorstore
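
# Note: HuggingFaceEmbeddings() with no arguments uses LangChain's default
# sentence-transformers model. Queries must be embedded with the same model
# that built Vector_db; if vectorize_documents used a different one, reuse its
# `embeddings` object (imported above) instead, e.g.:
#
#     vectorstore = Chroma(persist_directory="Vector_db", embedding_function=embeddings)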
# Custom prompt template
custom_prompt_template = PromptTemplate.from_template("""
You are a helpful assistant that helps users choose laptops.
1. Analyze the user's query and, using only the retrieved context below, recommend the top 3 laptops.
2. Keep suggestions clear and concise: names, key specs, and reasons drawn only from the relevant context.

Relevant Information:
{context}

Chat History:
{chat_history}

User Query:
{question}

Assistant Response:
""")
# Set up the chatbot chain with a specific model
def chat_chain(vectorstore, model_name):
    llm = ChatGroq(model=model_name, temperature=0.3)
    retriever = vectorstore.as_retriever()
    memory = ConversationBufferMemory(
        output_key="answer",  # the chain also returns source_documents; store only the answer
        memory_key="chat_history",
        return_messages=True,
    )
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": custom_prompt_template},
        return_source_documents=True,
        verbose=True,
    )
    return chain
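
# How the chain answers each turn: the LLM first condenses (chat_history,
# question) into a standalone question, the retriever pulls matching laptop
# documents from Chroma, and the stuff combine-docs step fills
# custom_prompt_template with {context}, {chat_history}, and {question}.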
# Streamlit UI setup
st.set_page_config(page_title="ByteX-Ai", page_icon="🤖", layout="centered")
st.title("🤖 ByteX-Ai")
st.subheader("Hey! Get your Laptop!!")
# Initialize DB connection
if "conn" not in st.session_state:
st.session_state.conn = setup_db()
# Prompt user to log in
if "username" not in st.session_state:
username = st.text_input("Enter your name to proceed:")
if username:
with st.spinner("Loading chatbot interface... Please wait."):
st.session_state.username = username
st.session_state.chat_history = []
st.session_state.vectorstore = setup_vectorstore()
st.success(f"Welcome, {username}! Now select a model to start chatting.")
else:
username = st.session_state.username
# Model selection options
model_options = [
    "gemma2-9b-it",
    "llama-3.1-8b-instant",
    "llama3-70b-8192",
    "llama3-8b-8192",
]
selected_model = st.selectbox("Choose a model:", model_options)
# Ensure vectorstore exists
if "vectorstore" not in st.session_state:
st.session_state.vectorstore = setup_vectorstore()
# Set or update the selected model
if "selected_model" not in st.session_state:
st.session_state.selected_model = selected_model
# Reset conversational_chain if model changes or not yet initialized
if ("conversational_chain" not in st.session_state) or (st.session_state.selected_model != selected_model):
st.session_state.selected_model = selected_model
st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore, selected_model)
st.session_state.chat_history = []
# Reset chat manually
if st.button("🔄 Reset Chat"):
st.session_state.chat_history = []
st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore, st.session_state.selected_model)
st.success("Chat reset!")
# Show chat UI
if "username" in st.session_state:
st.subheader(f"Hello {username}, start your query below!")
if st.session_state.chat_history:
for message in st.session_state.chat_history:
if message['role'] == 'user':
with st.chat_message("user"):
st.markdown(message["content"])
elif message['role'] == 'assistant':
with st.chat_message("assistant"):
st.markdown(message["content"])
user_input = st.chat_input("Ask AI....")
if user_input:
with st.spinner("Processing your query... Please wait."):
st.session_state.chat_history.append({"role": "user", "content": user_input})
with st.chat_message("user"):
st.markdown(user_input)
with st.chat_message("assistant"):
response = st.session_state.conversational_chain({"question": user_input})
assistant_response = response["answer"]
st.markdown(assistant_response)
st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})