Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,125 +1,67 @@
|
|
1 |
-
'''
|
2 |
import streamlit as st
|
3 |
-
import
|
4 |
-
import
|
5 |
-
from
|
|
|
6 |
|
7 |
-
#
|
8 |
-
|
9 |
-
|
|
|
|
|
10 |
|
11 |
-
|
|
|
|
|
12 |
|
13 |
-
|
14 |
-
|
|
|
15 |
|
16 |
-
#
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
f.write(video_file.read())
|
22 |
-
st.success("Video uploaded successfully!")
|
23 |
-
build_embeddings("data/videos") # Update embeddings
|
24 |
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
-
|
27 |
-
|
28 |
-
if pdf_file:
|
29 |
-
with open(os.path.join("data/machine_catalogs", pdf_file.name), "wb") as f:
|
30 |
-
f.write(pdf_file.read())
|
31 |
-
st.success("Catalog uploaded!")
|
32 |
|
|
|
33 |
|
34 |
-
|
35 |
-
|
36 |
-
videos = os.listdir("data/videos")
|
37 |
-
if videos:
|
38 |
-
for vid in videos:
|
39 |
-
st.video(os.path.join("data/videos", vid))
|
40 |
-
else:
|
41 |
-
st.info("No videos uploaded yet.")
|
42 |
|
|
|
43 |
|
44 |
-
#
|
45 |
-
st.
|
46 |
-
catalogs = os.listdir("data/machine_catalogs")
|
47 |
-
if catalogs:
|
48 |
-
for cat in catalogs:
|
49 |
-
st.markdown(f"[{cat}](data/machine_catalogs/{cat})")
|
50 |
-
else:
|
51 |
-
st.info("No catalogs available.")
|
52 |
|
53 |
-
|
54 |
-
# --- AI Assistant Section ---
|
55 |
-
st.header("AI Assistant - Ask for Help")
|
56 |
-
user_query = st.text_input("Describe the task or ask about a machine...")
|
57 |
if user_query:
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
# Directory containing uploaded videos
VIDEO_DIR = "videos"

# --- Helper Function to Find Video ---
def find_video(query, directory=None):
    """Return video filenames whose names contain *query* (case-insensitive).

    Matching is a plain substring test against each filename — no semantic
    or fuzzy search is performed.

    Args:
        query: Free-text user query; compared lowercased.
        directory: Folder to scan. Defaults to ``VIDEO_DIR`` when ``None``,
            preserving the original behavior for existing callers.

    Returns:
        A list of matching filenames (not full paths), in ``os.listdir`` order.

    Raises:
        FileNotFoundError: If *directory* does not exist (propagated from
            ``os.listdir``).
    """
    if directory is None:
        directory = VIDEO_DIR
    # Hoist the lowercasing of the query out of the loop.
    needle = query.lower()
    return [name for name in os.listdir(directory) if needle in name.lower()]
|
# --- Chatbot Section ---
st.header("AI Assistant - Ask for Help")

# First run of the session: start with an empty conversation log.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

user_input = st.chat_input("Ask about machine, maintenance, or videos...")

if user_input:
    # Echo the user's message in the chat pane.
    st.chat_message("user").write(user_input)

    # Append the user turn to the Gemini-format history.
    st.session_state.chat_history.append({"role": "user", "parts": [user_input]})

    # Ask the model for a reply over the whole conversation so far.
    response = model.generate_content(st.session_state.chat_history)
    reply = response.text

    # Render the assistant turn, then persist it to session state.
    st.chat_message("assistant").write(reply)
    st.session_state.chat_history.append({"role": "model", "parts": [reply]})

    # --- Video Recommendation based on query ---
    matched_videos = find_video(user_input)
    if matched_videos:
        st.write("### Recommended Videos for You:")
        for video in matched_videos:
            # Play each match with Streamlit's built-in video player.
            st.video(os.path.join(VIDEO_DIR, video))
    else:
        st.info("No video found for your query. Try uploading one or rephrasing.")
|
|
|
|
|
import os

import streamlit as st
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq
from pinecone import Pinecone
# ========== CONFIGURATION ========== #
# Read API keys from the environment so real credentials never land in
# source control; the literal placeholders remain as fallbacks so local
# behavior is unchanged when the variables are unset.
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY", "your_pinecone_api_key")
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "your_groq_api_key")
INDEX_NAME = "rag-granite-index"  # Pinecone index holding the document embeddings
NAMESPACE = "rag-ns"              # namespace within that index

# ========== SETUP ========== #
st.set_page_config(page_title="RAG Assistant", page_icon="🤖")
st.title("💬 RAG-Powered Q&A Assistant")

# Init Pinecone client and open the existing index.
pc = Pinecone(api_key=PINECONE_API_KEY)
index = pc.Index(INDEX_NAME)

# Init Groq-hosted Llama 3 chat model.
llm = ChatGroq(
    model_name="llama3-70b-8192",
    api_key=GROQ_API_KEY
)

# Prompt Template: constrains the model to a 1-2 line answer grounded in
# the retrieved context.
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template="""
You are a smart assistant. Based on the provided context, answer the question in 1 to 2 lines only.

Context:
{context}

Question: {question}

Answer:"""
)

llm_chain = LLMChain(llm=llm, prompt=prompt)
# ========== STREAMLIT UI ========== #

@st.cache_resource
def _get_embedder():
    """Load the query-embedding model once and reuse it across reruns.

    The original code re-instantiated SentenceTransformer inside the
    query handler, re-loading the model weights on every question;
    ``st.cache_resource`` makes it a one-time cost per process.
    Must match the model used to build the Pinecone index, otherwise
    similarity scores are meaningless.
    """
    from sentence_transformers import SentenceTransformer
    return SentenceTransformer("all-MiniLM-L6-v2")

user_query = st.text_input("Ask a question from the document 👇")

if user_query:
    with st.spinner("Fetching answer..."):
        # Embed the user's question with the cached model.
        embedder = _get_embedder()
        query_embedding = embedder.encode(user_query).tolist()

        # Retrieve the 3 most similar chunks from the index.
        results = index.query(
            namespace=NAMESPACE,
            vector=query_embedding,
            top_k=3,
            include_metadata=True
        )

        # Stitch the retrieved chunk texts into a single context string.
        context = "\n\n".join(match['metadata']['text'] for match in results['matches'])

        # Run the prompt + LLM chain; LLMChain.invoke returns a dict with
        # the generation under the "text" key.
        response = llm_chain.invoke({
            "context": context,
            "question": user_query
        })

        st.success("Answer:")
        st.write(response["text"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|