Prathamesh1420 committed
Commit ac2329b · verified · 1 Parent(s): ffb4c9f

Update app.py

Files changed (1)
  1. app.py +54 -112
app.py CHANGED
@@ -1,125 +1,67 @@
- '''
  import streamlit as st
- import os
- import shutil
- from retriever import find_video, build_embeddings
-
- # Create directories
- os.makedirs("data/videos", exist_ok=True)
- os.makedirs("data/machine_catalogs", exist_ok=True)
-
- st.set_page_config(page_title="Maintenance Knowledge Portal", layout="centered")
-
- st.title("Maintenance Knowledge Portal")
- st.markdown("Upload videos, explore catalogs, and ask the AI assistant for help.")
-
- # --- Upload Section ---
- st.header("Upload Maintenance Video")
- video_file = st.file_uploader("Upload Video", type=["mp4", "mov", "avi"])
- if video_file:
-     with open(os.path.join("data/videos", video_file.name), "wb") as f:
-         f.write(video_file.read())
-     st.success("Video uploaded successfully!")
-     build_embeddings("data/videos")  # Update embeddings
-
- st.header("Upload Machine Catalog (PDF)")
- pdf_file = st.file_uploader("Upload PDF", type=["pdf"])
- if pdf_file:
-     with open(os.path.join("data/machine_catalogs", pdf_file.name), "wb") as f:
-         f.write(pdf_file.read())
-     st.success("Catalog uploaded!")
-
- # --- Video Viewer Section ---
- st.header("Available Maintenance Videos")
- videos = os.listdir("data/videos")
- if videos:
-     for vid in videos:
-         st.video(os.path.join("data/videos", vid))
- else:
-     st.info("No videos uploaded yet.")
-
- # --- Catalog Viewer Section ---
- st.header("Machine Catalogs")
- catalogs = os.listdir("data/machine_catalogs")
- if catalogs:
-     for cat in catalogs:
-         st.markdown(f"[{cat}](data/machine_catalogs/{cat})")
- else:
-     st.info("No catalogs available.")
-
- # --- AI Assistant Section ---
- st.header("AI Assistant - Ask for Help")
- user_query = st.text_input("Describe the task or ask about a machine...")
  if user_query:
-     try:
-         video_file, desc = find_video(user_query)
-         st.subheader("Recommended Video:")
-         st.write(f"**{desc}**")
-         st.video(os.path.join("data/videos", video_file))
-     except Exception as e:
-         st.error("Sorry, couldn't find a matching video. Try uploading more videos.")
- '''
-
- import os
- import google.generativeai as genai
- import streamlit as st
-
- # Configure Google Gemini API Key
- genai.configure(api_key="your_google_api_key")
-
- # Define the model
- model = genai.GenerativeModel(model_name="models/gemini-1.5-pro")
-
- # Directory containing uploaded videos
- VIDEO_DIR = "videos"
-
- # --- Helper Function to Find Video ---
- def find_video(query):
-     """Search the video directory for relevant video files based on user input."""
-     matches = []
-     for filename in os.listdir(VIDEO_DIR):
-         # Simple match based on keywords in the filename
-         if query.lower() in filename.lower():
-             matches.append(filename)
-     return matches
-
- # --- Chatbot Section ---
- st.header("AI Assistant - Ask for Help")
-
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
-
- user_input = st.chat_input("Ask about machine, maintenance, or videos...")
-
- if user_input:
-     st.chat_message("user").write(user_input)
-
-     # Add to Gemini chat history
-     st.session_state.chat_history.append({"role": "user", "parts": [user_input]})
-
-     # Generate response from Gemini model
-     response = model.generate_content(st.session_state.chat_history)
-     reply = response.text
-
-     # Show Gemini's response
-     st.chat_message("assistant").write(reply)
-
-     # Save bot's reply to the session state
-     st.session_state.chat_history.append({"role": "model", "parts": [reply]})
-
-     # --- Video Recommendation based on query ---
-     matched_videos = find_video(user_input)
-
-     if matched_videos:
-         st.write("### Recommended Videos for You:")
-         for video in matched_videos:
-             # Show video using Streamlit video player
-             st.video(os.path.join(VIDEO_DIR, video))
-     else:
-         st.info("No video found for your query. Try uploading one or rephrasing.")
  import streamlit as st
+ from pinecone import Pinecone
+ from langchain_groq import ChatGroq
+ from langchain.prompts import PromptTemplate
+ from langchain.chains.llm import LLMChain
+
+ # ========== CONFIGURATION ========== #
+ PINECONE_API_KEY = "your_pinecone_api_key"
+ GROQ_API_KEY = "your_groq_api_key"
+ INDEX_NAME = "rag-granite-index"
+ NAMESPACE = "rag-ns"
+
+ # ========== SETUP ========== #
+ st.set_page_config(page_title="RAG Assistant", page_icon="🤖")
+ st.title("💬 RAG-Powered Q&A Assistant")
+
+ # Init Pinecone
+ pc = Pinecone(api_key=PINECONE_API_KEY)
+ index = pc.Index(INDEX_NAME)
+
+ # Init Groq LLM
+ llm = ChatGroq(
+     model_name="llama3-70b-8192",
+     api_key=GROQ_API_KEY
+ )
+
+ # Prompt Template
+ prompt = PromptTemplate(
+     input_variables=["context", "question"],
+     template="""
+ You are a smart assistant. Based on the provided context, answer the question in 1 to 2 lines only.
+
+ Context:
+ {context}
+
+ Question: {question}
+
+ Answer:"""
+ )
+
+ llm_chain = LLMChain(llm=llm, prompt=prompt)
+
+ # ========== STREAMLIT UI ========== #
+ user_query = st.text_input("Ask a question from the document 👇")
+
  if user_query:
+     with st.spinner("Fetching answer..."):
+         from sentence_transformers import SentenceTransformer
+         embedder = SentenceTransformer("all-MiniLM-L6-v2")  # Match your embedding model
+         query_embedding = embedder.encode(user_query).tolist()
+
+         results = index.query(
+             namespace=NAMESPACE,
+             vector=query_embedding,
+             top_k=3,
+             include_metadata=True
+         )
+
+         context = "\n\n".join(match['metadata']['text'] for match in results['matches'])
+
+         response = llm_chain.invoke({
+             "context": context,
+             "question": user_query
+         })
+
+     st.success("Answer:")
+     st.write(response["text"])
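
Note: both the removed Gemini snippet and the new code keep API keys directly in app.py. A safer pattern is to read them from the environment at runtime; a minimal sketch, assuming the keys are set as Hugging Face Space secrets or exported shell variables (the variable names here are illustrative):

import os

# Illustrative names; set these in the Space's secrets settings (exposed as
# environment variables) or in the shell before launching the app.
PINECONE_API_KEY = os.environ["PINECONE_API_KEY"]
GROQ_API_KEY = os.environ["GROQ_API_KEY"]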
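The query vector comes from all-MiniLM-L6-v2, which outputs 384-dimensional embeddings, so rag-granite-index must have been created with dimension 384 and populated with the same model. A minimal sketch of creating a compatible index with the current Pinecone client, assuming the serverless tier (cloud and region values are placeholders):

from pinecone import Pinecone, ServerlessSpec

pc = Pinecone(api_key="your_pinecone_api_key")

# Create the index once, sized to the embedding model's output.
if "rag-granite-index" not in pc.list_indexes().names():
    pc.create_index(
        name="rag-granite-index",
        dimension=384,  # all-MiniLM-L6-v2 embedding size
        metric="cosine",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )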
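As written, the SentenceTransformer model is re-instantiated inside the spinner on every query. A sketch of loading it once per process with Streamlit's resource cache (get_embedder is a helper name chosen here for illustration):

import streamlit as st
from sentence_transformers import SentenceTransformer

@st.cache_resource
def get_embedder():
    # Loaded once and reused across reruns; must match the indexing model.
    return SentenceTransformer("all-MiniLM-L6-v2")

# In the query handler:
# query_embedding = get_embedder().encode(user_query).tolist()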