import os
import streamlit as st
import PyPDF2
import torch
import weaviate
from transformers import AutoTokenizer, AutoModel
from weaviate.classes.init import Auth
import cohere

# Load credentials from environment variables; never hardcode API keys in source.
WEAVIATE_URL = os.environ["WEAVIATE_URL"]
WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"]
COHERE_API_KEY = os.environ["COHERE_API_KEY"]

# Connect to Weaviate
client = weaviate.connect_to_weaviate_cloud(
    cluster_url=WEAVIATE_URL,
    auth_credentials=Auth.api_key(WEAVIATE_API_KEY),
    headers={"X-Cohere-Api-Key": COHERE_API_KEY}
)
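
# Streamlit re-runs this whole script on every interaction, so the connection
# above is re-created on each run. A sketch of the idiomatic fix, caching one
# client per process with st.cache_resource (a hypothetical refactor of the
# block above, not active here):
#
#   @st.cache_resource
#   def get_client():
#       return weaviate.connect_to_weaviate_cloud(
#           cluster_url=WEAVIATE_URL,
#           auth_credentials=Auth.api_key(WEAVIATE_API_KEY),
#           headers={"X-Cohere-Api-Key": COHERE_API_KEY},
#       )
#   client = get_client()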

cohere_client = cohere.Client(COHERE_API_KEY)

# Load sentence-transformer model
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
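
# The inserts and queries below assume a "Document" collection already exists.
# A minimal sketch that creates it on first run, with vectors supplied
# client-side since embeddings are computed locally (the property name matches
# the inserts below; adjust as needed):
from weaviate.classes.config import Configure, DataType, Property

if not client.collections.exists("Document"):
    client.collections.create(
        name="Document",
        properties=[Property(name="content", data_type=DataType.TEXT)],
        vectorizer_config=Configure.Vectorizer.none(),  # bring-your-own vectors
    )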

def load_pdf(file):
    """Extract text from a PDF file, skipping pages with no extractable text."""
    reader = PyPDF2.PdfReader(file)
    # Call extract_text() once per page, and join with newlines so the last word
    # of one page doesn't fuse with the first word of the next.
    page_texts = (page.extract_text() or '' for page in reader.pages)
    return '\n'.join(text for text in page_texts if text)

def get_embeddings(text):
    """Generate a mean-pooled embedding for the input text."""
    # truncation=True caps input at the model's 512-token limit; the 500-character
    # chunks used below stay comfortably under it.
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        embeddings = model(**inputs).last_hidden_state.mean(dim=1).squeeze().cpu().numpy()
    return embeddings
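
# The plain mean above ignores the attention mask, which is fine for a single
# unpadded string but skews vectors for padded batches. A minimal sketch of the
# mask-aware pooling the sentence-transformers recipe uses (hypothetical helper,
# not wired into the pipeline):
def get_embeddings_masked(texts):
    """Mask-aware mean pooling over token embeddings; safe for padded batches."""
    inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        hidden = model(**inputs).last_hidden_state          # (batch, tokens, dim)
    mask = inputs["attention_mask"].unsqueeze(-1).float()   # (batch, tokens, 1)
    summed = (hidden * mask).sum(dim=1)                     # zero out pad tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)                # real tokens per sequence
    return (summed / counts).cpu().numpy()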

def upload_document_chunks(chunks):
    """Insert document chunks into Weaviate collection with embeddings."""
    doc_collection = client.collections.get("Document")
    for chunk in chunks:
        embedding = get_embeddings(chunk)
        doc_collection.data.insert(
            properties={"content": chunk},
            vector=embedding.tolist()
        )
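
# Per-object inserts pay one round trip per chunk; for larger documents the v4
# client's dynamic batching amortizes that cost. A sketch of the same upload
# using batching (hypothetical alternative, not wired into the pipeline):
def upload_document_chunks_batched(chunks):
    """Batched variant of upload_document_chunks; assumes the same schema."""
    doc_collection = client.collections.get("Document")
    with doc_collection.batch.dynamic() as batch:
        for chunk in chunks:
            batch.add_object(
                properties={"content": chunk},
                vector=get_embeddings(chunk).tolist(),
            )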

def query_answer(query):
    """Search for top relevant document chunks based on query embedding."""
    query_embedding = get_embeddings(query)
    results = client.collections.get("Document").query.near_vector(
        near_vector=query_embedding.tolist(),
        limit=3
    )
    return results.objects

def generate_response(context, query):
    """Generate answer using Cohere model based on context and query."""
    response = cohere_client.generate(
        model='command',
        prompt=f"Context: {context}\n\nQuestion: {query}\nAnswer:",
        max_tokens=100
    )
    return response.generations[0].text.strip()
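
# cohere.Client.generate() is Cohere's legacy Generate API; newer SDK versions
# steer toward the Chat API. A commented sketch of the equivalent call, assuming
# cohere>=5 and its ClientV2 (the model name "command-r" is illustrative):
#
#   co = cohere.ClientV2(COHERE_API_KEY)
#   res = co.chat(
#       model="command-r",
#       messages=[{"role": "user",
#                  "content": f"Context: {context}\n\nQuestion: {query}\nAnswer:"}],
#   )
#   answer = res.message.content[0].text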

def qa_pipeline(pdf_file, query):
    """Main QA pipeline: parse the PDF, embed chunks, query Weaviate, and generate an answer."""
    document_text = load_pdf(pdf_file)
    # Fixed 500-character slices with no overlap: crude, but predictable.
    document_chunks = [document_text[i:i+500] for i in range(0, len(document_text), 500)]

    # NOTE: this re-inserts every chunk on each call, so asking a second question
    # duplicates the document in Weaviate; see the session-state sketch below
    # this function for indexing once per upload.
    upload_document_chunks(document_chunks)
    top_docs = query_answer(query)

    context = ' '.join([doc.properties['content'] for doc in top_docs])
    answer = generate_response(context, query)

    return context, answer
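
# Sketch: avoid re-indexing on every question by remembering the last uploaded
# file in Streamlit session state (hypothetical helper; assumes one document per
# session, and hashing the file bytes would be more robust than the name):
def index_once(pdf_file):
    """Index a PDF only if it hasn't been indexed in this session."""
    if st.session_state.get("indexed_file") != pdf_file.name:
        text = load_pdf(pdf_file)
        chunks = [text[i:i + 500] for i in range(0, len(text), 500)]
        upload_document_chunks(chunks)
        st.session_state["indexed_file"] = pdf_file.name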

# Streamlit UI
st.set_page_config(page_title="Interactive QA Bot", layout="wide")

st.markdown(
    """
    <div style="text-align: center; font-size: 28px; font-weight: bold; margin-bottom: 20px; color: #2D3748;">
        πŸ“„ Interactive QA Bot πŸ”
    </div>
    <p style="text-align: center; font-size: 16px; color: #4A5568;">
        Upload a PDF document, ask questions, and receive answers based on the document content.
    </p>
    <hr style="border: 1px solid #CBD5E0; margin: 20px 0;">
    """, unsafe_allow_html=True
)

col1, col2 = st.columns([1, 2])

with col1:
    pdf_file = st.file_uploader("πŸ“ Upload PDF", type=["pdf"])
    query = st.text_input("❓ Ask a Question", placeholder="Enter your question here...")
    submit = st.button("πŸ” Submit")

with col2:
    doc_segments_output = st.empty()
    answer_output = st.empty()

if submit:
    if not pdf_file:
        st.warning("Please upload a PDF file first.")
    elif not query.strip():
        st.warning("Please enter a question.")
    else:
        with st.spinner("Processing..."):
            context, answer = qa_pipeline(pdf_file, query)
            doc_segments_output.text_area("πŸ“œ Retrieved Document Segments", context, height=200)
            answer_output.text_area("πŸ’¬ Answer", answer, height=80)

# Optional custom CSS for styling
st.markdown(
    """
    <style>
    body {
        background-color: #EDF2F7;
    }
    .stFileUploader > div > div > input {
        background-color: #3182CE !important;
        color: white !important;
        padding: 8px !important;
        border-radius: 5px !important;
    }
    button {
        background-color: #3182CE !important;
        color: white !important;
        padding: 10px !important;
        font-size: 16px !important;
        border-radius: 5px !important;
        cursor: pointer;
        border: none !important;
    }
    button:hover {
        background-color: #2B6CB0 !important;
    }
    textarea {
        border: 2px solid #CBD5E0 !important;
        border-radius: 8px !important;
        padding: 10px !important;
        background-color: #FAFAFA !important;
    }
    </style>
    """, unsafe_allow_html=True
)