import tempfile

import streamlit as st
import torch
from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts.prompts import SimpleInputPrompt
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding

# Set up the Llama2 model and build a vector index over the uploaded documents.
# Note: loading a 7B model is slow; a production app would cache this step
# (e.g. with st.cache_resource) so it does not rerun on every interaction.
def setup_llama2(documents):
    system_prompt = """
    You are a Q&A assistant. Your goal is to answer questions as
    accurately as possible based on the instructions and context provided.
    """

    query_wrapper_prompt = SimpleInputPrompt("{query_str}")

    llm = HuggingFaceLLM(
        context_window=4096,
        max_new_tokens=256,
        generate_kwargs={"temperature": 0.0, "do_sample": False},
        system_prompt=system_prompt,
        query_wrapper_prompt=query_wrapper_prompt,
        tokenizer_name="meta-llama/Llama-2-7b-chat-hf",
        model_name="meta-llama/Llama-2-7b-chat-hf",
        device_map="auto",
        model_kwargs={"torch_dtype": torch.float16, "load_in_8bit": True}
    )
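    # Note: load_in_8bit requires the bitsandbytes and accelerate packages
    # plus a CUDA GPU, and meta-llama/Llama-2-7b-chat-hf is a gated model,
    # so you must be logged in to Hugging Face with approved access.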

    embed_model = LangchainEmbedding(
        HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    )
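    # all-mpnet-base-v2 is a general-purpose sentence-transformers embedding
    # model; it is downloaded from the Hugging Face Hub on first use.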

    service_context = ServiceContext.from_defaults(
        chunk_size=1024,
        llm=llm,
        embed_model=embed_model
    )
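    # chunk_size controls how the document text is split into nodes
    # before embedding and retrieval.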

    index = VectorStoreIndex.from_documents(documents, service_context=service_context)

    return index

# Streamlit app
def main():
    st.title("Q&A Assistant with Llama2 and Hugging Face")

    # Upload PDF file
    pdf_file = st.file_uploader("Upload a PDF file", type=["pdf"])

    if pdf_file is not None:
        # Write the upload to a temporary file so SimpleDirectoryReader can
        # parse it into llama_index Document objects (requires pypdf)
        with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp:
            tmp.write(pdf_file.read())
            tmp_path = tmp.name

        documents = SimpleDirectoryReader(input_files=[tmp_path]).load_data()

        # Set up the Llama2 model and index the uploaded document
        index = setup_llama2(documents)

        # Section for asking questions
        st.header("Ask a Question")

        # Query input
        query = st.text_input("Enter your question:")

        if st.button("Submit Question"):
            if query:
                # Execute the query
                response = index.as_query_engine().query(query)

                # Display the response
                st.header("Answer")
                st.markdown(str(response))

# Run the Streamlit app
if __name__ == "__main__":
    main()
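
# A minimal way to try this locally (a sketch; assumes the file is saved as
# app.py and that you have approved access to the Llama 2 weights):
#   pip install streamlit "llama-index<0.10" langchain sentence-transformers \
#       transformers accelerate bitsandbytes pypdf torch
#   huggingface-cli login
#   streamlit run app.py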