import pickle

import faiss
from langchain import LLMChain
from langchain.llms.openai import OpenAIChat
from langchain.prompts import Prompt
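

# --- Hypothetical indexing sketch --------------------------------------------
# This script only *loads* after_training/training.index and faiss.pkl. The
# function below is a minimal sketch, under assumptions, of how such artifacts
# are commonly produced with this era of LangChain (FAISS.from_texts plus a
# separately written faiss index). buildIndex, source_path, and the splitter
# settings are illustrative, not part of this repo.
def buildIndex(source_path="training/data.txt"):
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.text_splitter import CharacterTextSplitter
    from langchain.vectorstores import FAISS

    with open(source_path, "r") as f:
        splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=0)
        chunks = splitter.split_text(f.read())
    vstore = FAISS.from_texts(chunks, OpenAIEmbeddings())
    # faiss indexes do not pickle, so write the raw index to its own file and
    # detach it before pickling the store (runPrompt re-attaches it on load).
    faiss.write_index(vstore.index, "after_training/training.index")
    vstore.index = None
    with open("after_training/faiss.pkl", "wb") as f:
        pickle.dump(vstore, f)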


# Module-level state, initialised lazily on the first call to runPrompt.
history = []     # running transcript of Human/Bot turns
index = None     # FAISS index read from after_training/training.index
store = None     # pickled vector store whose .index is re-attached at load time
prompt = None    # Prompt built from the template in training/master.txt
llmChain = None  # LLMChain combining the prompt with the chat model
k = 0            # call counter used as a one-time initialisation guard
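

# Illustrative only: the real template is read from training/master.txt at
# runtime. Whatever that file contains, it must expose the three variables
# declared for the chain below; this sample shows one plausible shape.
EXAMPLE_PROMPT_TEMPLATE = """You are a helpful assistant. Answer using only the context.

{context}

Conversation so far:
{history}
Human: {question}
Bot:"""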


def runPrompt(user_input):
    global index, k, store, prompt, llmChain
    k += 1
    if k <= 1:  # initialise only on the first call, to avoid repeated loading
        # Load the raw FAISS index and the pickled store, then re-attach the
        # index (it is saved separately because faiss indexes cannot be pickled).
        index = faiss.read_index("after_training/training.index")
        with open("after_training/faiss.pkl", "rb") as f:
            store = pickle.load(f)
            store.index = index
        # The template file must contain the {history}, {context} and
        # {question} variables declared below.
        with open("training/master.txt", "r") as f:
            promptTemplate = f.read()
        prompt = Prompt(template=promptTemplate, input_variables=["history", "context", "question"])
        llmChain = LLMChain(prompt=prompt, llm=OpenAIChat(temperature=0))

    def onMessage(question, history_p):
        # A streaming variant is possible by building the chain with an OpenAI
        # LLM configured with streaming=True and a StreamingStdOutCallbackHandler
        # in its callback manager; the plain non-streaming chain is used here.

        # Retrieve the most similar document(s) and label each as a context block.
        docs = store.similarity_search(question, k=1)
        contexts = [f"Context {i}:\n{doc.page_content}" for i, doc in enumerate(docs)]
        # Predict once with all retrieved contexts joined together, rather than
        # issuing one API call per document.
        ai_answer = llmChain.predict(question=question,
                                     context="\n\n".join(contexts),
                                     history=history_p)
        return ai_answer

    answer = onMessage(user_input, history)
    history.append(f"Human: {user_input}")
    history.append(f"Bot: {answer}")
    return answer
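

# Minimal usage sketch: a REPL-style loop around runPrompt. Assumes an
# OPENAI_API_KEY is set in the environment and the after_training/ artifacts
# exist; conversation state lives in the module-level `history` list.
if __name__ == "__main__":
    while True:
        user_input = input("Human: ")
        if user_input.strip().lower() in ("exit", "quit"):
            break
        print(f"Bot: {runPrompt(user_input)}")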