# training_data_chat / process.py
import pickle

import faiss
from langchain import LLMChain
from langchain.llms.openai import OpenAIChat
from langchain.prompts import Prompt

# The imports below are only referenced by the commented-out streaming variant inside onMessage().
import openai
from langchain import OpenAI
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
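
# Optional helper: a minimal sketch of how the after_training/ artifacts might be (re)built.
# This is NOT the author's original training script; it assumes the common langchain pattern
# of FAISS.from_texts() over a plain-text corpus (the path training/data.txt and the function
# name build_training_artifacts are hypothetical). The raw FAISS index is written separately
# from the pickled store, which matches how runPrompt() below loads and reattaches them.
def build_training_artifacts(corpus_path="training/data.txt"):
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.text_splitter import CharacterTextSplitter
    from langchain.vectorstores import FAISS

    with open(corpus_path, "r") as f:
        raw_text = f.read()
    chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_text(raw_text)

    store = FAISS.from_texts(chunks, OpenAIEmbeddings())
    faiss.write_index(store.index, "after_training/training.index")
    store.index = None  # detach the index so the rest of the store pickles cleanly
    with open("after_training/faiss.pkl", "wb") as f:
        pickle.dump(store, f)
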
# Conversation transcript and lazily-initialised globals shared across calls to runPrompt().
history = []
index = None      # FAISS index, loaded on the first call
store = None      # langchain vector store unpickled from faiss.pkl
prompt = None     # Prompt built from training/master.txt
llmChain = None   # LLMChain used to answer questions
k = 0             # call counter; the setup block runs only on the first call
def runPrompt(user_input):
global index, k, store, prompt, llmChain
k += 1
    if k <= 1:  # run the setup only on the first call, so the index and prompt are not reloaded on every request
        index = faiss.read_index("after_training/training.index")
        with open("after_training/faiss.pkl", "rb") as f:
            store = pickle.load(f)
        # the vector store was pickled without its FAISS index, so reattach it here
        store.index = index
        with open("training/master.txt", "r") as f:
            promptTemplate = f.read()
        # the template in training/master.txt must contain {history}, {context} and {question} placeholders
        prompt = Prompt(template=promptTemplate, input_variables=["history", "context", "question"])
        llmChain = LLMChain(prompt=prompt, llm=OpenAIChat(temperature=0))
def onMessage(question, history_p):
        # Alternative path (left commented out): a streaming gpt-3.5-turbo LLM that prints tokens to stdout as they arrive.
        # contexts = []
        # response_params = OpenAI(
# temperature=0,
# openai_api_key=openai.api_key,
# model_name="gpt-3.5-turbo",
# callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
# verbose=True,
# streaming=True
# )
#
        # llmChain = LLMChain(prompt=prompt, llm=response_params)
# ai_answer = llmChain.predict(question=question, context="\n\n".join(contexts), history=history_p,
# stop=["Human:", "AI:"])
        # retrieve the single most similar chunk from the vector store and format it as the prompt context
        docs = store.similarity_search(question, k=1)
        contexts = []
        for i, doc in enumerate(docs):
            contexts.append(f"Context {i}:\n{doc.page_content}")
ai_answer = llmChain.predict(question=question, context="\n\n".join(contexts), history=history_p)
return ai_answer
answer = onMessage(user_input, history)
history.append(f"Human: {user_input}")
history.append(f"Bot: {answer}")
return answer
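
# Example usage: a minimal sketch, assuming the script is run directly as a command-line
# loop; the original Space presumably wires runPrompt() into a web UI instead, so this
# block is illustrative only. OPENAI_API_KEY must be set in the environment.
if __name__ == "__main__":
    while True:
        user_input = input("Human: ")
        if user_input.strip().lower() in {"exit", "quit"}:
            break
        print(f"Bot: {runPrompt(user_input)}")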