from langchain.agents import Tool
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    AIMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain.tools.render import render_text_description
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.agents.format_scratchpad import format_log_to_messages
from langchain.agents import AgentExecutor
from langchain.memory import (
    ConversationSummaryBufferMemory,
    ConversationBufferWindowMemory,
)

from model import llm4, llm
from chains.step1 import step1Tool
from chains.step2 import step2Tool
from chains.step3 import step3Tool
from chains.step4 import step4Tool

PURPOSE = """\
From a scalability perspective, clearly define the social issue to be addressed. \
The principal and the team surely have a social issue that they ponder over how to solve from morning till night every day. \
Clearly defining this issue will help the team to:
- Concentrate time and resource investments to solve the problem in a scalable manner.
- Understand how to find a suitable position to tackle the problem within a larger ecosystem.
- Identify the beneficiary group you want to focus on.
- Establish scalable strategies and models.\
"""

SUGGESTION = """\
Maintaining a "continual questioning" attitude at all times, staying extremely curious about the causes of the issues, \
and keeping an open attitude towards products and scalable approaches that address social problems at scale \
will help you and your team continuously deepen your understanding of the issues and find more accurate solutions.\
"""

STEPS = """\
1. Problem Storming: Participants follow their intuition and experience, \
recording all the questions lingering in their minds in any way they prefer.
2. Problem Deconstruction: Refine and structure the proposed questions. \
Attempt to describe the issue in detail from several aspects, such as the surface problem, the underlying causes, \
the populations affected by the problem, and the impact that has already been caused.
3. Problem Sharing: Share within the group. Besides sharing the problem itself, \
explain why such a question is raised and how it was reasoned through. \
After sharing is completed, merge similar questions within the group.
4. Problem Reconstruction: Based on feedback, write down the final definition of the problem.
"""

agentTemplate = """\
You are a Coach who uses a workshop toolkit to help other organizations define their social problems. Do not answer unrelated questions.

Coach is designed to help me use the workshop toolkit for scalable social-problem definition, \
using the Socratic method to ask questions that help me learn the toolkit concepts step by step. \
Coach is constantly learning and improving, and its capabilities are constantly evolving. \
It is able to process and understand the current problem and to select the right step's response for a given situation.

Here is some context about the toolkit:
```
Toolkit purpose: {toolkit_purpose}
Toolkit suggestion: {toolkit_suggestion}
Toolkit steps: {toolkit_steps}
```

TOOLS:
------

Coach has access to the following tools:

{tools}

To use a tool, you MUST use the following format, and do not use a tool repeatedly with the same input:

```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the following format:

```
Thought: Do I need to use a tool? No
Final Answer: [your response here, which MUST be in Chinese]
```

Response Example:
```
User: hi!
AI: Thought: Do I need to use a tool? No
Final Answer: 你好, 我该如何帮助你?
```

Begin!\
"""

tools = [step1Tool, step2Tool, step3Tool, step4Tool]

agentPrompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(
            template=agentTemplate,
            partial_variables={
                "toolkit_purpose": PURPOSE,
                "toolkit_suggestion": SUGGESTION,
                "toolkit_steps": STEPS,
                "tools": render_text_description(tools),
                "tool_names": ", ".join([t.name for t in tools]),
            },
        ),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

# Stop generation before the model fabricates an "Observation:" line; the
# executor supplies the real tool output instead.
llm_with_stop = llm4.bind(stop=["\nObservation"])

# ReAct agent pipeline: map raw inputs, render the prompt, call the model,
# then parse a single tool action or a final answer.
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_messages(x["intermediate_steps"]),
        "chat_history": lambda x: x["chat_history"],
    }
    | agentPrompt
    | llm_with_stop
    | ReActSingleInputOutputParser()
)

# Summarize older turns once the history exceeds the token limit; a fixed-window
# alternative is kept below for reference.
memory = ConversationSummaryBufferMemory(
    memory_key="chat_history",
    llm=llm,
    max_token_limit=600,
    return_messages=True,
)
# memory = ConversationBufferWindowMemory(
#     memory_key="chat_history", return_messages=True, k=6
# )

agent_executor = AgentExecutor(
    agent=agent, tools=tools, verbose=True, memory=memory, handle_parsing_errors=True
)
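

# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming this module is run directly for a quick test.
# The __main__ guard and the sample question are illustrative only; the memory
# object injects "chat_history" on every call, so only "input" is required.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    result = agent_executor.invoke({"input": "What is the first step of the toolkit?"})
    print(result["output"])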